Repository: spex66/RTSP-Camera-for-Android Branch: master Commit: ce20a50dd019 Files: 388 Total size: 3.6 MB Directory structure: gitextract_nchk1d3j/ ├── README.md ├── RtspCamera/ │ ├── .classpath │ ├── .gitignore │ ├── .project │ ├── .settings/ │ │ └── org.jboss.ide.eclipse.as.core.prefs │ ├── AndroidManifest.xml │ ├── docs/ │ │ └── wire-udp4000-h264-with-inband-sps.pps-nativeencoder.pcap │ ├── gpl.txt │ ├── jni/ │ │ ├── Android.mk │ │ ├── Application.mk │ │ ├── avc_h264/ │ │ │ ├── Android.mk │ │ │ ├── common/ │ │ │ │ ├── include/ │ │ │ │ │ ├── avcapi_common.h │ │ │ │ │ ├── avcint_common.h │ │ │ │ │ └── avclib_common.h │ │ │ │ └── src/ │ │ │ │ ├── deblock.cpp │ │ │ │ ├── dpb.cpp │ │ │ │ ├── fmo.cpp │ │ │ │ ├── mb_access.cpp │ │ │ │ └── reflist.cpp │ │ │ ├── dec/ │ │ │ │ ├── Android.mk │ │ │ │ ├── include/ │ │ │ │ │ ├── avcdec_api.h │ │ │ │ │ ├── pvavcdecoder.h │ │ │ │ │ ├── pvavcdecoder_factory.h │ │ │ │ │ └── pvavcdecoderinterface.h │ │ │ │ └── src/ │ │ │ │ ├── 3GPVideoParser.cpp │ │ │ │ ├── 3GPVideoParser.h │ │ │ │ ├── NativeH264Decoder.cpp │ │ │ │ ├── NativeH264Decoder.h │ │ │ │ ├── avc_bitstream.cpp │ │ │ │ ├── avcdec_api.cpp │ │ │ │ ├── avcdec_bitstream.h │ │ │ │ ├── avcdec_int.h │ │ │ │ ├── avcdec_lib.h │ │ │ │ ├── header.cpp │ │ │ │ ├── itrans.cpp │ │ │ │ ├── pred_inter.cpp │ │ │ │ ├── pred_intra.cpp │ │ │ │ ├── pvavcdecoder.cpp │ │ │ │ ├── pvavcdecoder_factory.cpp │ │ │ │ ├── residual.cpp │ │ │ │ ├── slice.cpp │ │ │ │ ├── vlc.cpp │ │ │ │ ├── yuv2rgb.cpp │ │ │ │ └── yuv2rgb.h │ │ │ ├── enc/ │ │ │ │ ├── Android.mk │ │ │ │ ├── include/ │ │ │ │ │ ├── pvavcencoder.h │ │ │ │ │ ├── pvavcencoder_factory.h │ │ │ │ │ └── pvavcencoderinterface.h │ │ │ │ └── src/ │ │ │ │ ├── NativeH264Encoder.cpp │ │ │ │ ├── NativeH264Encoder.cpp__orig │ │ │ │ ├── NativeH264Encoder.h │ │ │ │ ├── avcenc_api.cpp │ │ │ │ ├── avcenc_api.h │ │ │ │ ├── avcenc_int.h │ │ │ │ ├── avcenc_lib.h │ │ │ │ ├── bitstream_io.cpp │ │ │ │ ├── block.cpp │ │ │ │ ├── findhalfpel.cpp │ │ │ │ ├── header.cpp 
│ │ │ │ ├── init.cpp │ │ │ │ ├── intra_est.cpp │ │ │ │ ├── motion_comp.cpp │ │ │ │ ├── motion_est.cpp │ │ │ │ ├── pvavcencoder.cpp │ │ │ │ ├── pvavcencoder_factory.cpp │ │ │ │ ├── rate_control.cpp │ │ │ │ ├── residual.cpp │ │ │ │ ├── sad.cpp │ │ │ │ ├── sad_halfpel.cpp │ │ │ │ ├── sad_halfpel_inline.h │ │ │ │ ├── sad_inline.h │ │ │ │ ├── sad_mb_offset.h │ │ │ │ ├── slice.cpp │ │ │ │ └── vlc_encode.cpp │ │ │ └── oscl/ │ │ │ ├── oscl_base.h │ │ │ ├── oscl_base_macros.h │ │ │ ├── oscl_config.h │ │ │ ├── oscl_dll.h │ │ │ ├── oscl_error.h │ │ │ ├── oscl_error_codes.h │ │ │ ├── oscl_exception.h │ │ │ ├── oscl_math.h │ │ │ ├── oscl_mem.h │ │ │ ├── oscl_string.h │ │ │ ├── oscl_types.h │ │ │ └── osclconfig_compiler_warnings.h │ │ └── m4v_h263/ │ │ ├── Android.mk │ │ ├── dec/ │ │ │ ├── Android.mk │ │ │ ├── include/ │ │ │ │ ├── mp4dec_api.h │ │ │ │ ├── pvm4vdecoder.h │ │ │ │ ├── pvm4vdecoder_dpi.h │ │ │ │ ├── pvm4vdecoder_factory.h │ │ │ │ ├── pvvideodecoderinterface.h │ │ │ │ └── visual_header.h │ │ │ ├── oscl/ │ │ │ │ ├── oscl_base.h │ │ │ │ ├── oscl_base_macros.h │ │ │ │ ├── oscl_config.h │ │ │ │ ├── oscl_dll.h │ │ │ │ ├── oscl_error.h │ │ │ │ ├── oscl_error_codes.h │ │ │ │ ├── oscl_exception.h │ │ │ │ ├── oscl_math.h │ │ │ │ ├── oscl_mem.h │ │ │ │ ├── oscl_types.h │ │ │ │ └── osclconfig_compiler_warnings.h │ │ │ └── src/ │ │ │ ├── 3GPVideoParser.cpp │ │ │ ├── 3GPVideoParser.h │ │ │ ├── adaptive_smooth_no_mmx.cpp │ │ │ ├── bitstream.cpp │ │ │ ├── bitstream.h │ │ │ ├── block_idct.cpp │ │ │ ├── cal_dc_scaler.cpp │ │ │ ├── chv_filter.cpp │ │ │ ├── chvr_filter.cpp │ │ │ ├── com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.cpp │ │ │ ├── com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.h │ │ │ ├── combined_decode.cpp │ │ │ ├── conceal.cpp │ │ │ ├── datapart_decode.cpp │ │ │ ├── dcac_prediction.cpp │ │ │ ├── dec_pred_intra_dc.cpp │ │ │ ├── deringing_chroma.cpp │ │ │ ├── deringing_luma.cpp │ │ │ ├── 
find_min_max.cpp │ │ │ ├── get_pred_adv_b_add.cpp │ │ │ ├── get_pred_outside.cpp │ │ │ ├── idct.cpp │ │ │ ├── idct.h │ │ │ ├── idct_vca.cpp │ │ │ ├── max_level.h │ │ │ ├── mb_motion_comp.cpp │ │ │ ├── mb_utils.cpp │ │ │ ├── mbtype_mode.h │ │ │ ├── motion_comp.h │ │ │ ├── mp4dec_lib.h │ │ │ ├── mp4def.h │ │ │ ├── mp4lib_int.h │ │ │ ├── packet_util.cpp │ │ │ ├── post_filter.cpp │ │ │ ├── post_proc.h │ │ │ ├── post_proc_semaphore.cpp │ │ │ ├── pp_semaphore_chroma_inter.cpp │ │ │ ├── pp_semaphore_luma.cpp │ │ │ ├── pvdec_api.cpp │ │ │ ├── pvm4vdecoder.cpp │ │ │ ├── pvm4vdecoder_factory.cpp │ │ │ ├── scaling.h │ │ │ ├── scaling_tab.cpp │ │ │ ├── vlc_dec_tab.h │ │ │ ├── vlc_decode.cpp │ │ │ ├── vlc_decode.h │ │ │ ├── vlc_dequant.cpp │ │ │ ├── vlc_tab.cpp │ │ │ ├── vop.cpp │ │ │ ├── yuv2rgb.cpp │ │ │ ├── yuv2rgb.h │ │ │ ├── zigzag.h │ │ │ └── zigzag_tab.cpp │ │ └── enc/ │ │ ├── Android.mk │ │ ├── include/ │ │ │ ├── cvei.h │ │ │ ├── mp4enc_api.h │ │ │ └── pvm4vencoder.h │ │ ├── oscl/ │ │ │ ├── oscl_base.h │ │ │ ├── oscl_base_macros.h │ │ │ ├── oscl_config.h │ │ │ ├── oscl_dll.h │ │ │ ├── oscl_error.h │ │ │ ├── oscl_error_codes.h │ │ │ ├── oscl_exception.h │ │ │ ├── oscl_math.h │ │ │ ├── oscl_mem.h │ │ │ ├── oscl_types.h │ │ │ └── osclconfig_compiler_warnings.h │ │ └── src/ │ │ ├── bitstream_io.cpp │ │ ├── bitstream_io.h │ │ ├── com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.cpp │ │ ├── com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.h │ │ ├── combined_encode.cpp │ │ ├── datapart_encode.cpp │ │ ├── dct.cpp │ │ ├── dct.h │ │ ├── dct_inline.h │ │ ├── fastcodemb.cpp │ │ ├── fastcodemb.h │ │ ├── fastidct.cpp │ │ ├── fastquant.cpp │ │ ├── fastquant_inline.h │ │ ├── findhalfpel.cpp │ │ ├── m4venc_oscl.h │ │ ├── me_utils.cpp │ │ ├── motion_comp.cpp │ │ ├── motion_est.cpp │ │ ├── mp4def.h │ │ ├── mp4enc_api.cpp │ │ ├── mp4enc_api.cpp.original │ │ ├── mp4enc_lib.h │ │ ├── mp4lib_int.h │ │ ├── pvm4vencoder.cpp │ 
│ ├── rate_control.cpp │ │ ├── rate_control.h │ │ ├── sad.cpp │ │ ├── sad_halfpel.cpp │ │ ├── sad_halfpel_inline.h │ │ ├── sad_inline.h │ │ ├── sad_mb_offset.h │ │ ├── vlc_enc_tab.h │ │ ├── vlc_encode.cpp │ │ ├── vlc_encode.h │ │ ├── vlc_encode_inline.h │ │ └── vop.cpp │ ├── proguard.cfg │ ├── project.properties │ ├── res/ │ │ ├── layout/ │ │ │ ├── cameraapicodecs.xml │ │ │ └── cameranativecodecs.xml │ │ └── values/ │ │ └── strings.xml │ └── src/ │ ├── com/ │ │ └── orangelabs/ │ │ └── rcs/ │ │ ├── core/ │ │ │ ├── CoreException.java │ │ │ └── ims/ │ │ │ └── protocol/ │ │ │ └── rtp/ │ │ │ ├── CodecChain.java │ │ │ ├── MediaRegistry.java │ │ │ ├── MediaRtpReceiver.java │ │ │ ├── Processor.java │ │ │ ├── RtpException.java │ │ │ ├── codec/ │ │ │ │ ├── Codec.java │ │ │ │ └── video/ │ │ │ │ ├── VideoCodec.java │ │ │ │ ├── h263/ │ │ │ │ │ ├── H263Config.java │ │ │ │ │ ├── H263RtpHeader.java │ │ │ │ │ ├── JavaDepacketizer.java │ │ │ │ │ ├── JavaPacketizer.java │ │ │ │ │ ├── decoder/ │ │ │ │ │ │ ├── NativeH263Decoder.java │ │ │ │ │ │ └── VideoSample.java │ │ │ │ │ └── encoder/ │ │ │ │ │ ├── NativeH263Encoder.java │ │ │ │ │ └── NativeH263EncoderParams.java │ │ │ │ └── h264/ │ │ │ │ ├── H264Config.java │ │ │ │ ├── decoder/ │ │ │ │ │ └── NativeH264Decoder.java │ │ │ │ └── encoder/ │ │ │ │ └── NativeH264Encoder.java │ │ │ ├── core/ │ │ │ │ ├── RtcpAppPacket.java │ │ │ │ ├── RtcpByePacket.java │ │ │ │ ├── RtcpCompoundPacket.java │ │ │ │ ├── RtcpPacket.java │ │ │ │ ├── RtcpPacketReceiver.java │ │ │ │ ├── RtcpPacketTransmitter.java │ │ │ │ ├── RtcpPacketUtils.java │ │ │ │ ├── RtcpReceiverReportPacket.java │ │ │ │ ├── RtcpReport.java │ │ │ │ ├── RtcpSdesBlock.java │ │ │ │ ├── RtcpSdesItem.java │ │ │ │ ├── RtcpSdesPacket.java │ │ │ │ ├── RtcpSenderReportPacket.java │ │ │ │ ├── RtcpSession.java │ │ │ │ ├── RtcpStatisticsReceiver.java │ │ │ │ ├── RtcpStatisticsTransmitter.java │ │ │ │ ├── RtpPacket.java │ │ │ │ ├── RtpPacketReceiver.java │ │ │ │ ├── RtpPacketTransmitter.java │ │ │ │ 
├── RtpSource.java │ │ │ │ ├── RtpStatisticsReceiver.java │ │ │ │ └── RtpStatisticsTransmitter.java │ │ │ ├── event/ │ │ │ │ ├── RtcpApplicationEvent.java │ │ │ │ ├── RtcpByeEvent.java │ │ │ │ ├── RtcpEvent.java │ │ │ │ ├── RtcpEventListener.java │ │ │ │ ├── RtcpReceiverReportEvent.java │ │ │ │ ├── RtcpSdesEvent.java │ │ │ │ └── RtcpSenderReportEvent.java │ │ │ ├── format/ │ │ │ │ ├── DummyFormat.java │ │ │ │ ├── Format.java │ │ │ │ ├── audio/ │ │ │ │ │ ├── AudioFormat.java │ │ │ │ │ └── PcmuAudioFormat.java │ │ │ │ └── video/ │ │ │ │ ├── H263VideoFormat.java │ │ │ │ ├── H264VideoFormat.java │ │ │ │ └── VideoFormat.java │ │ │ ├── media/ │ │ │ │ ├── MediaException.java │ │ │ │ ├── MediaInput.java │ │ │ │ ├── MediaOutput.java │ │ │ │ └── MediaSample.java │ │ │ ├── stream/ │ │ │ │ ├── DummyPacketSourceStream.java │ │ │ │ ├── MediaCaptureStream.java │ │ │ │ ├── MediaRendererStream.java │ │ │ │ ├── ProcessorInputStream.java │ │ │ │ ├── ProcessorOutputStream.java │ │ │ │ └── RtpInputStream.java │ │ │ └── util/ │ │ │ ├── Buffer.java │ │ │ ├── Packet.java │ │ │ └── SystemTimeBase.java │ │ ├── platform/ │ │ │ ├── AndroidFactory.java │ │ │ ├── FactoryException.java │ │ │ ├── file/ │ │ │ │ ├── FileDescription.java │ │ │ │ └── FileFactory.java │ │ │ ├── logger/ │ │ │ │ └── AndroidAppender.java │ │ │ ├── network/ │ │ │ │ ├── AndroidDatagramConnection.java │ │ │ │ ├── AndroidHttpConnection.java │ │ │ │ ├── AndroidNetworkFactory.java │ │ │ │ ├── AndroidSocketConnection.java │ │ │ │ ├── AndroidSocketServerConnection.java │ │ │ │ ├── DatagramConnection.java │ │ │ │ ├── HttpConnection.java │ │ │ │ ├── NetworkFactory.java │ │ │ │ ├── SocketConnection.java │ │ │ │ └── SocketServerConnection.java │ │ │ └── registry/ │ │ │ ├── AndroidRegistryFactory.java │ │ │ └── RegistryFactory.java │ │ ├── provider/ │ │ │ └── settings/ │ │ │ ├── RcsSettings.java │ │ │ └── RcsSettingsData.java │ │ ├── service/ │ │ │ └── api/ │ │ │ └── client/ │ │ │ ├── capability/ │ │ │ │ └── Capabilities.java │ │ │ 
└── media/ │ │ │ ├── IMediaEventListener.aidl │ │ │ ├── IMediaPlayer.aidl │ │ │ ├── IMediaRenderer.aidl │ │ │ ├── MediaCodec.aidl │ │ │ ├── MediaCodec.java │ │ │ └── video/ │ │ │ ├── VideoCodec.java │ │ │ └── VideoSurfaceView.java │ │ └── utils/ │ │ ├── FifoBuffer.java │ │ ├── NetworkRessourceManager.java │ │ └── logger/ │ │ ├── Appender.java │ │ └── Logger.java │ └── de/ │ └── kp/ │ ├── net/ │ │ ├── rtp/ │ │ │ ├── RtpPacket.java │ │ │ ├── RtpRandom.java │ │ │ ├── RtpSender.java │ │ │ ├── RtpSocket.java │ │ │ ├── packetizer/ │ │ │ │ ├── AbstractPacketizer.java │ │ │ │ ├── H263Packetizer.java │ │ │ │ ├── H264Fifo.java │ │ │ │ └── H264Packetizer.java │ │ │ ├── recorder/ │ │ │ │ ├── MediaRtpSender.java │ │ │ │ └── RtspVideoRecorder.java │ │ │ ├── stream/ │ │ │ │ └── RtpOutputStream.java │ │ │ └── viewer/ │ │ │ └── RtpVideoRenderer.java │ │ └── rtsp/ │ │ ├── RtspConstants.java │ │ ├── client/ │ │ │ ├── RtspClient.java │ │ │ ├── RtspControl.java │ │ │ ├── api/ │ │ │ │ ├── EntityMessage.java │ │ │ │ ├── Message.java │ │ │ │ ├── MessageFactory.java │ │ │ │ ├── Request.java │ │ │ │ ├── RequestListener.java │ │ │ │ ├── Response.java │ │ │ │ ├── Transport.java │ │ │ │ └── TransportListener.java │ │ │ ├── header/ │ │ │ │ ├── CSeqHeader.java │ │ │ │ ├── ContentEncodingHeader.java │ │ │ │ ├── ContentLengthHeader.java │ │ │ │ ├── ContentTypeHeader.java │ │ │ │ ├── RtspBaseIntegerHeader.java │ │ │ │ ├── RtspBaseStringHeader.java │ │ │ │ ├── RtspContent.java │ │ │ │ ├── RtspHeader.java │ │ │ │ ├── SessionHeader.java │ │ │ │ └── TransportHeader.java │ │ │ ├── message/ │ │ │ │ ├── MessageBuffer.java │ │ │ │ ├── RtspDescriptor.java │ │ │ │ ├── RtspEntityMessage.java │ │ │ │ ├── RtspMedia.java │ │ │ │ ├── RtspMessage.java │ │ │ │ └── RtspMessageFactory.java │ │ │ ├── request/ │ │ │ │ ├── RtspDescribeRequest.java │ │ │ │ ├── RtspOptionsRequest.java │ │ │ │ ├── RtspPauseRequest.java │ │ │ │ ├── RtspPlayRequest.java │ │ │ │ ├── RtspRequest.java │ │ │ │ ├── RtspSetupRequest.java │ │ │ │ 
└── RtspTeardownRequest.java │ │ │ ├── response/ │ │ │ │ └── RtspResponse.java │ │ │ └── transport/ │ │ │ ├── TCPTransport.java │ │ │ └── TCPTransportListener.java │ │ └── server/ │ │ ├── RtspServer.java │ │ └── response/ │ │ ├── Parser.java │ │ ├── RtspAnnounceResponse.java │ │ ├── RtspDescribeResponse.java │ │ ├── RtspError.java │ │ ├── RtspOptionsResponse.java │ │ ├── RtspPauseResponse.java │ │ ├── RtspPlayResponse.java │ │ ├── RtspResponse.java │ │ ├── RtspResponseTeardown.java │ │ ├── RtspSetupResponse.java │ │ └── SDP.java │ └── rtspcamera/ │ ├── MediaConstants.java │ ├── RtspApiCodecsCamera.java │ └── RtspNativeCodecsCamera.java └── RtspViewer/ ├── .classpath ├── .gitignore ├── .project ├── AndroidManifest.xml ├── gpl.txt ├── proguard-project.txt ├── project.properties ├── res/ │ ├── layout/ │ │ └── videoview.xml │ └── values/ │ └── strings.xml └── src/ └── de/ └── kp/ └── rtspviewer/ └── RtspViewerActivity.java ================================================ FILE CONTENTS ================================================ ================================================ FILE: README.md ================================================ # RTSP-Camera-for-Android Android-based RTSP server which is able to serve a live camera view to multiple RTSP clients, such as VLC. This project is not maintained anymore (in fact, since the end of 2012). It exists to share how this was implemented back in the day.
I have not tested the following GitHub project myself, but if you are looking for a more current Android RTSP-based solution, please check out: * https://github.com/hypeapps/Endoscope thanks for all the fish (=PA=) ================================================ FILE: RtspCamera/.classpath ================================================ ================================================ FILE: RtspCamera/.gitignore ================================================ /bin /gen ================================================ FILE: RtspCamera/.project ================================================ RtspCamera com.android.ide.eclipse.adt.ResourceManagerBuilder com.android.ide.eclipse.adt.PreCompilerBuilder org.eclipse.jdt.core.javabuilder com.android.ide.eclipse.adt.ApkBuilder com.android.ide.eclipse.adt.AndroidNature org.eclipse.jdt.core.javanature ================================================ FILE: RtspCamera/.settings/org.jboss.ide.eclipse.as.core.prefs ================================================ eclipse.preferences.version=1 org.jboss.ide.eclipse.as.core.singledeployable.deployableList= ================================================ FILE: RtspCamera/AndroidManifest.xml ================================================ ================================================ FILE: RtspCamera/gpl.txt ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works.
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. 
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. 
Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. 
If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 
================================================ FILE: RtspCamera/jni/Android.mk ================================================ include $(call all-subdir-makefiles) ================================================ FILE: RtspCamera/jni/Application.mk ================================================ APP_PROJECT_PATH := /arwa/git/RTSP-Camera-for-Android/RtspCamera APP_MODULES := libH264Decoder libH264Encoder libH263Encoder libH263Decoder ================================================ FILE: RtspCamera/jni/avc_h264/Android.mk ================================================ AVC_ROOT:= $(call my-dir) include $(call all-subdir-makefiles) ================================================ FILE: RtspCamera/jni/avc_h264/common/include/avcapi_common.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains common type definitions and enumerations used by AVC encoder and decoder libraries which are exposed to the users. @publishedAll */ #ifndef AVCAPI_COMMON_H_INCLUDED #define AVCAPI_COMMON_H_INCLUDED // xxx pa deact PV_MEMORY_POOL for test #define PV_MEMORY_POOL /** This is common return status. 
@publishedAll
*/
typedef enum
{
    AVC_NO_BUFFER = -2,             /* negative values signal errors */
    AVC_MEMORY_FAIL = -1,
    AVC_FAIL = 0,
    AVC_SUCCESS = 1,
    AVC_PICTURE_OUTPUT_READY = 2
} AVCStatus;

/**
This enumeration is for profiles. The value follows the profile_idc
in sequence parameter set rbsp. See Annex A of the H.264 spec.
@publishedAll
*/
typedef enum
{
    AVC_BASELINE = 66,
    AVC_MAIN = 77,
    AVC_EXTENDED = 88,
    AVC_HIGH = 100,
    AVC_HIGH10 = 110,
    AVC_HIGH422 = 122,
    AVC_HIGH444 = 144
} AVCProfile;

/**
This enumeration is for levels. The value follows the level_idc
in sequence parameter set rbsp. See Annex A of the H.264 spec.
(Encoded as level * 10, e.g. level 3.1 => 31; AVC_LEVEL1_B = 9 is the
special "1b" level.)
@publishedAll
*/
typedef enum
{
    AVC_LEVEL_AUTO = 0,
    AVC_LEVEL1_B = 9,
    AVC_LEVEL1 = 10,
    AVC_LEVEL1_1 = 11,
    AVC_LEVEL1_2 = 12,
    AVC_LEVEL1_3 = 13,
    AVC_LEVEL2 = 20,
    AVC_LEVEL2_1 = 21,
    AVC_LEVEL2_2 = 22,
    AVC_LEVEL3 = 30,
    AVC_LEVEL3_1 = 31,
    AVC_LEVEL3_2 = 32,
    AVC_LEVEL4 = 40,
    AVC_LEVEL4_1 = 41,
    AVC_LEVEL4_2 = 42,
    AVC_LEVEL5 = 50,
    AVC_LEVEL5_1 = 51
} AVCLevel;

/**
This enumeration follows Table 7-1 for NAL unit type codes.
This may go to avccommon_api.h later (external common).
@publishedAll
*/
typedef enum
{
    AVC_NALTYPE_SLICE = 1,      /* non-IDR non-data partition */
    AVC_NALTYPE_DPA = 2,        /* data partition A */
    AVC_NALTYPE_DPB = 3,        /* data partition B */
    AVC_NALTYPE_DPC = 4,        /* data partition C */
    AVC_NALTYPE_IDR = 5,        /* IDR NAL */
    AVC_NALTYPE_SEI = 6,        /* supplemental enhancement info */
    AVC_NALTYPE_SPS = 7,        /* sequence parameter set */
    AVC_NALTYPE_PPS = 8,        /* picture parameter set */
    AVC_NALTYPE_AUD = 9,        /* access unit delimiter */
    AVC_NALTYPE_EOSEQ = 10,     /* end of sequence */
    AVC_NALTYPE_EOSTREAM = 11,  /* end of stream */
    AVC_NALTYPE_FILL = 12       /* filler data */
} AVCNalUnitType;

/**
This enumeration specifies debug logging type.
This may go to avccommon_api.h later (external common).
@publishedAll
*/
typedef enum
{
    AVC_LOGTYPE_ERROR = 0,
    AVC_LOGTYPE_WARNING = 1,
    AVC_LOGTYPE_INFO = 2
} AVCLogType;

/** This enumerates the status of certain flags.
@publishedAll
*/
typedef enum
{
    AVC_OFF = 0,
    AVC_ON = 1
} AVCFlag;

/**
This structure contains input information.
Note, this structure is identical to AVCDecOutput for now.
*/
typedef struct tagAVCFrameIO
{
    /** A unique identification number for a particular instance of this structure.
    To remain unchanged by the application between the time when it is given to the
    library and the time when the library returns it back. */
    uint32 id;

    /** Array of pointers to Y,Cb,Cr content in 4:2:0 format. For AVC decoding,
    this memory is allocated by the AVC decoder library. For AVC encoding, only the
    memory for original unencoded frame is allocated by the application. Internal
    memory is also allocated by the AVC encoder library. */
    uint8 *YCbCr[3];

    /** In/Out: Coded width of the luma component, it has to be multiple of 16. */
    int pitch;

    /** In/Out: Coded height of the luma component, must be multiple of 16. */
    int height;

    /** In/Out: Display width, less than pitch */
    int clip_width;

    /** In/Out: Display height, less than height */
    int clip_height;

    /** Input: Origin of the display area [0]=>row, [1]=>column */
    int clip_origin[2];

    /** Output: Frame number in de/encoding order (not necessary)*/
    uint32 coding_order;

    /** Output: Frame number in displaying order (this may or may not be associated
    with the POC at all!!!). */
    uint32 disp_order;

    /** In/Out: Flag for use for reference or not. */
    uint is_reference;

    /** In/Out: Coding timestamp in msec (not display timestamp) */
    uint32 coding_timestamp;

    /* there could be something else here such as format, DON (decoding order number)
     if available thru SEI, etc. */
} AVCFrameIO;

/** CALLBACK FUNCTION TO BE IMPLEMENTED BY APPLICATION */
/** In AVCDecControls structure, userData is a pointer to an object with the following
    member functions. */

/** @brief Decoded picture buffers (DPB) must be allocated or re-allocated before an
    IDR frame is decoded.
If PV_MEMORY_POOL is not defined, AVC lib will allocate DPB internally
    which cannot be shared with the application. In that case, this function
    will not be called.
    @param userData  The same value of userData in AVCHandle object.
    @param frame_size_in_mbs  The size of each frame in number of macroblocks.
    @param num_frames  The number of frames in DPB.
    @return 1 for success, 0 for fail (cannot allocate DPB) */
typedef int (*FunctionType_DPBAlloc)(void *userData, uint frame_size_in_mbs, uint num_buffers);

/** @brief AVC library calls this function to reserve the memory of one frame from
    the DPB. Once reserved, this frame shall not be deleted or over-written by the app.
    @param userData  The same value of userData in AVCHandle object.
    @param indx  Index of a frame in DPB (AVC library keeps track of the index).
    @param yuv  The address of the yuv pointer returned to the AVC lib.
    @return 1 for success, 0 for fail (no frames available to bind). */
typedef int (*FunctionType_FrameBind)(void *userData, int indx, uint8 **yuv);

/** @brief AVC library calls this function once a bound frame is not needed for
    decoding operation (falls out of the sliding window, or marked unused for
    reference).
    @param userData  The same value of userData in AVCHandle object.
    @param indx  Index of frame to be unbound (AVC library keeps track of the index).
    @return none.
    NOTE(review): "FuctionType" (sic) — misspelled in the original API; the
    identifier is kept as-is because it is part of the published interface. */
typedef void (*FuctionType_FrameUnbind)(void *userData, int);

/** Pointer to malloc function for general memory allocation, so that application
    can keep track of memory usage.
\param "size" "Size of requested memory in bytes."
\param "attribute" "Some value specifying types, priority, etc. of the memory."
\return "The address of the allocated memory casted to int" */ typedef int (*FunctionType_Malloc)(void *userData, int32 size, int attribute); /** Function pointer to free \param "mem" "Pointer to the memory to be freed casted to int" \return "void" */ typedef void (*FunctionType_Free)(void *userData, int mem); /** Debug logging information is returned to the application thru this function. \param "type" "Type of logging message, see definition of AVCLogType." \param "string1" "Logging message." \param "string2" "To be defined." */ typedef void (*FunctionType_DebugLog)(uint32 *userData, AVCLogType type, char *string1, int val1, int val2); /** This structure has to be allocated and maintained by the user of the library. This structure is used as a handle to the library object. */ typedef struct tagAVCHandle { /** A pointer to the internal data structure. Users have to make sure that this value is NULL at the beginning. */ void *AVCObject; /** A pointer to user object which has the following member functions used for callback purpose. !!! */ void *userData; /** Pointers to functions implemented by the users of AVC library */ FunctionType_DPBAlloc CBAVC_DPBAlloc; FunctionType_FrameBind CBAVC_FrameBind; FuctionType_FrameUnbind CBAVC_FrameUnbind; FunctionType_Malloc CBAVC_Malloc; FunctionType_Free CBAVC_Free; FunctionType_DebugLog CBAVC_DebugLog; /** Flag to enable debugging */ uint32 debugEnable; } AVCHandle; #ifdef PVDEBUGMSG_LOG #define DEBUG_LOG(a,b,c,d,e) CBAVC_DebugLog(a,b,c,d,e) #else #define DEBUG_LOG(a,b,c,d,e) #endif #endif /* _AVCAPI_COMMON_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/common/include/avcint_common.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains common code shared between AVC decoder and AVC encoder for internal use only. @publishedAll */ #ifndef AVCINT_COMMON_H_INCLUDED #define AVCINT_COMMON_H_INCLUDED #ifndef OSCL_TYPES_H_INCLUDED #include "oscl_types.h" #endif #ifndef AVCAPI_COMMON_H_INCLUDED #include "avcapi_common.h" #endif #ifndef TRUE #define TRUE 1 #define FALSE 0 #endif /** Mathematic functions defined in subclause 5.7. Can be replaced with assembly instructions for speedup. @publishedAll */ #define AVC_ABS(x) (((x)<0)? -(x) : (x)) #define AVC_SIGN(x) (((x)<0)? -1 : 1) #define AVC_SIGN0(x) (((x)<0)? -1 : (((x)>0) ? 1 : 0)) #define AVC_MAX(x,y) ((x)>(y)? (x):(y)) #define AVC_MIN(x,y) ((x)<(y)? (x):(y)) #define AVC_MEDIAN(A,B,C) ((A) > (B) ? ((A) < (C) ? (A) : (B) > (C) ? (B) : (C)): (B) < (C) ? (B) : (C) > (A) ? (C) : (A)) #define AVC_CLIP3(a,b,x) (AVC_MAX(a,AVC_MIN(x,b))) /* clip x between a and b */ #define AVC_CLIP(x) AVC_CLIP3(0,255,x) #define AVC_FLOOR(x) ((int)(x)) #define AVC_RASTER_SCAN(x,y,n) ((x)+(y)*(n)) #define AVC_ROUND(x) (AVC_SIGN(x)*AVC_FLOOR(AVC_ABS(x)+0.5)) #define AVC_INVERSE_RASTER_SCAN(a,b,c,d,e) (((e)==0)? (((a)%((d)/(b)))*(b)): (((a)/((d)/(b)))*(c))) /* a:block address, b:block width, c:block height, d:total_width, e:x or y coordinate */ #define DEFAULT_ATTR 0 /* default memory attribute */ #define FAST_MEM_ATTR 1 /* fast memory attribute */ /* This section is for definition of constants. 
*/ #define MB_SIZE 16 #define BLOCK_SIZE 4 #define EMULATION_PREVENTION_THREE_BYTE 0x3 #define NUM_PIXELS_IN_MB (24*16) #define NUM_BLKS_IN_MB 24 #define AVCNumI4PredMode 9 #define AVCNumI16PredMode 4 #define AVCNumIChromaMode 4 /* constants used in the structures below */ #define MAXIMUMVALUEOFcpb_cnt 32 /* used in HRDParams */ #define MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE 255 /* used in SeqParamSet */ #define MAX_NUM_SLICE_GROUP 8 /* used in PicParamSet */ #define MAX_REF_PIC_LIST_REORDERING 32 /* 32 is maximum according to Annex A, SliceHeader */ #define MAX_DEC_REF_PIC_MARKING 64 /* 64 is the maximum possible given the max num ref pictures to 31. */ #define MAX_FS (16+1) /* pre-defined size of frame store array */ #define MAX_LEVEL_IDX 15 /* only 15 levels defined for now */ #define MAX_REF_PIC_LIST 33 /* max size of the RefPicList0 and RefPicList1 */ /** Architectural related macros. @publishedAll */ #ifdef USE_PRED_BLOCK #define MB_BASED_DEBLOCK #endif /** Picture type, PV created. @publishedAll */ typedef enum { AVC_FRAME = 3 } AVCPictureType; /** This slice type follows Table 7-3. The bottom 5 items may not needed. @publishedAll */ typedef enum { AVC_P_SLICE = 0, AVC_B_SLICE = 1, AVC_I_SLICE = 2, AVC_SP_SLICE = 3, AVC_SI_SLICE = 4, AVC_P_ALL_SLICE = 5, AVC_B_ALL_SLICE = 6, AVC_I_ALL_SLICE = 7, AVC_SP_ALL_SLICE = 8, AVC_SI_ALL_SLICE = 9 } AVCSliceType; /** Types of the macroblock and partition. PV Created. @publishedAll */ typedef enum { /* intra */ AVC_I4, AVC_I16, AVC_I_PCM, AVC_SI4, /* inter for both P and B*/ AVC_BDirect16, AVC_P16, AVC_P16x8, AVC_P8x16, AVC_P8, AVC_P8ref0, AVC_SKIP } AVCMBMode; /** Enumeration for sub-macroblock mode, interpreted from sub_mb_type. @publishedAll */ typedef enum { /* for sub-partition mode */ AVC_BDirect8, AVC_8x8, AVC_8x4, AVC_4x8, AVC_4x4 } AVCSubMBMode; /** Mode of prediction of partition or sub-partition. PV Created. Do not change the order!!! Used in table look-up mode prediction in vlc.c. 
@publishedAll */ typedef enum { AVC_Pred_L0 = 0, AVC_Pred_L1, AVC_BiPred, AVC_Direct } AVCPredMode; /** Mode of intra 4x4 prediction. Table 8-2 @publishedAll */ typedef enum { AVC_I4_Vertical = 0, AVC_I4_Horizontal, AVC_I4_DC, AVC_I4_Diagonal_Down_Left, AVC_I4_Diagonal_Down_Right, AVC_I4_Vertical_Right, AVC_I4_Horizontal_Down, AVC_I4_Vertical_Left, AVC_I4_Horizontal_Up } AVCIntra4x4PredMode; /** Mode of intra 16x16 prediction. Table 8-3 @publishedAll */ typedef enum { AVC_I16_Vertical = 0, AVC_I16_Horizontal, AVC_I16_DC, AVC_I16_Plane } AVCIntra16x16PredMode; /** Mode of intra chroma prediction. Table 8-4 @publishedAll */ typedef enum { AVC_IC_DC = 0, AVC_IC_Horizontal, AVC_IC_Vertical, AVC_IC_Plane } AVCIntraChromaPredMode; /** Type of residual going to residual_block_cavlc function, PV created. @publishedAll */ typedef enum { AVC_Luma, AVC_Intra16DC, AVC_Intra16AC, AVC_ChromaDC, AVC_ChromaAC } AVCResidualType; /** This structure contains VUI parameters as specified in Annex E. Some variables may be removed from the structure if they are found to be useless to store. @publishedAll */ typedef struct tagHRDParams { uint cpb_cnt_minus1; /* ue(v), range 0..31 */ uint bit_rate_scale; /* u(4) */ uint cpb_size_scale; /* u(4) */ uint32 bit_rate_value_minus1[MAXIMUMVALUEOFcpb_cnt];/* ue(v), range 0..2^32-2 */ uint32 cpb_size_value_minus1[MAXIMUMVALUEOFcpb_cnt]; /* ue(v), range 0..2^32-2 */ uint cbr_flag[MAXIMUMVALUEOFcpb_cnt]; /* u(1) */ uint initial_cpb_removal_delay_length_minus1; /* u(5), default 23 */ uint cpb_removal_delay_length_minus1; /* u(5), default 23 */ uint dpb_output_delay_length_minus1; /* u(5), default 23 */ uint time_offset_length; /* u(5), default 24 */ } AVCHRDParams; /** This structure contains VUI parameters as specified in Annex E. Some variables may be removed from the structure if they are found to be useless to store. 
@publishedAll
*/
typedef struct tagVUIParam
{
    /* sample aspect ratio */
    uint      aspect_ratio_info_present_flag;     /* u(1) */
    uint      aspect_ratio_idc;                   /* u(8), table E-1 */
    uint      sar_width;                          /* u(16) */
    uint      sar_height;                         /* u(16) */
    /* overscan */
    uint      overscan_info_present_flag;         /* u(1) */
    uint      overscan_appropriate_flag;          /* u(1) */
    /* video signal type */
    uint      video_signal_type_present_flag;     /* u(1) */
    uint      video_format;                       /* u(3), Table E-2, default 5, unspecified */
    uint      video_full_range_flag;              /* u(1) */
    uint      colour_description_present_flag;    /* u(1) */
    uint      colour_primaries;                   /* u(8), Table E-3, default 2, unspecified */
    uint      transfer_characteristics;           /* u(8), Table E-4, default 2, unspecified */
    uint      matrix_coefficients;                /* u(8), Table E-5, default 2, unspecified */
    /* chroma sample location */
    uint      chroma_location_info_present_flag;  /* u(1) */
    uint      chroma_sample_loc_type_top_field;   /* ue(v), Fig. E-1range 0..5, default 0 */
    uint      chroma_sample_loc_type_bottom_field; /* ue(v) */
    /* timing */
    uint      timing_info_present_flag;           /* u(1) */
    uint      num_units_in_tick;                  /* u(32), must be > 0 */
    uint      time_scale;                         /* u(32), must be > 0 */
    uint      fixed_frame_rate_flag;              /* u(1), Eq. C-13 */
    /* HRD */
    uint      nal_hrd_parameters_present_flag;    /* u(1) */
    AVCHRDParams nal_hrd_parameters;              /* hrd_paramters */
    uint      vcl_hrd_parameters_present_flag;    /* u(1) */
    AVCHRDParams vcl_hrd_parameters;              /* hrd_paramters */
    /* if ((nal_hrd_parameters_present_flag || (vcl_hrd_parameters_present_flag)) */
    uint      low_delay_hrd_flag;                 /* u(1) */
    uint      pic_struct_present_flag;
    /* bitstream restrictions */
    uint      bitstream_restriction_flag;         /* u(1) */
    uint      motion_vectors_over_pic_boundaries_flag; /* u(1) */
    uint      max_bytes_per_pic_denom;            /* ue(v), default 2 */
    uint      max_bits_per_mb_denom;              /* ue(v), range 0..16, default 1 */
    uint      log2_max_mv_length_vertical;        /* ue(v), range 0..16, default 16 */
    uint      log2_max_mv_length_horizontal;      /* ue(v), range 0..16, default 16 */
    uint      max_dec_frame_reordering;           /* ue(v) */
    uint      max_dec_frame_buffering;            /* ue(v) */
} AVCVUIParams;

/**
This structure contains information in a sequence parameter set NAL.
Some variables may be removed from the structure if they are found to be useless to store.
@publishedAll
*/
typedef struct tagSeqParamSet
{
    uint   Valid;            /* indicates the parameter set is valid */

    uint  profile_idc;              /* u(8) */
    uint  constrained_set0_flag;    /* u(1) */
    uint  constrained_set1_flag;    /* u(1) */
    uint  constrained_set2_flag;    /* u(1) */
    uint  constrained_set3_flag;    /* u(1) */
    uint  level_idc;                /* u(8) */
    uint  seq_parameter_set_id;     /* ue(v), range 0..31 */
    uint  log2_max_frame_num_minus4; /* ue(v), range 0..12 */
    uint pic_order_cnt_type;        /* ue(v), range 0..2 */
    /* if( pic_order_cnt_type == 0 ) */
    uint log2_max_pic_order_cnt_lsb_minus4; /* ue(v), range 0..12 */
    /* else if( pic_order_cnt_type == 1 ) */
    uint delta_pic_order_always_zero_flag;  /* u(1) */
    int32 offset_for_non_ref_pic;           /* se(v) */
    int32 offset_for_top_to_bottom_field;   /* se(v) */
    uint num_ref_frames_in_pic_order_cnt_cycle; /* ue(v) , range 0..255 */
    /* for( i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ ) */
    int32   offset_for_ref_frame[MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE]; /* se(v) */
    uint num_ref_frames;                        /* ue(v), range 0..16 */
    uint gaps_in_frame_num_value_allowed_flag;  /* u(1) */
    uint pic_width_in_mbs_minus1;               /* ue(v) */
    uint pic_height_in_map_units_minus1;        /* ue(v) */
    uint frame_mbs_only_flag;                   /* u(1) */
    /* if( !frame_mbs_only_flag ) */
    uint mb_adaptive_frame_field_flag;          /* u(1) */
    uint direct_8x8_inference_flag;  /* u(1), must be 1 when frame_mbs_only_flag is 0 */
    uint frame_cropping_flag;                   /* u(1) */
    /* if( frmae_cropping_flag) */
    uint frame_crop_left_offset;                /* ue(v) */
    uint frame_crop_right_offset;               /* ue(v) */
    uint frame_crop_top_offset;                 /* ue(v) */
    uint frame_crop_bottom_offset;              /* ue(v) */
    uint vui_parameters_present_flag;           /* u(1) */
    /* kept for reference: HRD pointers were once stored here directly */
//    uint nal_hrd_parameters_present_flag;
//    uint vcl_hrd_parameters_present_flag;
//    AVCHRDParams *nal_hrd_parameters;
//    AVCHRDParams *vcl_hrd_parameters;
    AVCVUIParams vui_parameters;                /* AVCVUIParam */
} AVCSeqParamSet;

/** This structure contains
information in a picture parameter set NAL.
Some variables may be removed from the structure if they are found to be useless to store.
@publishedAll
*/
typedef struct tagPicParamSet
{
    uint pic_parameter_set_id;      /* ue(v), range 0..255 */
    uint seq_parameter_set_id;      /* ue(v), range 0..31 */
    uint entropy_coding_mode_flag;  /* u(1) */
    uint pic_order_present_flag;    /* u(1) */
    uint num_slice_groups_minus1;   /* ue(v), range in Annex A */
    /* if( num_slice_groups_minus1 > 0) */
    uint slice_group_map_type;      /* ue(v), range 0..6 */
    /* if( slice_group_map_type = = 0 ) */
    /* for(0:1:num_slice_groups_minus1) */
    uint run_length_minus1[MAX_NUM_SLICE_GROUP]; /* ue(v) */
    /* else if( slice_group_map_type = = 2 ) */
    /* for(0:1:num_slice_groups_minus1-1) */
    uint top_left[MAX_NUM_SLICE_GROUP-1];        /* ue(v) */
    uint bottom_right[MAX_NUM_SLICE_GROUP-1];    /* ue(v) */
    /* else if( slice_group_map_type = = 3 || 4 || 5 */
    uint slice_group_change_direction_flag;      /* u(1) */
    uint slice_group_change_rate_minus1;         /* ue(v) */
    /* else if( slice_group_map_type = = 6 ) */
    uint pic_size_in_map_units_minus1;           /* ue(v) */
    /* for(0:1:pic_size_in_map_units_minus1) */
    uint *slice_group_id;                        /* complete MBAmap u(v) */
    uint num_ref_idx_l0_active_minus1;  /* ue(v), range 0..31 */
    uint num_ref_idx_l1_active_minus1;  /* ue(v), range 0..31 */
    uint weighted_pred_flag;            /* u(1) */
    uint weighted_bipred_idc;           /* u(2), range 0..2 */
    int pic_init_qp_minus26;            /* se(v), range -26..25 */
    int pic_init_qs_minus26;            /* se(v), range -26..25 */
    int chroma_qp_index_offset;         /* se(v), range -12..12 */
    uint deblocking_filter_control_present_flag; /* u(1) */
    uint constrained_intra_pred_flag;   /* u(1) */
    uint redundant_pic_cnt_present_flag; /* u(1) */
} AVCPicParamSet;


/**
This structure contains slice header information.
Some variables may be removed from the structure if they are found to be useless to store.
@publishedAll
*/
typedef struct tagSliceHeader
{
    uint    first_mb_in_slice;      /* ue(v) */
    AVCSliceType slice_type;        /* ue(v), Table 7-3, range 0..9 */
    uint    pic_parameter_set_id;   /* ue(v), range 0..255 */
    uint    frame_num;              /* u(v), see log2max_frame_num_minus4 */
    /* if( !frame_mbs_only_flag) */
    uint    field_pic_flag;         /* u(1) */
    /* if(field_pic_flag) */
    uint bottom_field_flag;         /* u(1) */
    /* if(nal_unit_type == 5) */
    uint    idr_pic_id;             /* ue(v), range 0..65535 */
    /* if(pic_order_cnt_type==0) */
    uint    pic_order_cnt_lsb;      /* u(v), range 0..MaxPicOrderCntLsb-1 */
    /* if(pic_order_present_flag && !field_pic_flag) */
    int32   delta_pic_order_cnt_bottom; /* se(v) */
    /* if(pic_order_cnt_type==1 && !delta_pic_order_always_zero_flag) */
    /* if(pic_order_present_flag && !field_pic_flag) */
    int32   delta_pic_order_cnt[2];
    /* if(redundant_pic_cnt_present_flag) */
    uint    redundant_pic_cnt;      /* ue(v), range 0..127 */
    /* if(slice_type == B) */
    uint    direct_spatial_mv_pred_flag; /* u(1) */
    /* if(slice_type == P || slice_type==SP || slice_type==B) */
    uint    num_ref_idx_active_override_flag; /* u(1) */
    /* if(num_ref_idx_active_override_flag) */
    uint    num_ref_idx_l0_active_minus1;   /* ue(v) */
    /* if(slie_type == B) */
    uint    num_ref_idx_l1_active_minus1;   /* ue(v) */
    /* ref_pic_list_reordering() */
    uint    ref_pic_list_reordering_flag_l0; /* u(1) */
    uint    reordering_of_pic_nums_idc_l0[MAX_REF_PIC_LIST_REORDERING]; /* ue(v), range 0..3 */
    uint    abs_diff_pic_num_minus1_l0[MAX_REF_PIC_LIST_REORDERING];    /* ue(v) */
    uint    long_term_pic_num_l0[MAX_REF_PIC_LIST_REORDERING];          /* ue(v) */
    uint    ref_pic_list_reordering_flag_l1; /* u(1) */
    uint    reordering_of_pic_nums_idc_l1[MAX_REF_PIC_LIST_REORDERING]; /* ue(v), range 0..3 */
    uint    abs_diff_pic_num_minus1_l1[MAX_REF_PIC_LIST_REORDERING];    /* ue(v) */
    uint    long_term_pic_num_l1[MAX_REF_PIC_LIST_REORDERING];          /* ue(v) */
    /* end ref_pic_list_reordering() */
    /* if(nal_ref_idc!=0) */
    /* dec_ref_pic_marking() */
    uint    no_output_of_prior_pics_flag;   /* u(1) */
    uint    long_term_reference_flag;       /* u(1) */
    uint    adaptive_ref_pic_marking_mode_flag; /* u(1) */
    uint    memory_management_control_operation[MAX_DEC_REF_PIC_MARKING]; /* ue(v), range 0..6 */
    uint    difference_of_pic_nums_minus1[MAX_DEC_REF_PIC_MARKING];  /* ue(v) */
    uint    long_term_pic_num[MAX_DEC_REF_PIC_MARKING];              /* ue(v) */
    uint    long_term_frame_idx[MAX_DEC_REF_PIC_MARKING];            /* ue(v) */
    uint    max_long_term_frame_idx_plus1[MAX_DEC_REF_PIC_MARKING];  /* ue(v) */
    /* end dec_ref_pic_marking() */
    /* if(entropy_coding_mode_flag && slice_type!=I && slice_type!=SI) */
    uint    cabac_init_idc;         /* ue(v), range 0..2 */
    int     slice_qp_delta;         /* se(v), range 0..51 */
    /* if(slice_type==SP || slice_type==SI) */
    /* if(slice_type==SP) */
    uint    sp_for_switch_flag;     /* u(1) */
    int     slice_qs_delta;         /* se(v) */
    /* if(deblocking_filter_control_present_flag)*/
    uint disable_deblocking_filter_idc; /* ue(v), range 0..2 */
    /* if(disable_deblocking_filter_idc!=1) */
    int slice_alpha_c0_offset_div2; /* se(v), range -6..6, default 0 */
    int slice_beta_offset_div_2;    /* se(v), range -6..6, default 0 */
    /* if(num_slice_groups_minus1>0 && slice_group_map_type>=3 && slice_group_map_type<=5)*/
    uint    slice_group_change_cycle; /* u(v), use ceil(log2(PicSizeInMapUnits/SliceGroupChangeRate + 1)) bits*/
} AVCSliceHeader;

/**
This struct contains information about the neighboring pixel.
@publishedAll
*/
typedef struct tagPixPos
{
    int available;
    int mb_addr;    /* macroblock address of the current pixel, see below */
    int x;          /* x,y positions of current pixel relative to the macroblock mb_addr */
    int y;
    int pos_x;      /* x,y positions of current pixel relative to the picture. */
    int pos_y;
} AVCPixelPos;

/* Availability of the left, top and top-right neighbors of a block. */
typedef struct tagNeighborAvailability
{
    int left;
    int top;        /* macroblock address of the current pixel, see below */
    int top_right;  /* x,y positions of current pixel relative to the macroblock mb_addr */
} AVCNeighborAvailability;


/**
This structure contains picture data and related information necessary to be used as
reference frame.
@publishedAll
*/
typedef struct tagPictureData
{
    uint16      RefIdx;     /* index used for reference frame */
    uint8       *Sl;        /* derived from base_dpb in AVCFrameStore */
    uint8       *Scb;       /* for complementary fields, YUV are interlaced */
    uint8       *Scr;       /* Sl of top_field and bottom_fields will be one line apart and the
                               stride will be 2 times the width. */
    /* For non-complementary field, the above still applies. A special output formatting is required. */

    /* Then, necessary variables that need to be stored */
    AVCPictureType  picType;    /* frame, top-field or bot-field */
    /*bool*/
    uint        isReference;
    /*bool*/
    uint        isLongTerm;
    int         PicOrderCnt;
    int         PicNum;
    int         LongTermPicNum;

    int         width;  /* how many pixel per line */
    int         height; /* how many line */
    int         pitch;  /* how many pixel between the line */

    uint        padded; /* flag for being padded */
} AVCPictureData;

/**
This structure contains information for frame storage.
@publishedAll
*/
typedef struct tagFrameStore
{
    uint8       *base_dpb;      /* base pointer for the YCbCr */

    int         IsReference;    /* 0=not used for ref; 1=top used; 2=bottom used; 3=both fields (or frame) used */
    int         IsLongTerm;     /* 0=not used for ref; 1=top used; 2=bottom used; 3=both fields (or frame) used */
    /* if IsLongTerm is true, IsReference can be ignored. */
    /* if IsReference is true, IsLongterm will be checked for short-term or long-term. */
    /* IsUsed must be true to enable the validity of IsReference and IsLongTerm */

    int         IsOutputted;    /* has it been outputted via AVCDecGetOutput API, then don't output it again,
                                   wait until it is returned. */
    AVCPictureData frame;

    int         FrameNum;
    int         FrameNumWrap;
    int         LongTermFrameIdx;
    int         PicOrderCnt;    /* of the frame, smaller of the 2 fields */
} AVCFrameStore;

/**
This structure maintains the actual memory for the decoded picture buffer (DPB) which is
allocated at the beginning according to profile/level.
Once decoded_picture_buffer is allocated, Sl,Scb,Scr in
AVCPictureData structure just point to the address in decoded_picture_buffer.
used_size maintains the used space.
NOTE:: In order to maintain contiguous memory space, memory equal to a single frame is
assigned at a time. Two opposite fields reside in the same frame memory.

  |-------|---|---|---|xxx|-------|xxx|---|-------|
   frame     top bot top      frame      bot  frame
     0        1   1   2         3         4     5

bot 2 and top 4 do not exist, the memory is not used.

@publishedAll
*/
typedef struct tagDecPicBuffer
{
    uint8       *decoded_picture_buffer;    /* actual memory */
    uint32      dpb_size;                   /* size of dpb in bytes */
    uint32      used_size;                  /* used size */
    struct tagFrameStore    *fs[MAX_FS];    /* list of frame stored, actual buffer */
    int         num_fs;                     /* size of fs */
} AVCDecPicBuffer;


/**
This structure contains macroblock related variables.
@publishedAll
*/
typedef struct tagMacroblock
{
    AVCIntraChromaPredMode  intra_chroma_pred_mode;  /* ue(v) */

    int32 mvL0[16];  /* motion vectors, 16 bit packed (x,y) per element */
    int32 mvL1[16];
    int16 ref_idx_L0[4];
    int16 ref_idx_L1[4];
    uint16 RefIdx[4];  /* ref index, has value of AVCPictureData->RefIdx */

    /* stored data */
    /*bool*/
    uint    mb_intra; /* intra flag */
    /*bool*/
    uint    mb_bottom_field;

    AVCMBMode mbMode;   /* type of MB prediction */
    AVCSubMBMode subMbMode[4];  /* for each 8x8 partition */

    uint    CBP;        /* CodeBlockPattern */
    AVCIntra16x16PredMode i16Mode;  /* Intra16x16PredMode */
    AVCIntra4x4PredMode i4Mode[16]; /* Intra4x4PredMode, in raster scan order */
    int NumMbPart;      /* number of partition */
    AVCPredMode MBPartPredMode[4][4];  /* prediction mode [MBPartIndx][subMBPartIndx] */
    int MbPartWidth;
    int MbPartHeight;
    int NumSubMbPart[4];    /* for each 8x8 partition */
    int SubMbPartWidth[4];  /* for each 8x8 partition */
    int SubMbPartHeight[4]; /* for each 8x8 partition */

    uint8 nz_coeff[NUM_BLKS_IN_MB];  /* [blk_y][blk_x], Chroma is [4..5][0...3], see predict_nnz() function */

    int QPy;    /* Luma QP */
    int QPc;    /* Chroma QP */
    int QSc;    /* Chroma QP S-picture */

    int slice_id;           // MC slice
} AVCMacroblock;


/**
This structure contains common internal variables
between the encoder and decoder such that some functions can be shared among them.
@publishedAll
*/
typedef struct tagCommonObj
{
    /* put these 2 up here to make sure they are word-aligned */
    int16   block[NUM_PIXELS_IN_MB];    /* for transformed residue coefficient */
    uint8   *pred_block;    /* pointer to prediction block, could point to a frame */
#ifdef USE_PRED_BLOCK
    uint8   pred[688];      /* for prediction */
    /* Luma [0-399], Cb [400-543], Cr[544-687] */
#endif
    int     pred_pitch;     /* either equal to 20 or to frame pitch */

    /* temporary buffers for intra prediction */
    /* these variables should remain inside fast RAM */
#ifdef MB_BASED_DEBLOCK
    uint8   *intra_pred_top;    /* a row of pixel for intra prediction */
    uint8   intra_pred_left[17]; /* a column of pixel for intra prediction */
    uint8   *intra_pred_top_cb;
    uint8   intra_pred_left_cb[9];
    uint8   *intra_pred_top_cr;
    uint8   intra_pred_left_cr[9];
#endif
    /* pointer to the prediction area for intra prediction */
    uint8   *pintra_pred_top;   /* pointer to the top intra prediction value */
    uint8   *pintra_pred_left;  /* pointer to the left intra prediction value */
    uint8   intra_pred_topleft; /* the [-1,-1] neighboring pixel */
    uint8   *pintra_pred_top_cb;
    uint8   *pintra_pred_left_cb;
    uint8   intra_pred_topleft_cb;
    uint8   *pintra_pred_top_cr;
    uint8   *pintra_pred_left_cr;
    uint8   intra_pred_topleft_cr;

    /* quantization parameters, split into div/mod 6 for dequant table lookup */
    int     QPy;
    int     QPc;
    int     QPy_div_6;
    int     QPy_mod_6;
    int     QPc_div_6;
    int     QPc_mod_6;

    /**** nal_unit ******/
    /* previously in AVCNALUnit format */
    uint    NumBytesInRBSP;
    int     forbidden_bit;
    int     nal_ref_idc;
    AVCNalUnitType  nal_unit_type;
    AVCNalUnitType  prev_nal_unit_type;
    /*bool*/
    uint    slice_data_partitioning; /* flag when nal_unit_type is between 2 and 4 */
    /**** ******** ******/
    AVCSliceType slice_type;
    AVCDecPicBuffer     *decPicBuf; /* decoded picture buffer */

    AVCSeqParamSet      *currSeqParams; /* the currently used one */
    AVCPicParamSet      *currPicParams; /* the currently used one */
    uint        seq_parameter_set_id;

    /* slice header */
    AVCSliceHeader      *sliceHdr;  /* slice header param syntax variables */

    AVCPictureData      *currPic;   /* pointer to current picture */
    AVCFrameStore       *currFS;    /* pointer to current frame store */
    AVCPictureType      currPicType; /* frame, top-field or bot-field */
    /*bool*/
    uint        newPic;     /* flag for new picture */
    uint        newSlice;   /* flag for new slice */
    AVCPictureData      *prevRefPic; /* pointer to previous picture */

    AVCMacroblock       *mblock;    /* array of macroblocks covering entire picture */
    AVCMacroblock       *currMB;    /* pointer to current macroblock */
    uint        mbNum;      /* number of current MB */
    int         mb_x;       /* x-coordinate of the current mbNum */
    int         mb_y;       /* y-coordinate of the current mbNum */

    /* For internal operation, scratch memory for MV, prediction, transform, etc.*/
    uint32      cbp4x4;     /* each bit represent nonzero 4x4 block in reverse raster scan order */
    /* starting from luma, Cb and Cr, lsb toward msb */
    int         mvd_l0[4][4][2];    /* [mbPartIdx][subMbPartIdx][compIdx], se(v) */
    int         mvd_l1[4][4][2];    /* [mbPartIdx][subMbPartIdx][compIdx], se(v) */

    int         mbAddrA, mbAddrB, mbAddrC, mbAddrD; /* address of neighboring MBs */
    /*bool*/
    uint        mbAvailA, mbAvailB, mbAvailC, mbAvailD; /* availability */
    /*bool*/
    uint        intraAvailA, intraAvailB, intraAvailC, intraAvailD; /* for intra mode */
    /***********************************************/

    /* The following variables are defined in the draft. */
    /* They may need to be stored in PictureData structure and used for reference. */
    /* In that case, just move or copy it to AVCDecPictureData structure. */

    int padded_size;        /* size of extra padding to a frame */

    uint    MaxFrameNum;        /*2^(log2_max_frame_num_minus4+4), range 0..2^16-1 */
    uint    MaxPicOrderCntLsb;  /*2^(log2_max_pic_order_cnt_lsb_minus4+4), 0..2^16-1 */
    uint    PicWidthInMbs;      /*pic_width_in_mbs_minus1+1 */
    uint    PicWidthInSamplesL; /* PicWidthInMbs*16 */
    uint    PicWidthInSamplesC; /* PicWIdthInMbs*8 */
    uint    PicHeightInMapUnits;    /* pic_height_in_map_units_minus1+1 */
    uint    PicSizeInMapUnits;  /* PicWidthInMbs*PicHeightInMapUnits */
    uint    FrameHeightInMbs;   /*(2-frame_mbs_only_flag)*PicHeightInMapUnits */
    uint    SliceGroupChangeRate;   /* slice_group_change_rate_minus1 + 1 */

    /* access unit */
    uint    primary_pic_type;   /* u(3), Table 7-2, kinda informative only */

    /* slice data partition */
    uint    slice_id;           /* ue(v) */

    uint    UnusedShortTermFrameNum;
    uint    PrevRefFrameNum;
    uint    MbaffFrameFlag;     /* (mb_adaptive_frame_field_flag && !field_pic_flag) */
    uint    PicHeightInMbs;     /* FrameHeightInMbs/(1+field_pic_flag) */
    int     PicHeightInSamplesL;    /* PicHeightInMbs*16 */
    int     PicHeightInSamplesC;    /* PicHeightInMbs*8 */
    uint    PicSizeInMbs;       /* PicWidthInMbs*PicHeightInMbs */
    uint    level_idc;
    int     numMBs;
    uint    MaxPicNum;
    uint    CurrPicNum;
    int     QSy;                /* 26+pic_init_qp_minus26+slice_qs_delta */
    int     FilterOffsetA;
    int     FilterOffsetB;
    uint    MapUnitsInSliceGroup0;  /* Min(slie_group_change_cycle*SliceGroupChangeRate,PicSizeInMapUnits) */

    /* dec_ref_pic_marking */
    int     MaxLongTermFrameIdx;
    int     LongTermFrameIdx;

    /* POC related variables */
    /*bool*/
    uint    mem_mgr_ctrl_eq_5;  /* if memory_management_control_operation equal to 5 flag */
    int     PicOrderCnt;
    int     BottomFieldOrderCnt, TopFieldOrderCnt;
    /* POC mode 0 */
    int     prevPicOrderCntMsb;
    uint    prevPicOrderCntLsb;
    int     PicOrderCntMsb;
    /* POC mode 1 */
    int     prevFrameNumOffset, FrameNumOffset;
    uint    prevFrameNum;
    int     absFrameNum;
    int     picOrderCntCycleCnt, frameNumInPicOrderCntCycle;
    int     expectedDeltaPerPicOrderCntCycle;
    int     expectedPicOrderCnt;

    /* FMO */
    int *MbToSliceGroupMap;     /* to be re-calculate at the beginning */

    /* ref pic list */
    AVCPictureData  *RefPicList0[MAX_REF_PIC_LIST]; /* list 0 */
    AVCPictureData  *RefPicList1[MAX_REF_PIC_LIST]; /* list 1 */
    AVCFrameStore   *refFrameList0ShortTerm[32];
    AVCFrameStore   *refFrameList1ShortTerm[32];
    AVCFrameStore   *refFrameListLongTerm[32];
    int     refList0Size;
    int     refList1Size;

    /* slice data semantics*/
    int     mb_skip_run;    /* ue(v) */
    /*uint  mb_skip_flag;*/ /* ae(v) */
    /* uint end_of_slice_flag;*/ /* ae(v) */
    /***********************************************/

    /* function pointers */
    int (*is_short_ref)(AVCPictureData *s);
    int (*is_long_ref)(AVCPictureData *s);

} AVCCommonObj;

/**
Commonly used constant arrays.
@publishedAll
*/
/** Zigzag scan from 1-D to 2-D. */
const static uint8 ZZ_SCAN[16] = {0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15};
/* Zigzag scan from 1-D to 2-D output to block[24][16]. */
const static uint8 ZZ_SCAN_BLOCK[16] = {0, 1, 16, 32, 17, 2, 3, 18, 33, 48, 49, 34, 19, 35, 50, 51};

/** From zigzag to raster for luma DC value */
const static uint8 ZIGZAG2RASTERDC[16] = {0, 4, 64, 128, 68, 8, 12, 72, 132, 192, 196, 136, 76, 140, 200, 204};

/** Mapping from coding scan block indx to raster scan block index */
const static int blkIdx2blkX[16] = {0, 1, 0, 1, 2, 3, 2, 3, 0, 1, 0, 1, 2, 3, 2, 3};
const static int blkIdx2blkY[16] = {0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3};
/** from [blk8indx][blk4indx] to raster scan index */
const static int blkIdx2blkXY[4][4] = {{0, 1, 4, 5}, {2, 3, 6, 7}, {8, 9, 12, 13}, {10, 11, 14, 15}};

/* Availability of the neighboring top-right block relative to the current block. */
const static int BlkTopRight[16] = {2, 2, 2, 3, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0};

/** Table 8-13 Specification of QPc as a function of qPI. */
const static uint8 mapQPi2QPc[52] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                                     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30,
                                     31, 32, 32, 33, 34, 34, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38,
                                     39, 39, 39, 39
                                    };

/** See 8.5.5 equation (8-252 and 8-253) the definition of v matrix.
*/
/* in zigzag scan */
const static int dequant_coefres[6][16] =
{
    {10, 13, 13, 10, 16, 10, 13, 13, 13, 13, 16, 10, 16, 13, 13, 16},
    {11, 14, 14, 11, 18, 11, 14, 14, 14, 14, 18, 11, 18, 14, 14, 18},
    {13, 16, 16, 13, 20, 13, 16, 16, 16, 16, 20, 13, 20, 16, 16, 20},
    {14, 18, 18, 14, 23, 14, 18, 18, 18, 18, 23, 14, 23, 18, 18, 23},
    {16, 20, 20, 16, 25, 16, 20, 20, 20, 20, 25, 16, 25, 20, 20, 25},
    {18, 23, 23, 18, 29, 18, 23, 23, 23, 23, 29, 18, 29, 23, 23, 29}
};

/** From jm7.6 block.c. (in zigzag scan) */
const static int quant_coef[6][16] =
{
    {13107, 8066, 8066, 13107, 5243, 13107, 8066, 8066, 8066, 8066, 5243, 13107, 5243, 8066, 8066, 5243},
    {11916, 7490, 7490, 11916, 4660, 11916, 7490, 7490, 7490, 7490, 4660, 11916, 4660, 7490, 7490, 4660},
    {10082, 6554, 6554, 10082, 4194, 10082, 6554, 6554, 6554, 6554, 4194, 10082, 4194, 6554, 6554, 4194},
    {9362, 5825, 5825, 9362, 3647, 9362, 5825, 5825, 5825, 5825, 3647, 9362, 3647, 5825, 5825, 3647},
    {8192, 5243, 5243, 8192, 3355, 8192, 5243, 5243, 5243, 5243, 3355, 8192, 3355, 5243, 5243, 3355},
    {7282, 4559, 4559, 7282, 2893, 7282, 4559, 4559, 4559, 4559, 2893, 7282, 2893, 4559, 4559, 2893}
};

/** Convert scan from raster scan order to block decoding order and
from block decoding order to raster scan order. Same table!!!
*/
const static uint8 ras2dec[16] = {0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15};

/* mapping from level_idc to index map (255 marks an undefined level_idc) */
const static uint8 mapLev2Idx[61] = {255, 255, 255, 255, 255, 255, 255, 255, 255, 1,
                                     0, 1, 2, 3, 255, 255, 255, 255, 255, 255,
                                     4, 5, 6, 255, 255, 255, 255, 255, 255, 255,
                                     7, 8, 9, 255, 255, 255, 255, 255, 255, 255,
                                     10, 11, 12, 255, 255, 255, 255, 255, 255, 255,
                                     13, 14, 255, 255, 255, 255, 255, 255, 255, 255
                                    };

/* map back from index to Level IDC */
const static uint8 mapIdx2Lev[MAX_LEVEL_IDX] = {10, 11, 12, 13, 20, 21, 22, 30, 31, 32, 40, 41, 42, 50, 51};

/** from the index map to the MaxDPB value times 2 */
const static int32 MaxDPBX2[MAX_LEVEL_IDX] = {297, 675, 1782, 1782, 1782, 3564, 6075, 6075,
                                              13500, 15360, 24576, 24576, 24576, 82620, 138240
                                             };

/* map index to the max frame size */
const static int MaxFS[MAX_LEVEL_IDX] = {99, 396, 396, 396, 396, 792, 1620, 1620, 3600, 5120,
                                         8192, 8192, 8192, 22080, 36864
                                        };

/* map index to max MB processing rate */
const static int32 MaxMBPS[MAX_LEVEL_IDX] = {1485, 3000, 6000, 11880, 11880, 19800, 20250, 40500,
                                             108000, 216000, 245760, 245760, 491520, 589824, 983040
                                            };

/* map index to max video bit rate */
const static uint32 MaxBR[MAX_LEVEL_IDX] = {64, 192, 384, 768, 2000, 4000, 4000, 10000, 14000,
                                            20000, 20000, 50000, 50000, 135000, 240000
                                           };

/* map index to max CPB size */
const static uint32 MaxCPB[MAX_LEVEL_IDX] = {175, 500, 1000, 2000, 2000, 4000, 4000, 10000,
                                             14000, 20000, 25000, 62500, 62500, 135000, 240000
                                            };

/* map index to max vertical MV range */
const static int MaxVmvR[MAX_LEVEL_IDX] = {64, 128, 128, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512};

#endif /* _AVCINT_COMMON_H_ */



================================================
FILE: RtspCamera/jni/avc_h264/common/include/avclib_common.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache
License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/**
This file contains declarations of internal functions for common encoder/decoder library.
@publishedAll
*/
#ifndef AVCCOMMON_LIB_H_INCLUDED
#define AVCCOMMON_LIB_H_INCLUDED

#ifndef AVCINT_COMMON_H_INCLUDED
#include "avcint_common.h"
#endif
#ifndef OSCL_BASE_H_INCLUDED
#include "oscl_base.h"
#endif

/*----------- deblock.c --------------*/
/**
This function performs conditional deblocking on a complete picture.
\param "video"  "Pointer to AVCCommonObj."
\return "AVC_SUCCESS for success and AVC_FAIL otherwise."
*/
OSCL_IMPORT_REF AVCStatus DeblockPicture(AVCCommonObj *video);

/**
This function performs MB-based deblocking when MB_BASED_DEBLOCK
is defined at compile time.
\param "video"  "Pointer to AVCCommonObj."
\return "AVC_SUCCESS for success and AVC_FAIL otherwise."
*/
void MBInLoopDeblock(AVCCommonObj *video);


/*---------- dpb.c --------------------*/
/**
This function is called everytime a new sequence is detected.
\param "avcHandle"  "Pointer to AVCHandle."
\param "video"  "Pointer to AVCCommonObj."
\param "padding"  "Flag specifying whether padding in luma component is needed (used for encoding)."
\return "AVC_SUCCESS or AVC_FAIL."
*/
OSCL_IMPORT_REF AVCStatus AVCConfigureSequence(AVCHandle *avcHandle, AVCCommonObj *video, bool padding);

/**
This function allocates and initializes the decoded picture buffer structure based on
the profile and level for the first sequence parameter set.
Currently, it does not allow
changing in profile/level for subsequent SPS.
\param "avcHandle"  "Pointer to AVCHandle."
\param "video"  "Pointer to AVCCommonObj."
\param "FrameHeightInMbs"   "Height of the frame in the unit of MBs."
\param "PicWidthInMbs"  "Width of the picture in the unit of MBs."
\param "padding"  "Flag specifying whether padding in luma component is needed (used for encoding)."
\return "AVC_SUCCESS or AVC_FAIL."
*/
AVCStatus InitDPB(AVCHandle *avcHandle, AVCCommonObj *video, int FrameHeightInMbs, int PicWidthInMbs, bool padding);

/**
This function frees the DPB memory.
\param "avcHandle"  "Pointer to AVCHandle."
\param "video"  "Pointer to AVCCommonObj."
\return "AVC_SUCCESS or AVC_FAIL."
*/
OSCL_IMPORT_REF AVCStatus CleanUpDPB(AVCHandle *avcHandle, AVCCommonObj *video);

/**
This function finds empty frame in the decoded picture buffer to be used for the
current picture, initializes the corresponding picture structure with Sl, Scb, Scr,
width, height and pitch.
\param "avcHandle" "Pointer to the main handle object."
\param "video"  "Pointer to AVCCommonObj."
\return "AVC_SUCCESS or AVC_FAIL."
*/
OSCL_IMPORT_REF AVCStatus DPBInitBuffer(AVCHandle *avcHandle, AVCCommonObj *video);

/**
This function finds empty frame in the decoded picture buffer to be used for the
current picture, initializes the corresponding picture structure with Sl, Scb, Scr,
width, height and pitch.
\param "video"  "Pointer to AVCCommonObj."
\param "CurrPicNum" "Current picture number (only used in decoder)."
\return "AVC_SUCCESS or AVC_FAIL."
*/
OSCL_IMPORT_REF void DPBInitPic(AVCCommonObj *video, int CurrPicNum);

/**
This function releases the current frame back to the available pool for skipped frame
after encoding.
\param "avcHandle" "Pointer to the main handle object."
\param "video" "Pointer to the AVCCommonObj."
\return "void."
*/
OSCL_IMPORT_REF void DPBReleaseCurrentFrame(AVCHandle *avcHandle, AVCCommonObj *video);

/**
This function performs decoded reference picture marking process and store the current picture to the
corresponding frame storage in the decoded picture buffer.
\param "avcHandle" "Pointer to the main handle object."
\param "video" "Pointer to the AVCCommonObj."
\return "AVC_SUCCESS or AVC_FAIL."
*/
OSCL_IMPORT_REF AVCStatus StorePictureInDPB(AVCHandle *avcHandle, AVCCommonObj *video);

/**
This function perform sliding window operation on the reference picture lists,
see subclause 8.2.5.3. It removes short-term ref frames with smallest FrameNumWrap
from the reference list.
\param "avcHandle" "Pointer to the main handle object."
\param "video" "Pointer to the AVCCommonObj."
\param "dpb"  "Pointer to the AVCDecPicBuffer."
\return "AVC_SUCCESS or AVC_FAIL (contradicting values or scenario as in the Note in the draft)."
*/
AVCStatus sliding_window_process(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb);

/**
This function perform adaptive memory marking operation on the reference picture lists,
see subclause 8.2.5.4. It calls other functions for specific operations.
\param "video" "Pointer to the AVCCommonObj."
\param "dpb"  "Pointer to the AVCDecPicBuffer."
\param "sliceHdr"   "Pointer to the AVCSliceHeader."
\return "AVC_SUCCESS or AVC_FAIL (contradicting values or scenario as in the Note in the draft)."
*/
AVCStatus adaptive_memory_marking(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, AVCSliceHeader *sliceHdr);

/**
This function performs memory management control operation 1, marking a short-term picture
as unused for reference. See subclause 8.2.5.4.1.
\param "video" "Pointer to the AVCCommonObj."
\param "dpb"  "Pointer to the AVCDecPicBuffer."
\param "difference_of_pic_nums_minus1"  "From the syntax in dec_ref_pic_marking()."
*/
void MemMgrCtrlOp1(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, int difference_of_pic_nums_minus1);

/**
This function performs memory management control operation 2, marking a long-term picture
as unused for reference. See subclause 8.2.5.4.2.
\param "dpb"  "Pointer to the AVCDecPicBuffer."
\param "field_pic_flag" "Flag whether the current picture is field or not."
\param "long_term_pic_num"  "From the syntax in dec_ref_pic_marking()."
*/
void MemMgrCtrlOp2(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, int long_term_pic_num);

/**
This function performs memory management control operation 3, assigning a LongTermFrameIdx
to a short-term reference picture. See subclause 8.2.5.4.3.
\param "video" "Pointer to the AVCCommonObj."
\param "dpb"  "Pointer to the AVCDecPicBuffer."
\param "difference_of_pic_nums_minus1"  "From the syntax in dec_ref_pic_marking()."
\param "long_term_pic_num"  "From the syntax in dec_ref_pic_marking()."
*/
void MemMgrCtrlOp3(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint difference_of_pic_nums_minus1, uint long_term_frame_idx);

/**
This function performs memory management control operation 4, getting new MaxLongTermFrameIdx.
See subclause 8.2.5.4.4.
\param "video" "Pointer to the AVCCommonObj."
\param "dpb"  "Pointer to the AVCDecPicBuffer."
\param "max_long_term_frame_idx_plus1"  "From the syntax in dec_ref_pic_marking()."
*/
void MemMgrCtrlOp4(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint max_long_term_frame_idx_plus1);

/**
This function performs memory management control operation 5, marking all reference pictures
as unused for reference and set MaxLongTermFrameIdx to no long-termframe indices.
See subclause 8.2.5.4.5.
\param "video" "Pointer to the AVCCommonObj."
\param "dpb"  "Pointer to the AVCDecPicBuffer."
*/ void MemMgrCtrlOp5(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb); /** This function performs memory management control operation 6, assigning a long-term frame index to the current picture. See subclause 8.2.5.4.6. \param "video" "Pointer to the AVCCommonObj." \param "dpb" "Pointer to the AVCDecPicBuffer." \param "long_term_frame_idx" "From the syntax in dec_ref_pic_marking()." */ void MemMgrCtrlOp6(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_idx); /** This function mark a long-term ref frame with a specific frame index as unused for reference. \param "dpb" "Pointer to the AVCDecPicBuffer." \param "long_term_frame_idx" "To look for" */ void unmark_long_term_frame_for_reference_by_frame_idx(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint long_term_frame_idx); /** This function mark a long-term ref field with a specific frame index as unused for reference except a frame that contains a picture with picNumX. \param "dpb" "Pointer to the AVCDecPicBuffer." \param "long_term_frame_idx" "To look for." \param "picNumX" "To look for." */ void unmark_long_term_field_for_reference_by_frame_idx(AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_indx, int picNumX); /** This function mark a frame to unused for reference. \param "fs" "Pointer to AVCFrameStore to be unmarked." */ void unmark_for_reference(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint idx); void update_ref_list(AVCDecPicBuffer *dpb); /*---------- fmo.c --------------*/ /** This function initializes flexible macroblock reordering. \param "video" "Pointer to AVCCommonObj." \return "AVC_SUCCESS for success and AVC_FAIL otherwise." */ OSCL_IMPORT_REF AVCStatus FMOInit(AVCCommonObj *video); /** This function fills up an array that maps Map unit to the slice group following the interleaved slice group map type. \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "run_length_minus1" "Array of the run-length." 
\param "num_slice_groups_minus_1" "Number of slice group minus 1." \param "PicSizeInMapUnit" "Size of the picture in number Map units." \return "Void." */ void FmoGenerateType0MapUnitMap(int *mapUnitToSliceGroupMap, uint *run_length_minus1, uint num_slice_groups_minus1, uint PicSizeInMapUnits); /** This function fills up an array that maps Map unit to the slice group following the dispersed slice group map type. \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "PicWidthInMbs" "Width of the luma picture in macroblock unit." \param "num_slice_groups_minus_1" "Number of slice group minus 1." \param "PicSizeInMapUnit" "Size of the picture in number Map units." \return "Void." */ void FmoGenerateType1MapUnitMap(int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits); /** This function fills up an array that maps Map unit to the slice group following the foreground with left-over slice group map type. \param "pps" "Pointer to AVCPicParamSets structure." \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "PicWidthInMbs" "Width of the luma picture in macroblock unit." \param "num_slice_groups_minus_1" "Number of slice group minus 1." \param "PicSizeInMapUnit" "Size of the picture in number Map units." \return "Void." */ void FmoGenerateType2MapUnitMap(AVCPicParamSet *pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits); /** This function fills up an array that maps Map unit to the slice group following the box-out slice group map type. \param "pps" "Pointer to AVCPicParamSets structure." \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "PicWidthInMbs" "Width of the luma picture in macroblock unit." \return "Void." 
*/ void FmoGenerateType3MapUnitMap(AVCCommonObj *video, AVCPicParamSet* pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs); /** This function fills up an array that maps Map unit to the slice group following the raster scan slice group map type. \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "MapUnitsInSliceGroup0" "Derived in subclause 7.4.3." \param "slice_group_change_direction_flag" "A value from the slice header." \param "PicSizeInMapUnit" "Size of the picture in number Map units." \return "void" */ void FmoGenerateType4MapUnitMap(int *mapUnitToSliceGroupMap, int MapUnitsInSliceGroup0, int slice_group_change_direction_flag, uint PicSizeInMapUnits); /** This function fills up an array that maps Map unit to the slice group following wipe slice group map type. \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "video" "Pointer to AVCCommonObj structure." \param "slice_group_change_direction_flag" "A value from the slice header." \param "PicSizeInMapUnit" "Size of the picture in number Map units." \return "void" */ void FmoGenerateType5MapUnitMap(int *mapUnitsToSliceGroupMap, AVCCommonObj *video, int slice_group_change_direction_flag, uint PicSizeInMapUnits); /** This function fills up an array that maps Map unit to the slice group following wipe slice group map type. \param "mapUnitToSliceGroupMap" "Array of slice group mapping." \param "slice_group_id" "Array of slice_group_id from AVCPicParamSet structure." \param "PicSizeInMapUnit" "Size of the picture in number Map units." \return "void" */ void FmoGenerateType6MapUnitMap(int *mapUnitsToSliceGroupMap, int *slice_group_id, uint PicSizeInMapUnits); /*------------- itrans.c --------------*/ /** This function performs transformation of the Intra16x16DC value according to subclause 8.5.6. \param "block" "Pointer to the video->block[0][0][0]." \param "QPy" "Quantization parameter." \return "void." 
*/ void Intra16DCTrans(int16 *block, int Qq, int Rq); /** This function performs transformation of a 4x4 block according to subclause 8.5.8. \param "block" "Pointer to the origin of transform coefficient area." \param "pred" "Pointer to the origin of predicted area." \param "cur" "Pointer to the origin of the output area." \param "width" "Pitch of cur." \return "void." */ void itrans(int16 *block, uint8 *pred, uint8 *cur, int width); /* This function is the same one as itrans except for chroma. \param "block" "Pointer to the origin of transform coefficient area." \param "pred" "Pointer to the origin of predicted area." \param "cur" "Pointer to the origin of the output area." \param "width" "Pitch of cur." \return "void." */ void ictrans(int16 *block, uint8 *pred, uint8 *cur, int width); /** This function performs transformation of the DCChroma value according to subclause 8.5.7. \param "block" "Pointer to the video->block[0][0][0]." \param "QPc" "Quantization parameter." \return "void." */ void ChromaDCTrans(int16 *block, int Qq, int Rq); /** This function copies a block from pred to cur. \param "pred" "Pointer to prediction block." \param "cur" "Pointer to the current YUV block." \param "width" "Pitch of cur memory." \param "pred_pitch" "Pitch for pred memory. \return "void." */ void copy_block(uint8 *pred, uint8 *cur, int width, int pred_pitch); /*--------- mb_access.c ----------------*/ /** This function initializes the neighboring information before start macroblock decoding. \param "video" "Pointer to AVCCommonObj." \param "mbNum" "The current macroblock index." \param "currMB" "Pointer to the current AVCMacroblock structure." \return "void" */ OSCL_IMPORT_REF void InitNeighborAvailability(AVCCommonObj *video, int mbNum); /** This function checks whether the requested neighboring macroblock is available. \param "MbToSliceGroupMap" "Array containing the slice group ID mapping to MB index." \param "PicSizeInMbs" "Size of the picture in number of MBs." 
\param "mbAddr" "Neighboring macroblock index to check." \param "currMbAddr" "Current macroblock index." \return "TRUE if the neighboring MB is available, FALSE otherwise." */ bool mb_is_available(AVCMacroblock *mblock, uint PicSizeInMbs, int mbAddr, int currMbAddr); /** This function performs prediction of the nonzero coefficient for a luma block (i,j). \param "video" "Pointer to AVCCommonObj." \param "i" "Block index, horizontal." \param "j" "Block index, vertical." \return "Predicted number of nonzero coefficient." */ OSCL_IMPORT_REF int predict_nnz(AVCCommonObj *video, int i, int j); /** This function performs prediction of the nonzero coefficient for a chroma block (i,j). \param "video" "Pointer to AVCCommonObj." \param "i" "Block index, horizontal." \param "j" "Block index, vertical." \return "Predicted number of nonzero coefficient." */ OSCL_IMPORT_REF int predict_nnz_chroma(AVCCommonObj *video, int i, int j); /** This function calculates the predicted motion vectors for the current macroblock. \param "video" "Pointer to AVCCommonObj." \param "encFlag" "Boolean whether this function is used by encoder or decoder." \return "void." */ OSCL_IMPORT_REF void GetMotionVectorPredictor(AVCCommonObj *video, int encFlag); /*---------- reflist.c -----------------*/ /** This function initializes reference picture list used in INTER prediction at the beginning of each slice decoding. See subclause 8.2.4. \param "video" "Pointer to AVCCommonObj." \return "void" Output is video->RefPicList0, video->RefPicList1, video->refList0Size and video->refList1Size. */ OSCL_IMPORT_REF void RefListInit(AVCCommonObj *video); /** This function generates picture list from frame list. Used when current picture is field. see subclause 8.2.4.2.5. \param "video" "Pointer to AVCCommonObj." \param "IsL1" "Is L1 list?" \param "long_term" "Is long-term prediction?" 
\return "void" */ void GenPicListFromFrameList(AVCCommonObj *video, int IsL1, int long_term); /** This function performs reference picture list reordering according to the ref_pic_list_reordering() syntax. See subclause 8.2.4.3. \param "video" "Pointer to AVCCommonObj." \return "AVC_SUCCESS or AVC_FAIL" Output is video->RefPicList0, video->RefPicList1, video->refList0Size and video->refList1Size. */ OSCL_IMPORT_REF AVCStatus ReOrderList(AVCCommonObj *video); /** This function performs reference picture list reordering according to the ref_pic_list_reordering() syntax regardless of list 0 or list 1. See subclause 8.2.4.3. \param "video" "Pointer to AVCCommonObj." \param "isL1" "Is list 1 or not." \return "AVC_SUCCESS or AVC_FAIL" Output is video->RefPicList0 and video->refList0Size or video->RefPicList1 and video->refList1Size. */ AVCStatus ReorderRefPicList(AVCCommonObj *video, int isL1); /** This function performs reordering process of reference picture list for short-term pictures. See subclause 8.2.4.3.1. \param "video" "Pointer to AVCCommonObj." \param "picNumLX" "picNumLX of an entry in the reference list." \param "refIdxLX" "Pointer to the current entry index in the reference." \param "isL1" "Is list 1 or not." \return "AVC_SUCCESS or AVC_FAIL" */ AVCStatus ReorderShortTerm(AVCCommonObj *video, int picNumLX, int *refIdxLX, int isL1); /** This function performs reordering process of reference picture list for long-term pictures. See subclause 8.2.4.3.2. \param "video" "Pointer to AVCCommonObj." \param "LongTermPicNum" "LongTermPicNum of an entry in the reference list." \param "refIdxLX" "Pointer to the current entry index in the reference." \param "isL1" "Is list 1 or not." \return "AVC_SUCCESS or AVC_FAIL" */ AVCStatus ReorderLongTerm(AVCCommonObj *video, int LongTermPicNum, int *refIdxLX, int isL1); /** This function gets the pictures in DPB according to the PicNum. \param "video" "Pointer to AVCCommonObj." 
\param "picNum" "PicNum of the picture we are looking for." \return "Pointer to the AVCPictureData or NULL if not found" */ AVCPictureData* GetShortTermPic(AVCCommonObj *video, int picNum); /** This function gets the pictures in DPB according to the LongtermPicNum. \param "video" "Pointer to AVCCommonObj." \param "LongtermPicNum" "LongtermPicNum of the picture we are looking for." \return "Pointer to the AVCPictureData." */ AVCPictureData* GetLongTermPic(AVCCommonObj *video, int LongtermPicNum); /** This function indicates whether the picture is used for short-term reference or not. \param "s" "Pointer to AVCPictureData." \return "1 if it is used for short-term, 0 otherwise." */ int is_short_ref(AVCPictureData *s); /** This function indicates whether the picture is used for long-term reference or not. \param "s" "Pointer to AVCPictureData." \return "1 if it is used for long-term, 0 otherwise." */ int is_long_ref(AVCPictureData *s); /** This function sorts array of pointers to AVCPictureData in descending order of the PicNum value. \param "data" "Array of pointers to AVCPictureData." \param "num" "Size of the array." \return "void" */ void SortPicByPicNum(AVCPictureData *data[], int num); /** This function sorts array of pointers to AVCPictureData in ascending order of the PicNum value. \param "data" "Array of pointers to AVCPictureData." \param "num" "Size of the array." \return "void" */ void SortPicByPicNumLongTerm(AVCPictureData *data[], int num); /** This function sorts array of pointers to AVCFrameStore in descending order of the FrameNumWrap value. \param "data" "Array of pointers to AVCFrameStore." \param "num" "Size of the array." \return "void" */ void SortFrameByFrameNumWrap(AVCFrameStore *data[], int num); /** This function sorts array of pointers to AVCFrameStore in ascending order of the LongTermFrameIdx value. \param "data" "Array of pointers to AVCFrameStore." \param "num" "Size of the array." 
\return "void" */ void SortFrameByLTFrameIdx(AVCFrameStore *data[], int num); /** This function sorts array of pointers to AVCPictureData in descending order of the PicOrderCnt value. \param "data" "Array of pointers to AVCPictureData." \param "num" "Size of the array." \return "void" */ void SortPicByPOC(AVCPictureData *data[], int num, int descending); /** This function sorts array of pointers to AVCPictureData in ascending order of the LongTermPicNum value. \param "data" "Array of pointers to AVCPictureData." \param "num" "Size of the array." \return "void" */ void SortPicByLTPicNum(AVCPictureData *data[], int num); /** This function sorts array of pointers to AVCFrameStore in descending order of the PicOrderCnt value. \param "data" "Array of pointers to AVCFrameStore." \param "num" "Size of the array." \return "void" */ void SortFrameByPOC(AVCFrameStore *data[], int num, int descending); #endif /* _AVCCOMMON_LIB_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/common/src/deblock.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #include "avclib_common.h" #include "oscl_mem.h" #define MAX_QP 51 #define MB_BLOCK_SIZE 16 // NOTE: these 3 tables are for funtion GetStrength() only const static int ININT_STRENGTH[4] = {0x04040404, 0x03030303, 0x03030303, 0x03030303}; // NOTE: these 3 tables are for funtion EdgeLoop() only // NOTE: to change the tables below for instance when the QP doubling is changed from 6 to 8 values const static int ALPHA_TABLE[52] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 17, 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, 63, 71, 80, 90, 101, 113, 127, 144, 162, 182, 203, 226, 255, 255} ; const static int BETA_TABLE[52] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18} ; const static int CLIP_TAB[52][5] = { { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 1, 1, 1}, { 0, 0, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 2, 3, 3}, { 0, 1, 2, 3, 3}, { 0, 2, 2, 3, 3}, { 0, 2, 2, 4, 4}, { 0, 2, 3, 4, 4}, { 0, 2, 3, 4, 4}, { 0, 3, 3, 5, 5}, { 0, 3, 4, 6, 6}, { 0, 3, 4, 6, 6}, { 0, 4, 5, 7, 7}, { 0, 4, 5, 8, 8}, { 0, 4, 6, 9, 9}, { 0, 5, 7, 10, 10}, { 0, 6, 8, 11, 11}, { 0, 6, 8, 13, 13}, { 0, 7, 10, 14, 14}, { 0, 8, 11, 16, 16}, { 0, 9, 12, 18, 18}, { 0, 10, 13, 20, 20}, { 0, 11, 15, 23, 23}, { 0, 13, 17, 25, 25} }; // NOTE: this table is only QP clipping, index = QP + video->FilterOffsetA/B, clipped 
to [0, 51] // video->FilterOffsetA/B is in {-12, 12] const static int QP_CLIP_TAB[76] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // [-12, 0] 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // [1, 51] 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51 // [52,63] }; static void DeblockMb(AVCCommonObj *video, int mb_x, int mb_y, uint8 *SrcY, uint8 *SrcU, uint8 *SrcV); //static void GetStrength(AVCCommonObj *video, uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir, int edge); static void GetStrength_Edge0(uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir); static void GetStrength_VerticalEdges(uint8 *Strength, AVCMacroblock* MbQ); static void GetStrength_HorizontalEdges(uint8 Strength[12], AVCMacroblock* MbQ); static void EdgeLoop_Luma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch); static void EdgeLoop_Luma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch); static void EdgeLoop_Chroma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch); static void EdgeLoop_Chroma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch); /* ***************************************************************************************** * \brief Filter all macroblocks in order of increasing macroblock address. 
*****************************************************************************************
*/
/* Deblock an entire decoded picture: visit every macroblock in raster-scan order
   and filter it in place.  Luma advances 16 pels per MB, chroma 8 pels
   (4:2:0 layout: chroma pitch is pitch >> 1, as computed below). */
OSCL_EXPORT_REF AVCStatus DeblockPicture(AVCCommonObj *video)
{
    uint i, j;
    int pitch = video->currPic->pitch, pitch_c, width;
    uint8 *SrcY, *SrcU, *SrcV;

    SrcY = video->currPic->Sl;  // pointers to source luma/chroma planes
    SrcU = video->currPic->Scb;
    SrcV = video->currPic->Scr;
    pitch_c = pitch >> 1;       // chroma pitch (4:2:0)
    width = video->currPic->width;

    for (i = 0; i < video->PicHeightInMbs; i++)     /* one MB row at a time */
    {
        for (j = 0; j < video->PicWidthInMbs; j++)
        {
            DeblockMb(video, j, i, SrcY, SrcU, SrcV);
            // update SrcY, SrcU, SrcV to the next MB in this row
            SrcY += MB_BLOCK_SIZE;
            SrcU += (MB_BLOCK_SIZE >> 1);
            SrcV += (MB_BLOCK_SIZE >> 1);
        }
        /* step to the first MB of the next MB row: 16 luma lines (pitch << 4)
           minus the width already consumed while walking this row (8 chroma
           lines and half the width for the chroma planes) */
        SrcY += ((pitch << 4) - width);
        SrcU += ((pitch_c << 3) - (width >> 1));
        SrcV += ((pitch_c << 3) - (width >> 1));
    }

    return AVC_SUCCESS;
}

#ifdef MB_BASED_DEBLOCK
/*
*****************************************************************************************
* \brief Filter one macroblock in a fast macroblock memory and copy it to frame
*****************************************************************************************
*/
void MBInLoopDeblock(AVCCommonObj *video)
{
    AVCPictureData *currPic = video->currPic;
#ifdef USE_PRED_BLOCK
    uint8 *predCb, *predCr, *pred_block;
    int i, j, dst_width, dst_height, dst_widthc, dst_heightc;
#endif
    int pitch = currPic->pitch;
    int x_pos = video->mb_x;        // current MB coordinates (in MB units)
    int y_pos = video->mb_y;
    uint8 *curL, *curCb, *curCr;
    int offset;

    /* locate the current MB inside the frame planes */
    offset = (y_pos << 4) * pitch;
    curL = currPic->Sl + offset + (x_pos << 4);
    offset >>= 2;                   // chroma offset: (y_pos<<3)*(pitch>>1)
    offset += (x_pos << 3);
    curCb = currPic->Scb + offset;
    curCr = currPic->Scr + offset;

#ifdef USE_PRED_BLOCK
    pred_block = video->pred;
    /* NOTE(review): the constants below (row strides 20 and 12, plane offsets
       400/144, entry offsets 84/452/596) imply pred_block is laid out as a
       20x20 luma scratch area followed by two 12x12 chroma areas, each with a
       4-pel top/left border — inferred from the offsets, confirm against the
       allocation of video->pred. */

    /* 1. copy neighboring pixels from frame to the video->pred_block */
    if (y_pos) /* not the 0th row */
    {
        /* copy to the top 4 lines of the macroblock */
        curL -= (pitch << 2); /* go back 4 lines */

        oscl_memcpy(pred_block + 4, curL, 16);
        curL += pitch;
        oscl_memcpy(pred_block + 24, curL, 16);
        curL += pitch;
        oscl_memcpy(pred_block + 44, curL, 16);
        curL += pitch;
        oscl_memcpy(pred_block + 64, curL, 16);
        curL += pitch;

        curCb -= (pitch << 1); /* go back 4 lines chroma */
        curCr -= (pitch << 1);

        pred_block += 400;  /* skip luma scratch area, now at Cb area */

        oscl_memcpy(pred_block + 4, curCb, 8);
        curCb += (pitch >> 1);
        oscl_memcpy(pred_block + 16, curCb, 8);
        curCb += (pitch >> 1);
        oscl_memcpy(pred_block + 28, curCb, 8);
        curCb += (pitch >> 1);
        oscl_memcpy(pred_block + 40, curCb, 8);
        curCb += (pitch >> 1);

        pred_block += 144;  /* skip Cb scratch area, now at Cr area */

        oscl_memcpy(pred_block + 4, curCr, 8);
        curCr += (pitch >> 1);
        oscl_memcpy(pred_block + 16, curCr, 8);
        curCr += (pitch >> 1);
        oscl_memcpy(pred_block + 28, curCr, 8);
        curCr += (pitch >> 1);
        oscl_memcpy(pred_block + 40, curCr, 8);
        curCr += (pitch >> 1);

        pred_block = video->pred;   /* rewind to the start of the scratch area */
    }

    /* 2. perform deblocking. */
    DeblockMb(video, x_pos, y_pos, pred_block + 84, pred_block + 452, pred_block + 596);

    /* 3. copy it back to the frame and update pred_block */
    predCb = pred_block + 400;
    predCr = predCb + 144;

    /* find the range of the block inside pred_block to be copied back */
    if (y_pos) /* not the first MB row: the 4 border lines above were filtered too */
    {
        curL -= (pitch << 2);
        curCb -= (pitch << 1);
        curCr -= (pitch << 1);
        dst_height = 20;
        dst_heightc = 12;
    }
    else
    {
        pred_block += 80;   /* skip the (unused) 4-line top border */
        predCb += 48;
        predCr += 48;
        dst_height = 16;
        dst_heightc = 8;
    }

    if (x_pos) /* find the width */
    {
        curL -= 4;          /* include the 4-pel left border */
        curCb -= 4;
        curCr -= 4;
        if (x_pos == (int)(video->PicWidthInMbs - 1))
        {
            dst_width = 20;     /* right-most MB: nothing follows, flush all */
            dst_widthc = 12;
        }
        else
        {
            dst_width = 16;     /* right 4 columns are finished by the next MB */
            dst_widthc = 8;
        }
    }
    else
    {
        pred_block += 4;    /* skip the (unused) 4-pel left border */
        predCb += 4;
        predCr += 4;
        dst_width = 12;
        dst_widthc = 4;
    }

    /* perform copy */
    for (j = 0; j < dst_height; j++)
    {
        oscl_memcpy(curL, pred_block, dst_width);
        curL += pitch;
        pred_block += 20;
    }

    for (j = 0; j < dst_heightc; j++)
    {
        oscl_memcpy(curCb, predCb, dst_widthc);
        oscl_memcpy(curCr, predCr, dst_widthc);
        curCb += (pitch >> 1);
        curCr += (pitch >> 1);
        predCb += 12;
        predCr += 12;
    }

    if (x_pos != (int)(video->PicWidthInMbs - 1)) /* now copy from the right-most 4 columns to the left-most 4 columns */
    {
        /* the right 4 columns of this MB become the left border of the next MB */
        pred_block = video->pred;
        for (i = 0; i < 20; i += 4)     /* luma: 20 rows, stride 20 */
        {
            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
            pred_block += 20;
            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
            pred_block += 20;
            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
            pred_block += 20;
            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
            pred_block += 20;
        }

        for (i = 0; i < 24; i += 4)     /* chroma: 12+12 rows, stride 12 */
        {
            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
            pred_block += 12;
            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
            pred_block += 12;
            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
            pred_block += 12;
            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
            pred_block += 12;
        }
    }
#else
    /* no scratch buffer: filter directly in the frame memory */
    DeblockMb(video, x_pos, y_pos, curL, curCb, curCr);
#endif

    return ;
}
#endif

/*
*****************************************************************************************
* \brief Deblocking
filter for one macroblock. ***************************************************************************************** */ void DeblockMb(AVCCommonObj *video, int mb_x, int mb_y, uint8 *SrcY, uint8 *SrcU, uint8 *SrcV) { AVCMacroblock *MbP, *MbQ; int edge, QP, QPC; int filterLeftMbEdgeFlag = (mb_x != 0); int filterTopMbEdgeFlag = (mb_y != 0); int pitch = video->currPic->pitch; int indexA, indexB, tmp; int Alpha, Beta, Alpha_c, Beta_c; int mbNum = mb_y * video->PicWidthInMbs + mb_x; int *clipTable, *clipTable_c, *qp_clip_tab; uint8 Strength[16]; void* str; MbQ = &(video->mblock[mbNum]); // current Mb // If filter is disabled, return if (video->sliceHdr->disable_deblocking_filter_idc == 1) return; if (video->sliceHdr->disable_deblocking_filter_idc == 2) { // don't filter at slice boundaries filterLeftMbEdgeFlag = mb_is_available(video->mblock, video->PicSizeInMbs, mbNum - 1, mbNum); filterTopMbEdgeFlag = mb_is_available(video->mblock, video->PicSizeInMbs, mbNum - video->PicWidthInMbs, mbNum); } /* NOTE: edge=0 and edge=1~3 are separate cases because of the difference of MbP, index A and indexB calculation */ /* for edge = 1~3, MbP, indexA and indexB remain the same, and thus there is no need to re-calculate them for each edge */ qp_clip_tab = (int *)QP_CLIP_TAB + 12; /* 1.VERTICAL EDGE + MB BOUNDARY (edge = 0) */ if (filterLeftMbEdgeFlag) { MbP = MbQ - 1; //GetStrength(video, Strength, MbP, MbQ, 0, 0); // Strength for 4 blks in 1 stripe, 0 => vertical edge GetStrength_Edge0(Strength, MbP, MbQ, 0); str = (void*)Strength; //de-ref type-punned pointer fix if (*((uint32*)str)) // only if one of the 4 Strength bytes is != 0 { QP = (MbP->QPy + MbQ->QPy + 1) >> 1; // Average QP of the two blocks; indexA = QP + video->FilterOffsetA; indexB = QP + video->FilterOffsetB; indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA) indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB) Alpha = ALPHA_TABLE[indexA]; Beta = BETA_TABLE[indexB]; 
clipTable = (int *) CLIP_TAB[indexA]; if (Alpha > 0 && Beta > 0) #ifdef USE_PRED_BLOCK EdgeLoop_Luma_vertical(SrcY, Strength, Alpha, Beta, clipTable, 20); #else EdgeLoop_Luma_vertical(SrcY, Strength, Alpha, Beta, clipTable, pitch); #endif QPC = (MbP->QPc + MbQ->QPc + 1) >> 1; indexA = QPC + video->FilterOffsetA; indexB = QPC + video->FilterOffsetB; indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA) indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB) Alpha = ALPHA_TABLE[indexA]; Beta = BETA_TABLE[indexB]; clipTable = (int *) CLIP_TAB[indexA]; if (Alpha > 0 && Beta > 0) { #ifdef USE_PRED_BLOCK EdgeLoop_Chroma_vertical(SrcU, Strength, Alpha, Beta, clipTable, 12); EdgeLoop_Chroma_vertical(SrcV, Strength, Alpha, Beta, clipTable, 12); #else EdgeLoop_Chroma_vertical(SrcU, Strength, Alpha, Beta, clipTable, pitch >> 1); EdgeLoop_Chroma_vertical(SrcV, Strength, Alpha, Beta, clipTable, pitch >> 1); #endif } } } /* end of: if(filterLeftMbEdgeFlag) */ /* 2.VERTICAL EDGE (no boundary), the edges are all inside a MB */ /* First calculate the necesary parameters all at once, outside the loop */ MbP = MbQ; indexA = MbQ->QPy + video->FilterOffsetA; indexB = MbQ->QPy + video->FilterOffsetB; // index indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA) indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB) Alpha = ALPHA_TABLE[indexA]; Beta = BETA_TABLE[indexB]; clipTable = (int *)CLIP_TAB[indexA]; /* Save Alpha, Beta and clipTable for future use, with the obselete variables filterLeftMbEdgeFlag, mbNum amd tmp */ filterLeftMbEdgeFlag = Alpha; mbNum = Beta; tmp = (int)clipTable; indexA = MbQ->QPc + video->FilterOffsetA; indexB = MbQ->QPc + video->FilterOffsetB; indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA) indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB) Alpha_c = ALPHA_TABLE[indexA]; Beta_c = BETA_TABLE[indexB]; clipTable_c = (int 
*)CLIP_TAB[indexA]; GetStrength_VerticalEdges(Strength + 4, MbQ); // Strength for 4 blks in 1 stripe, 0 => vertical edge for (edge = 1; edge < 4; edge++) // 4 vertical strips of 16 pel { //GetStrength_VerticalEdges(video, Strength, MbP, MbQ, 0, edge); // Strength for 4 blks in 1 stripe, 0 => vertical edge if (*((int*)(Strength + (edge << 2)))) // only if one of the 4 Strength bytes is != 0 { if (Alpha > 0 && Beta > 0) #ifdef USE_PRED_BLOCK EdgeLoop_Luma_vertical(SrcY + (edge << 2), Strength + (edge << 2), Alpha, Beta, clipTable, 20); #else EdgeLoop_Luma_vertical(SrcY + (edge << 2), Strength + (edge << 2), Alpha, Beta, clipTable, pitch); #endif if (!(edge & 1) && Alpha_c > 0 && Beta_c > 0) { #ifdef USE_PRED_BLOCK EdgeLoop_Chroma_vertical(SrcU + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12); EdgeLoop_Chroma_vertical(SrcV + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12); #else EdgeLoop_Chroma_vertical(SrcU + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1); EdgeLoop_Chroma_vertical(SrcV + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1); #endif } } } //end edge /* 3.HORIZONTAL EDGE + MB BOUNDARY (edge = 0) */ if (filterTopMbEdgeFlag) { MbP = MbQ - video->PicWidthInMbs; //GetStrength(video, Strength, MbP, MbQ, 1, 0); // Strength for 4 blks in 1 stripe, 0 => vertical edge GetStrength_Edge0(Strength, MbP, MbQ, 1); str = (void*)Strength; //de-ref type-punned pointer fix if (*((uint32*)str)) // only if one of the 4 Strength bytes is != 0 { QP = (MbP->QPy + MbQ->QPy + 1) >> 1; // Average QP of the two blocks; indexA = QP + video->FilterOffsetA; indexB = QP + video->FilterOffsetB; indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA) indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB) Alpha = ALPHA_TABLE[indexA]; Beta = BETA_TABLE[indexB]; clipTable = (int *)CLIP_TAB[indexA]; if (Alpha > 0 && Beta > 0) { #ifdef 
USE_PRED_BLOCK EdgeLoop_Luma_horizontal(SrcY, Strength, Alpha, Beta, clipTable, 20); #else EdgeLoop_Luma_horizontal(SrcY, Strength, Alpha, Beta, clipTable, pitch); #endif } QPC = (MbP->QPc + MbQ->QPc + 1) >> 1; indexA = QPC + video->FilterOffsetA; indexB = QPC + video->FilterOffsetB; indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA) indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB) Alpha = ALPHA_TABLE[indexA]; Beta = BETA_TABLE[indexB]; clipTable = (int *)CLIP_TAB[indexA]; if (Alpha > 0 && Beta > 0) { #ifdef USE_PRED_BLOCK EdgeLoop_Chroma_horizontal(SrcU, Strength, Alpha, Beta, clipTable, 12); EdgeLoop_Chroma_horizontal(SrcV, Strength, Alpha, Beta, clipTable, 12); #else EdgeLoop_Chroma_horizontal(SrcU, Strength, Alpha, Beta, clipTable, pitch >> 1); EdgeLoop_Chroma_horizontal(SrcV, Strength, Alpha, Beta, clipTable, pitch >> 1); #endif } } } /* end of: if(filterTopMbEdgeFlag) */ /* 4.HORIZONTAL EDGE (no boundary), the edges are inside a MB */ MbP = MbQ; /* Recover Alpha, Beta and clipTable for edge!=0 with the variables filterLeftMbEdgeFlag, mbNum and tmp */ /* Note that Alpha_c, Beta_c and clipTable_c for chroma is already calculated */ Alpha = filterLeftMbEdgeFlag; Beta = mbNum; clipTable = (int *)tmp; GetStrength_HorizontalEdges(Strength + 4, MbQ); // Strength for 4 blks in 1 stripe, 0 => vertical edge for (edge = 1; edge < 4; edge++) // 4 horicontal strips of 16 pel { //GetStrength(video, Strength, MbP, MbQ, 1, edge); // Strength for 4 blks in 1 stripe 1 => horizontal edge if (*((int*)(Strength + (edge << 2)))) // only if one of the 4 Strength bytes is != 0 { if (Alpha > 0 && Beta > 0) { #ifdef USE_PRED_BLOCK EdgeLoop_Luma_horizontal(SrcY + (edge << 2)*20, Strength + (edge << 2), Alpha, Beta, clipTable, 20); #else EdgeLoop_Luma_horizontal(SrcY + (edge << 2)*pitch, Strength + (edge << 2), Alpha, Beta, clipTable, pitch); #endif } if (!(edge & 1) && Alpha_c > 0 && Beta_c > 0) { #ifdef USE_PRED_BLOCK 
EdgeLoop_Chroma_horizontal(SrcU + (edge << 1)*12, Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);
                    EdgeLoop_Chroma_horizontal(SrcV + (edge << 1)*12, Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);
#else
                    EdgeLoop_Chroma_horizontal(SrcU + (edge << 1)*(pitch >> 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);
                    EdgeLoop_Chroma_horizontal(SrcV + (edge << 1)*(pitch >> 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);
#endif
                }
            }
        } //end edge

    return;
}

/*
 *****************************************************************************************************
 * \brief returns a buffer of 4 Strength values for one stripe in a mb (for different Frame types)
 *****************************************************************************************************
*/
/* Boundary strength (Bs) for the four 4x4 edges lying on macroblock edge 0, i.e. the
 * boundary between neighbor MbP and current MbQ.  dir==0: vertical MB edge (MbP is the
 * left neighbor); otherwise horizontal MB edge (MbP is the top neighbor).
 * Resulting byte values: 4/3 = intra MB edge (via ININT_STRENGTH), 2 = coded residual
 * on either side, 1 = ref-idx or mv difference, 0 = no filtering. */
void GetStrength_Edge0(uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir)
{
    int tmp;
    int16 *ptrQ, *ptrP;
    void* vptr;
    uint8 *pStrength;
    void* refIdx;

    if (MbP->mbMode == AVC_I4 || MbP->mbMode == AVC_I16 ||
            MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)
    {
        /* intra on either side: all four edge segments get maximum MB-edge strength */
        *((int*)Strength) = ININT_STRENGTH[0]; // Start with Strength=3. or Strength=4 for Mb-edge
    }
    else        // if not intra or SP-frame
    {
        *((int*)Strength) = 0;

        if (dir == 0)   // Vertical Edge 0
        {
            //1. Check the ref_frame_id
            //   (RefIdx entries are read pairwise as int16 through a void* to silence
            //   the de-ref type-punned pointer warning)
            refIdx = (void*) MbQ->RefIdx; //de-ref type-punned pointer fix
            ptrQ = (int16*)refIdx;
            refIdx = (void*)MbP->RefIdx; //de-ref type-punned pointer fix
            ptrP = (int16*)refIdx;
            pStrength = Strength;
            if (ptrQ[0] != ptrP[1]) pStrength[0] = 1;
            if (ptrQ[2] != ptrP[3]) pStrength[2] = 1;
            pStrength[1] = pStrength[0];
            pStrength[3] = pStrength[2];

            //2. Check the non-zero coeff blocks (4x4): left column of MbQ vs right column of MbP
            if (MbQ->nz_coeff[0] != 0 || MbP->nz_coeff[3] != 0) pStrength[0] = 2;
            if (MbQ->nz_coeff[4] != 0 || MbP->nz_coeff[7] != 0) pStrength[1] = 2;
            if (MbQ->nz_coeff[8] != 0 || MbP->nz_coeff[11] != 0) pStrength[2] = 2;
            if (MbQ->nz_coeff[12] != 0 || MbP->nz_coeff[15] != 0) pStrength[3] = 2;

            //3. Only need to check the mv difference
            vptr = (void*)MbQ->mvL0;  // for deref type-punned pointer
            ptrQ = (int16*)vptr;
            ptrP = (int16*)(MbP->mvL0 + 3); // points to 4x4 block #3 (the 4th column)

            // 1st blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4 (quarter-pel units)
                tmp = *ptrQ++ - *ptrP++;        // x component
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;        // y component
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pStrength++;
            ptrQ += 8;      // advance one 4x4 block row (int16 view of the mv array)
            ptrP += 8;

            // 2nd blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pStrength++;
            ptrQ += 8;
            ptrP += 8;

            // 3rd blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pStrength++;
            ptrQ += 8;
            ptrP += 8;

            // 4th blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }
        }
        else   // Horizontal Edge 0
        {
            //1. Check the ref_frame_id
            refIdx = (void*)MbQ->RefIdx; //de-ref type-punned pointer
            ptrQ = (int16*)refIdx;
            refIdx = (void*)MbP->RefIdx; //de-ref type-punned pointer
            ptrP = (int16*)refIdx;
            pStrength = Strength;
            if (ptrQ[0] != ptrP[2]) pStrength[0] = 1;
            if (ptrQ[1] != ptrP[3]) pStrength[2] = 1;
            pStrength[1] = pStrength[0];
            pStrength[3] = pStrength[2];

            //2. Check the non-zero coeff blocks (4x4): top row of MbQ vs bottom row of MbP
            if (MbQ->nz_coeff[0] != 0 || MbP->nz_coeff[12] != 0) pStrength[0] = 2;
            if (MbQ->nz_coeff[1] != 0 || MbP->nz_coeff[13] != 0) pStrength[1] = 2;
            if (MbQ->nz_coeff[2] != 0 || MbP->nz_coeff[14] != 0) pStrength[2] = 2;
            if (MbQ->nz_coeff[3] != 0 || MbP->nz_coeff[15] != 0) pStrength[3] = 2;

            //3. Only need to check the mv difference
            vptr = (void*)MbQ->mvL0;
            ptrQ = (int16*)vptr;
            ptrP = (int16*)(MbP->mvL0 + 12); // points to 4x4 block #12 (the 4th row)

            // 1st blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pStrength++;
            ptrQ += 2;      // advance one 4x4 block column
            ptrP += 2;

            // 2nd blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pStrength++;
            ptrQ += 2;
            ptrP += 2;

            // 3rd blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pStrength++;
            ptrQ += 2;
            ptrP += 2;

            // 4th blk
            if (*pStrength == 0)
            {
                // check |mv difference| >= 4
                tmp = *ptrQ++ - *ptrP++;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *ptrQ-- - *ptrP--;
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }
        } /* end of: else if(dir == 0) */
    } /* end of: if( !(MbP->mbMode == AVC_I4 ...) */
}

/* Boundary strength for the three interior vertical edges (edges 1..3) of MbQ.
 * Writes 12 strength bytes: Strength[0..3] edge 1, [4..7] edge 2, [8..11] edge 3. */
void GetStrength_VerticalEdges(uint8 *Strength, AVCMacroblock* MbQ)
{
    int idx, tmp;
    int16 *ptr, *pmvx, *pmvy;
    uint8 *pnz;
    uint8 *pStrength, *pStr;
    void* refIdx;

    if (MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)
    {
        *((int*)Strength) = ININT_STRENGTH[1]; // Start with Strength=3. or Strength=4 for Mb-edge
        *((int*)(Strength + 4)) = ININT_STRENGTH[2];
        *((int*)(Strength + 8)) = ININT_STRENGTH[3];
    }
    else        // Not intra or SP-frame
    {
        *((int*)Strength) = 0;  // for non-intra MB, strength = 0, 1 or 2.
        *((int*)(Strength + 4)) = 0;
        *((int*)(Strength + 8)) = 0;

        //1. Check the ref_frame_id
refIdx = (void*)MbQ->RefIdx; //de-ref type-punned pointer fix
        ptr = (int16*)refIdx;
        pStrength = Strength;
        /* ref-idx can only differ across the central 8x8 boundary (edge 2, bytes 4..7) */
        if (ptr[0] != ptr[1]) pStrength[4] = 1;
        if (ptr[2] != ptr[3]) pStrength[6] = 1;
        pStrength[5] = pStrength[4];
        pStrength[7] = pStrength[6];

        //2. Check the nz_coeff block and mv difference
        pmvx = (int16*)(MbQ->mvL0 + 1); // points to 4x4 block #1,not #0
        pmvy = pmvx + 1;

        for (idx = 0; idx < 4; idx += 2) // unroll the loop, make 4 iterations to 2
        {
            // first/third row : 1,2,3 or 9,10,12
            // Strength = 2 for a whole row
            pnz = MbQ->nz_coeff + (idx << 2);
            if (*pnz++ != 0) *pStrength = 2;
            if (*pnz++ != 0)
            {
                *pStrength = 2;
                *(pStrength + 4) = 2;
            }
            if (*pnz++ != 0)
            {
                *(pStrength + 4) = 2;
                *(pStrength + 8) = 2;
            }
            if (*pnz != 0) *(pStrength + 8) = 2;

            // Then Strength = 1
            if (*pStrength == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *pmvy - *(pmvy - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pmvx += 2;
            pmvy += 2;
            pStr = pStrength + 4;
            if (*pStr == 0)
            {
                //check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            pmvx += 2;
            pmvy += 2;
            pStr = pStrength + 8;
            if (*pStr == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            // Second/fourth row: 5,6,7 or 14,15,16
            // Strength = 2 for a whole row
            pnz = MbQ->nz_coeff + ((idx + 1) << 2);
            if (*pnz++ != 0) *(pStrength + 1) = 2;
            if (*pnz++ != 0)
            {
                *(pStrength + 1) = 2;
                *(pStrength + 5) = 2;
            }
            if (*pnz++ != 0)
            {
                *(pStrength + 5) = 2;
                *(pStrength + 9) = 2;
            }
            if (*pnz != 0) *(pStrength + 9) = 2;

            // Then Strength = 1
            pmvx += 4;
            pmvy += 4;
            pStr = pStrength + 1;
            if (*pStr == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            pmvx += 2;
            pmvy += 2;
            pStr = pStrength + 5;
            if (*pStr == 0)
            {
                //check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            pmvx += 2;
            pmvy += 2;
            pStr = pStrength + 9;
            if (*pStr == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 2);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            // update some variables for the next two rows
            pmvx += 4;
            pmvy += 4;
            pStrength += 2;
        } /* end of: for(idx=0; idx<2; idx++) */
    } /* end of: else if( MbQ->mbMode == AVC_I4 ...) */
}

/* Boundary strength for the three interior horizontal edges of MbQ; same 12-byte
 * output layout as GetStrength_VerticalEdges. */
void GetStrength_HorizontalEdges(uint8 Strength[12], AVCMacroblock* MbQ)
{
    int idx, tmp;
    int16 *ptr, *pmvx, *pmvy;
    uint8 *pStrength, *pStr;
    void* refIdx;

    if (MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)
    {
        *((int*)Strength) = ININT_STRENGTH[1]; // Start with Strength=3. or Strength=4 for Mb-edge
        *((int*)(Strength + 4)) = ININT_STRENGTH[2];
        *((int*)(Strength + 8)) = ININT_STRENGTH[3];
    }
    else        // Not intra or SP-frame
    {
        *((int*)Strength) = 0;       // for non-intra MB, strength = 0, 1 or 2.
        *((int*)(Strength + 4)) = 0; // for non-intra MB, strength = 0, 1 or 2.
        *((int*)(Strength + 8)) = 0; // for non-intra MB, strength = 0, 1 or 2.

        //1. Check the ref_frame_id
refIdx = (void*) MbQ->RefIdx; // de-ref type-punned fix
        ptr = (int16*) refIdx;
        pStrength = Strength;
        /* ref-idx can only differ across the central 8x8 boundary (edge 2, bytes 4..7) */
        if (ptr[0] != ptr[2]) pStrength[4] = 1;
        if (ptr[1] != ptr[3]) pStrength[6] = 1;
        pStrength[5] = pStrength[4];
        pStrength[7] = pStrength[6];

        //2. Check the nz_coeff block and mv difference
        pmvx = (int16*)(MbQ->mvL0 + 4); // points to 4x4 block #4,not #0
        pmvy = pmvx + 1;

        for (idx = 0; idx < 4; idx += 2) // unroll the loop, make 4 iterations to 2
        {
            // first/third row : 1,2,3 or 9,10,12
            // Strength = 2 for a whole row
            if (MbQ->nz_coeff[idx] != 0) *pStrength = 2;
            if (MbQ->nz_coeff[4+idx] != 0)
            {
                *pStrength = 2;
                *(pStrength + 4) = 2;
            }
            if (MbQ->nz_coeff[8+idx] != 0)
            {
                *(pStrength + 4) = 2;
                *(pStrength + 8) = 2;
            }
            if (MbQ->nz_coeff[12+idx] != 0) *(pStrength + 8) = 2;

            // Then Strength = 1
            if (*pStrength == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 8);  // mv of the 4x4 block one row above
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;

                tmp = *pmvy - *(pmvy - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStrength = 1;
            }

            pmvx += 8;
            pmvy += 8;
            pStr = pStrength + 4;
            if (*pStr == 0)
            {
                //check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            pmvx += 8;
            pmvy += 8;
            pStr = pStrength + 8;
            if (*pStr == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            // Second/fourth row: 5,6,7 or 14,15,16
            // Strength = 2 for a whole row
            if (MbQ->nz_coeff[idx+1] != 0) *(pStrength + 1) = 2;
            if (MbQ->nz_coeff[4+idx+1] != 0)
            {
                *(pStrength + 1) = 2;
                *(pStrength + 5) = 2;
            }
            if (MbQ->nz_coeff[8+idx+1] != 0)
            {
                *(pStrength + 5) = 2;
                *(pStrength + 9) = 2;
            }
            if (MbQ->nz_coeff[12+idx+1] != 0) *(pStrength + 9) = 2;

            // Then Strength = 1
            pmvx -= 14;
            pmvy -= 14; // -14 = -16 + 2
            pStr = pStrength + 1;
            if (*pStr == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            pmvx += 8;
            pmvy += 8;
            pStr = pStrength + 5;
            if (*pStr == 0)
            {
                //check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            pmvx += 8;
            pmvy += 8;
            pStr = pStrength + 9;
            if (*pStr == 0)
            {
                //within the same 8x8 block, no need to check the reference id
                //only need to check the |mv difference| >= 4
                tmp = *pmvx - *(pmvx - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;

                tmp = *pmvy - *(pmvy - 8);
                if (tmp < 0) tmp = -tmp;
                if (tmp >= 4) *pStr = 1;
            }

            // update some variables for the next two rows
            pmvx -= 14;
            pmvy -= 14; // -14 = -16 + 2
            pStrength += 2;
        } /* end of: for(idx=0; idx<2; idx++) */
    } /* end of: else if( MbQ->mbMode == AVC_I4 ...)
*/
}

/*
 *****************************************************************************************
 * \brief Filters one edge of 16 (luma) or 8 (chroma) pel
 *****************************************************************************************
*/
/* Horizontal-edge luma deblocking across a 16-pel edge.  SrcPtr points at the first
 * pixel of the lower (Q) row, pitch is the luma row stride; Strength[0..3] hold the
 * Bs value for each 4-pel segment.  The threshold tests are done branchlessly: each
 * (|diff| - limit) is negative iff the test passes, so ANDing them and checking the
 * sign bit (< 0) verifies all conditions at once. */
void EdgeLoop_Luma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
{
    int pel, ap = 0, aq = 0, Strng;
    int C0, c0, dif, AbsDelta, tmp, tmp1;
    int L2 = 0, L1, L0, R0, R1, R2 = 0, RL0;

    if (Strength[0] == 4)    /* INTRA strong filtering */
    {
        for (pel = 0; pel < 16; pel++)
        {
            R0 = SrcPtr[0];
            R1 = SrcPtr[pitch];
            L0 = SrcPtr[-pitch];
            L1 = SrcPtr[-(pitch<<1)];

            // |R0 - R1| < Beta
            tmp1 = R0 - R1;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp = (tmp1 - Beta);

            //|L0 - L1| < Beta
            tmp1 = L0 - L1;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp &= (tmp1 - Beta);

            //|R0 - L0| < Alpha
            AbsDelta = R0 - L0;
            if (AbsDelta < 0) AbsDelta = -AbsDelta;
            tmp &= (AbsDelta - Alpha);

            if (tmp < 0)    // all three threshold tests passed
            {
                AbsDelta -= ((Alpha >> 2) + 2);
                R2 = SrcPtr[pitch<<1]; //inc2
                L2 = SrcPtr[-(pitch+(pitch<<1))]; // -inc3

                // |R0 - R2| < Beta && |R0 - L0| < (Alpha/4 + 2)
                tmp = R0 - R2;
                if (tmp < 0) tmp = -tmp;
                aq = AbsDelta & (tmp - Beta);

                // |L0 - L2| < Beta && |R0 - L0| < (Alpha/4 + 2)
                tmp = L0 - L2;
                if (tmp < 0) tmp = -tmp;
                ap = AbsDelta & (tmp - Beta);

                if (aq < 0)     // strong 3-tap filtering on the Q (lower) side
                {
                    tmp = R1 + R0 + L0;
                    SrcPtr[0] = (L1 + (tmp << 1) + R2 + 4) >> 3;
                    tmp += R2;
                    SrcPtr[pitch] = (tmp + 2) >> 2;
                    SrcPtr[pitch<<1] = (((SrcPtr[(pitch+(pitch<<1))] + R2) << 1) + tmp + 4) >> 3;
                }
                else
                    SrcPtr[0] = ((R1 << 1) + R0 + L1 + 2) >> 2;

                if (ap < 0)     // strong 3-tap filtering on the P (upper) side
                {
                    tmp = L1 + R0 + L0;
                    SrcPtr[-pitch] = (R1 + (tmp << 1) + L2 + 4) >> 3;
                    tmp += L2;
                    SrcPtr[-(pitch<<1)] = (tmp + 2) >> 2;
                    SrcPtr[-(pitch+(pitch<<1))] = (((SrcPtr[-(pitch<<2)] + L2) << 1) + tmp + 4) >> 3;
                }
                else
                    SrcPtr[-pitch] = ((L1 << 1) + L0 + R1 + 2) >> 2;
            } /* if(tmp < 0) */

            SrcPtr ++; // Increment to next set of pixel
        } /* end of: for(pel=0; pel<16; pel++) */
    } /* if(Strength[0] == 4) */
    else   /* Normal filtering */
    {
        for (pel = 0; pel < 16; pel++)
        {
            Strng = Strength[pel >> 2];
            if (Strng)
            {
                R0 = SrcPtr[0];
                R1 = SrcPtr[pitch];
                L0 = SrcPtr[-pitch];
                L1 = SrcPtr[-(pitch<<1)]; // inc2

                //|R0 - L0| < Alpha
                tmp1 = R0 - L0;
                if (tmp1 < 0) tmp1 = -tmp1;
                tmp = (tmp1 - Alpha);

                // |R0 - R1| < Beta
                tmp1 = R0 - R1;
                if (tmp1 < 0) tmp1 = -tmp1;
                tmp &= (tmp1 - Beta);

                //|L0 - L1| < Beta
                tmp1 = L0 - L1;
                if (tmp1 < 0) tmp1 = -tmp1;
                tmp &= (tmp1 - Beta);

                if (tmp < 0)
                {
                    R2 = SrcPtr[pitch<<1]; //inc2
                    L2 = SrcPtr[-(pitch+(pitch<<1))]; // -inc3

                    // |R0 - R2| < Beta
                    tmp = R0 - R2;
                    if (tmp < 0) tmp = -tmp;
                    aq = tmp - Beta;

                    // |L0 - L2| < Beta
                    tmp = L0 - L2;
                    if (tmp < 0) tmp = -tmp;
                    ap = tmp - Beta;

                    c0 = C0 = clipTable[Strng];
                    if (ap < 0) c0++;
                    if (aq < 0) c0++;

                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
                    // branchless clip of dif into [-c0, c0] via the sign bit
                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
                    tmp = dif + c0;
                    if ((uint)tmp > (uint)c0 << 1)
                    {
                        tmp = ~(tmp >> 31);
                        dif = (tmp & (c0 << 1)) - c0;
                    }

                    //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
                    RL0 = R0 + L0;
                    R0 -= dif;
                    L0 += dif;
                    if ((uint)R0 > 255)     // clamp to [0, 255]
                    {
                        tmp = ~(R0 >> 31);
                        R0 = tmp & 255;
                    }
                    if ((uint)L0 > 255)
                    {
                        tmp = ~(L0 >> 31);
                        L0 = tmp & 255;
                    }
                    SrcPtr[-pitch] = L0;
                    SrcPtr[0] = R0;

                    if (C0 != 0) /* Multiple zeros in the clip tables */
                    {
                        if (aq < 0) // SrcPtr[inc] += IClip(-C0, C0,(R2 + ((RL0 + 1) >> 1) - (R1<<1)) >> 1);
                        {
                            R2 = (R2 + ((RL0 + 1) >> 1) - (R1 << 1)) >> 1;
                            tmp = R2 + C0;
                            if ((uint)tmp > (uint)C0 << 1)
                            {
                                tmp = ~(tmp >> 31);
                                R2 = (tmp & (C0 << 1)) - C0;
                            }
                            SrcPtr[pitch] += R2;
                        }
                        if (ap < 0) //SrcPtr[-inc2] += IClip(-C0, C0,(L2 + ((RL0 + 1) >> 1) - (L1<<1)) >> 1);
                        {
                            L2 = (L2 + ((RL0 + 1) >> 1) - (L1 << 1)) >> 1;
                            tmp = L2 + C0;
                            if ((uint)tmp > (uint)C0 << 1)
                            {
                                tmp = ~(tmp >> 31);
                                L2 = (tmp & (C0 << 1)) - C0;
                            }
                            SrcPtr[-(pitch<<1)] += L2;
                        }
                    }
                } /* if(tmp < 0) */
            } /* end of: if((Strng = Strength[pel >> 2])) */

            SrcPtr ++; // Increment to next set of pixel
        } /* for(pel=0; pel<16; pel++) */
    } /* else if(Strength[0] == 4) */
}

/* Vertical-edge luma deblocking; reads 8 pels per row as two 32-bit words
 * (NOTE(review): assumes SrcPtr-4 is word aligned and the platform is
 * little-endian -- confirm against the callers). */
void EdgeLoop_Luma_vertical(uint8* SrcPtr, uint8
*Strength, int Alpha, int Beta, int *clipTable, int pitch)
{
    int pel, ap = 1, aq = 1;
    int C0, c0, dif, AbsDelta, Strng, tmp, tmp1;
    int L2 = 0, L1, L0, R0, R1, R2 = 0;
    uint8 *ptr, *ptr1;
    register uint R_in, L_in;
    uint R_out, L_out;

    if (Strength[0] == 4)    /* INTRA strong filtering */
    {
        for (pel = 0; pel < 16; pel++)
        {
            // Read 8 pels
            R_in = *((uint *)SrcPtr);       // R_in = {R3, R2, R1, R0}
            L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}
            R1 = (R_in >> 8) & 0xff;
            R0 = R_in & 0xff;
            L0 = L_in >> 24;
            L1 = (L_in >> 16) & 0xff;

            // |R0 - R1| < Beta
            tmp1 = (R_in & 0xff) - R1;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp = (tmp1 - Beta);

            //|L0 - L1| < Beta
            tmp1 = (L_in >> 24) - L1;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp &= (tmp1 - Beta);

            //|R0 - L0| < Alpha
            AbsDelta = (R_in & 0xff) - (L_in >> 24);
            if (AbsDelta < 0) AbsDelta = -AbsDelta;
            tmp &= (AbsDelta - Alpha);

            if (tmp < 0)    // all three threshold tests passed
            {
                AbsDelta -= ((Alpha >> 2) + 2);
                R2 = (R_in >> 16) & 0xff;
                L2 = (L_in >> 8) & 0xff;

                // |R0 - R2| < Beta && |R0 - L0| < (Alpha/4 + 2)
                tmp1 = (R_in & 0xff) - R2;
                if (tmp1 < 0) tmp1 = -tmp1;
                aq = AbsDelta & (tmp1 - Beta);

                // |L0 - L2| < Beta && |R0 - L0| < (Alpha/4 + 2)
                tmp1 = (L_in >> 24) - L2;
                if (tmp1 < 0) tmp1 = -tmp1;
                ap = AbsDelta & (tmp1 - Beta);

                ptr = SrcPtr;
                if (aq < 0)     // strong filtering, Q side written back as one word
                {
                    R_out = (R_in >> 24) << 24; // Keep R3 at the fourth byte
                    tmp = R0 + L0 + R1;
                    R_out |= (((tmp << 1) + L1 + R2 + 4) >> 3);
                    tmp += R2;
                    R_out |= (((tmp + 2) >> 2) << 8);
                    tmp1 = ((R_in >> 24) + R2) << 1;
                    R_out |= (((tmp1 + tmp + 4) >> 3) << 16);
                    *((uint *)SrcPtr) = R_out;
                }
                else
                    *ptr = ((R1 << 1) + R0 + L1 + 2) >> 2;

                if (ap < 0)     // strong filtering, P side written back as one word
                {
                    L_out = (L_in << 24) >> 24; // Keep L3 at the first byte
                    tmp = R0 + L0 + L1;
                    L_out |= ((((tmp << 1) + R1 + L2 + 4) >> 3) << 24);
                    tmp += L2;
                    L_out |= (((tmp + 2) >> 2) << 16);
                    tmp1 = ((L_in & 0xff) + L2) << 1;
                    L_out |= (((tmp1 + tmp + 4) >> 3) << 8);
                    *((uint *)(SrcPtr - 4)) = L_out;
                }
                else
                    *(--ptr) = ((L1 << 1) + L0 + R1 + 2) >> 2;
            } /* if(tmp < 0) */

            SrcPtr += pitch; // Increment to next set of pixel
        } /* end of: for(pel=0; pel<16; pel++) */
    } /* if(Strength[0] == 4) */
    else   /* Normal filtering */
    {
        for (pel = 0; pel < 16; pel++)
        {
            Strng = Strength[pel >> 2];
            if (Strng)
            {
                // Read 8 pels
                R_in = *((uint *)SrcPtr);       // R_in = {R3, R2, R1, R0}
                L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}
                R1 = (R_in >> 8) & 0xff;
                R0 = R_in & 0xff;
                L0 = L_in >> 24;
                L1 = (L_in >> 16) & 0xff;

                //|R0 - L0| < Alpha
                tmp = R0 - L0;
                if (tmp < 0) tmp = -tmp;
                tmp -= Alpha;

                // |R0 - R1| < Beta
                tmp1 = R0 - R1;
                if (tmp1 < 0) tmp1 = -tmp1;
                tmp &= (tmp1 - Beta);

                //|L0 - L1| < Beta
                tmp1 = L0 - L1;
                if (tmp1 < 0) tmp1 = -tmp1;
                tmp &= (tmp1 - Beta);

                if (tmp < 0)
                {
                    L2 = SrcPtr[-3];
                    R2 = SrcPtr[2];

                    // |R0 - R2| < Beta
                    tmp = R0 - R2;
                    if (tmp < 0) tmp = -tmp;
                    aq = tmp - Beta;

                    // |L0 - L2| < Beta
                    tmp = L0 - L2;
                    if (tmp < 0) tmp = -tmp;
                    ap = tmp - Beta;

                    c0 = C0 = clipTable[Strng];
                    if (ap < 0) c0++;
                    if (aq < 0) c0++;

                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
                    tmp = dif + c0;
                    if ((uint)tmp > (uint)c0 << 1)
                    {
                        tmp = ~(tmp >> 31);
                        dif = (tmp & (c0 << 1)) - c0;
                    }

                    ptr = SrcPtr;
                    ptr1 = SrcPtr - 1;
                    //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
                    R_in = R0 - dif;
                    L_in = L0 + dif; /* cannot re-use R0 and L0 here */
                    if ((uint)R_in > 255)   // clamp to [0, 255]
                    {
                        tmp = ~((int)R_in >> 31);
                        R_in = tmp & 255;
                    }
                    if ((uint)L_in > 255)
                    {
                        tmp = ~((int)L_in >> 31);
                        L_in = tmp & 255;
                    }
                    *ptr1-- = L_in;
                    *ptr++ = R_in;

                    if (C0 != 0) // Multiple zeros in the clip tables
                    {
                        if (ap < 0) //SrcPtr[-inc2] += IClip(-C0, C0,(L2 + ((RL0 + 1) >> 1) - (L1<<1)) >> 1);
                        {
                            L2 = (L2 + ((R0 + L0 + 1) >> 1) - (L1 << 1)) >> 1;
                            tmp = L2 + C0;
                            if ((uint)tmp > (uint)C0 << 1)
                            {
                                tmp = ~(tmp >> 31);
                                L2 = (tmp & (C0 << 1)) - C0;
                            }
                            *ptr1 += L2;
                        }
                        if (aq < 0) // SrcPtr[inc] += IClip(-C0, C0,(R2 + ((RL0 + 1) >> 1) - (R1<<1)) >> 1);
                        {
                            R2 = (R2 + ((R0 + L0 + 1) >> 1) - (R1 << 1)) >> 1;
                            tmp = R2 + C0;
                            if ((uint)tmp > (uint)C0 << 1)
                            {
                                tmp = ~(tmp >> 31);
                                R2 = (tmp & (C0 << 1)) - C0;
                            }
                            *ptr += R2;
                        }
                    }
                } /* if(tmp < 0) */
            } /* end of: if((Strng = Strength[pel >> 2])) */

            SrcPtr += pitch; // Increment to next set of pixel
        } /* for(pel=0; pel<16; pel++) */
    } /* else if(Strength[0] == 4) */
}

/* Vertical-edge chroma deblocking over an 8-pel edge; the 16-iteration loop with
 * pel++/pel+=3 steps visits two chroma rows per 4-entry strength segment. */
void EdgeLoop_Chroma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
{
    int pel, Strng;
    int c0, dif;
    int L1, L0, R0, R1, tmp, tmp1;
    uint8 *ptr;
    uint R_in, L_in;

    for (pel = 0; pel < 16; pel++)
    {
        Strng = Strength[pel>>2];
        if (Strng)
        {
            // Read 8 pels
            R_in = *((uint *)SrcPtr);       // R_in = {R3, R2, R1, R0}
            L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}
            R1 = (R_in >> 8) & 0xff;
            R0 = R_in & 0xff;
            L0 = L_in >> 24;
            L1 = (L_in >> 16) & 0xff;

            // |R0 - R1| < Beta
            tmp1 = R0 - R1;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp = (tmp1 - Beta);

            //|L0 - L1| < Beta
            tmp1 = L0 - L1;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp &= (tmp1 - Beta);

            //|R0 - L0| < Alpha
            tmp1 = R0 - L0;
            if (tmp1 < 0) tmp1 = -tmp1;
            tmp &= (tmp1 - Alpha);

            if (tmp < 0)
            {
                ptr = SrcPtr;
                if (Strng == 4) /* INTRA strong filtering */
                {
                    *ptr-- = ((R1 << 1) + R0 + L1 + 2) >> 2;
                    *ptr = ((L1 << 1) + L0 + R1 + 2) >> 2;
                }
                else /* normal filtering */
                {
                    c0 = clipTable[Strng] + 1;
                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
                    tmp = dif + c0;
                    if ((uint)tmp > (uint)c0 << 1)
                    {
                        tmp = ~(tmp >> 31);
                        dif = (tmp & (c0 << 1)) - c0;
                    }

                    //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
                    L0 += dif;
                    R0 -= dif;
                    if ((uint)L0 > 255)     // clamp to [0, 255]
                    {
                        tmp = ~(L0 >> 31);
                        L0 = tmp & 255;
                    }
                    if ((uint)R0 > 255)
                    {
                        tmp = ~(R0 >> 31);
                        R0 = tmp & 255;
                    }
                    *ptr-- = R0;
                    *ptr = L0;
                }
            }
            pel ++;
            SrcPtr += pitch; // Increment to next set of pixel
        } /* end of: if((Strng = Strength[pel >> 2])) */
        else
        {
            pel += 3;   // whole 4-entry segment is zero: skip both chroma rows
            SrcPtr += (pitch << 1); //PtrInc << 1;
        }
    } /* end of: for(pel=0; pel<16; pel++) */
}

/* Horizontal-edge chroma deblocking; same segment-skipping scheme as the
 * vertical chroma loop above. */
void EdgeLoop_Chroma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
{
    int pel, Strng;
    int c0, dif;
    int
L1, L0, R0, R1, tmp, tmp1; for (pel = 0; pel < 16; pel++) { Strng = Strength[pel>>2]; if (Strng) { R0 = SrcPtr[0]; L0 = SrcPtr[-pitch]; L1 = SrcPtr[-(pitch<<1)]; //inc2 R1 = SrcPtr[pitch]; // |R0 - R1| < Beta tmp1 = R0 - R1; if (tmp1 < 0) tmp1 = -tmp1; tmp = (tmp1 - Beta); //|L0 - L1| < Beta tmp1 = L0 - L1; if (tmp1 < 0) tmp1 = -tmp1; tmp &= (tmp1 - Beta); //|R0 - L0| < Alpha tmp1 = R0 - L0; if (tmp1 < 0) tmp1 = -tmp1; tmp &= (tmp1 - Alpha); if (tmp < 0) { if (Strng == 4) /* INTRA strong filtering */ { SrcPtr[0] = ((R1 << 1) + R0 + L1 + 2) >> 2; SrcPtr[-pitch] = ((L1 << 1) + L0 + R1 + 2) >> 2; } else /* normal filtering */ { c0 = clipTable[Strng] + 1; //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3); dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3; tmp = dif + c0; if ((uint)tmp > (uint)c0 << 1) { tmp = ~(tmp >> 31); dif = (tmp & (c0 << 1)) - c0; } //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif); //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif); L0 += dif; R0 -= dif; if ((uint)L0 > 255) { tmp = ~(L0 >> 31); L0 = tmp & 255; } if ((uint)R0 > 255) { tmp = ~(R0 >> 31); R0 = tmp & 255; } SrcPtr[0] = R0; SrcPtr[-pitch] = L0; } } pel ++; SrcPtr ++; // Increment to next set of pixel } /* end of: if((Strng = Strength[pel >> 2])) */ else { pel += 3; SrcPtr += 2; } } /* end of: for(pel=0; pel<16; pel++) */ } ================================================ FILE: RtspCamera/jni/avc_h264/common/src/dpb.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "avclib_common.h" // xxx pa #define LOG_TAG "dbp" #include "android/log.h" #define DPB_MEM_ATTR 0 AVCStatus InitDPB(AVCHandle *avcHandle, AVCCommonObj *video, int FrameHeightInMbs, int PicWidthInMbs, bool padding) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "InitDPB(int FrameHeightInMbs <%d>, int PicWidthInMbs <%d>, bool padding <%d>)", FrameHeightInMbs, PicWidthInMbs, padding); AVCDecPicBuffer *dpb = video->decPicBuf; int level, framesize, num_fs; void *userData = avcHandle->userData; #ifndef PV_MEMORY_POOL uint32 addr; #endif uint16 refIdx = 0; level = video->currSeqParams->level_idc; for (num_fs = 0; num_fs < MAX_FS; num_fs++) { dpb->fs[num_fs] = NULL; } framesize = (int)(((FrameHeightInMbs * PicWidthInMbs) << 7) * 3); if (padding) { video->padded_size = (int)((((FrameHeightInMbs + 2) * (PicWidthInMbs + 2)) << 7) * 3) - framesize; } else { video->padded_size = 0; } #ifndef PV_MEMORY_POOL if (dpb->decoded_picture_buffer) { avcHandle->CBAVC_Free(userData, (int)dpb->decoded_picture_buffer); dpb->decoded_picture_buffer = NULL; } #endif /* need to allocate one extra frame for current frame, DPB only defines for reference frames */ dpb->num_fs = (uint32)(MaxDPBX2[mapLev2Idx[level]] << 2) / (3 * FrameHeightInMbs * PicWidthInMbs) + 1; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "InitDPB dpb->num_fs = %d", dpb->num_fs); if (dpb->num_fs > MAX_FS) { dpb->num_fs = MAX_FS; } if (video->currSeqParams->num_ref_frames + 1 > (uint32)dpb->num_fs) { dpb->num_fs = 
video->currSeqParams->num_ref_frames + 1; } dpb->dpb_size = dpb->num_fs * (framesize + video->padded_size); // dpb->dpb_size = (uint32)MaxDPBX2[mapLev2Idx[level]]*512 + framesize; #ifndef PV_MEMORY_POOL dpb->decoded_picture_buffer = (uint8*) avcHandle->CBAVC_Malloc(userData, dpb->dpb_size, 100/*DPB_MEM_ATTR*/); if (dpb->decoded_picture_buffer == NULL || dpb->decoded_picture_buffer&0x3) // not word aligned return AVC_MEMORY_FAIL; #endif dpb->used_size = 0; num_fs = 0; while (num_fs < dpb->num_fs) { /* fs is an array pointers to AVCDecPicture */ dpb->fs[num_fs] = (AVCFrameStore*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCFrameStore), 101/*DEFAULT_ATTR*/); if (dpb->fs[num_fs] == NULL) { return AVC_MEMORY_FAIL; } #ifndef PV_MEMORY_POOL /* assign the actual memory for Sl, Scb, Scr */ dpb->fs[num_fs]->base_dpb = dpb->decoded_picture_buffer + dpb->used_size; #endif dpb->fs[num_fs]->IsReference = 0; dpb->fs[num_fs]->IsLongTerm = 0; dpb->fs[num_fs]->IsOutputted = 3; dpb->fs[num_fs]->frame.RefIdx = refIdx++; /* this value will remain unchanged through out the encoding session */ dpb->fs[num_fs]->frame.picType = AVC_FRAME; dpb->fs[num_fs]->frame.isLongTerm = 0; dpb->fs[num_fs]->frame.isReference = 0; video->RefPicList0[num_fs] = &(dpb->fs[num_fs]->frame); dpb->fs[num_fs]->frame.padded = 0; dpb->used_size += (framesize + video->padded_size); num_fs++; } return AVC_SUCCESS; } OSCL_EXPORT_REF AVCStatus AVCConfigureSequence(AVCHandle *avcHandle, AVCCommonObj *video, bool padding) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVCConfigureSequence"); void *userData = avcHandle->userData; AVCDecPicBuffer *dpb = video->decPicBuf; int framesize, ii; /* size of one frame */ uint PicWidthInMbs, PicHeightInMapUnits, FrameHeightInMbs, PicSizeInMapUnits; uint num_fs; /* derived variables from SPS */ PicWidthInMbs = video->currSeqParams->pic_width_in_mbs_minus1 + 1; PicHeightInMapUnits = video->currSeqParams->pic_height_in_map_units_minus1 + 1 ; FrameHeightInMbs = (2 - 
video->currSeqParams->frame_mbs_only_flag) * PicHeightInMapUnits ;
    PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits ;

    /* Rebuild DPB and per-picture arrays only when picture size or level changed. */
    if (video->PicSizeInMapUnits != PicSizeInMapUnits || video->currSeqParams->level_idc != video->level_idc)
    {
        /* make sure you mark all the frames as unused for reference for flushing*/
        for (ii = 0; ii < dpb->num_fs; ii++)
        {
            dpb->fs[ii]->IsReference = 0;
            dpb->fs[ii]->IsOutputted |= 0x02;
        }

        num_fs = (uint32)(MaxDPBX2[(uint32)mapLev2Idx[video->currSeqParams->level_idc]] << 2) / (3 * PicSizeInMapUnits) + 1;
        if (num_fs >= MAX_FS)
        {
            num_fs = MAX_FS;
        }
#ifdef PV_MEMORY_POOL
        /* ask the application to allocate the frame memory (with/without a padding border) */
        if (padding)
        {
            avcHandle->CBAVC_DPBAlloc(avcHandle->userData,
                                      PicSizeInMapUnits + ((PicWidthInMbs + 2) << 1) + (PicHeightInMapUnits << 1), num_fs);
        }
        else
        {
            avcHandle->CBAVC_DPBAlloc(avcHandle->userData, PicSizeInMapUnits, num_fs);
        }
#endif
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVCConfigureSequence CleanUpDPB & InitDPB");

        CleanUpDPB(avcHandle, video);
        if (InitDPB(avcHandle, video, FrameHeightInMbs, PicWidthInMbs, padding) != AVC_SUCCESS)
        {
            return AVC_FAIL;
        }

        /* Allocate video->mblock upto PicSizeInMbs and populate the structure
           such as the neighboring MB pointers. */
        framesize = (FrameHeightInMbs * PicWidthInMbs);
        if (video->mblock)
        {
            avcHandle->CBAVC_Free(userData, (uint32)video->mblock);
            video->mblock = NULL;
        }
        video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR);
        if (video->mblock == NULL)
        {
            return AVC_FAIL;
        }

        /* -1 marks every MB as belonging to no slice yet */
        for (ii = 0; ii < framesize; ii++)
        {
            video->mblock[ii].slice_id = -1;
        }

        /* Allocate memory for intra prediction */
#ifdef MB_BASED_DEBLOCK
        video->intra_pred_top = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 4, FAST_MEM_ATTR);
        if (video->intra_pred_top == NULL)
        {
            return AVC_FAIL;
        }
        video->intra_pred_top_cb = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 3, FAST_MEM_ATTR);
        if (video->intra_pred_top_cb == NULL)
        {
            return AVC_FAIL;
        }
        video->intra_pred_top_cr = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 3, FAST_MEM_ATTR);
        if (video->intra_pred_top_cr == NULL)
        {
            return AVC_FAIL;
        }
#endif
        /* Allocate slice group MAP map */
        if (video->MbToSliceGroupMap)
        {
            avcHandle->CBAVC_Free(userData, (uint32)video->MbToSliceGroupMap);
            video->MbToSliceGroupMap = NULL;
        }
        video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * PicSizeInMapUnits * 2, 7/*DEFAULT_ATTR*/);
        if (video->MbToSliceGroupMap == NULL)
        {
            return AVC_FAIL;
        }
        video->PicSizeInMapUnits = PicSizeInMapUnits;
        video->level_idc = video->currSeqParams->level_idc;
    }

    return AVC_SUCCESS;
}

/* Free all frame stores (and, without PV_MEMORY_POOL, the shared pixel buffer). */
OSCL_EXPORT_REF AVCStatus CleanUpDPB(AVCHandle *avcHandle, AVCCommonObj *video)
{
    AVCDecPicBuffer *dpb = video->decPicBuf;
    int ii;
    void *userData = avcHandle->userData;

    for (ii = 0; ii < MAX_FS; ii++)
    {
        if (dpb->fs[ii] != NULL)
        {
            avcHandle->CBAVC_Free(userData, (int)dpb->fs[ii]);
            dpb->fs[ii] = NULL;
        }
    }
#ifndef PV_MEMORY_POOL
    if (dpb->decoded_picture_buffer)
    {
        avcHandle->CBAVC_Free(userData, (int)dpb->decoded_picture_buffer);
        dpb->decoded_picture_buffer = NULL;
    }
#endif
    dpb->used_size = 0;
    dpb->dpb_size = 0;

    return AVC_SUCCESS;
}

OSCL_EXPORT_REF AVCStatus
DPBInitBuffer(AVCHandle *avcHandle, AVCCommonObj *video)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DPBInitBuffer");
    AVCDecPicBuffer *dpb = video->decPicBuf;
    int ii, status;

    /* Before doing any decoding, check if there's a frame memory available */
    /* look for next unused dpb->fs, or complementary field pair */
    /* video->currPic is assigned to this */
    /* There's also restriction on the frame_num, see page 59 of JVT-I1010.doc. */
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        /* looking for the one not used or not reference and has been outputted */
        /* NOTE(review): IsOutputted == 3 appears to mean "output AND unreferenced",
           i.e. the buffer is fully recyclable — confirm against the AVCFrameStore
           declaration in avcint_common.h. */
        if (dpb->fs[ii]->IsReference == 0 && dpb->fs[ii]->IsOutputted == 3)
        {
            video->currFS = dpb->fs[ii];
#ifdef PV_MEMORY_POOL
            /* bind a frame buffer from the application's memory pool to this store */
            status = avcHandle->CBAVC_FrameBind(avcHandle->userData, ii, &(video->currFS->base_dpb));
            if (status == AVC_FAIL)
            {
                __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DPBInitBuffer CBAVC_FrameBind-> return AVC_NO_BUFFER for fs: %d", ii);
                return AVC_NO_BUFFER; /* this should not happen */
            }
#endif
            break;
        }
    }
    if (ii == dpb->num_fs)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DPBInitBuffer return AVC_PICTURE_OUTPUT_READY");
        return AVC_PICTURE_OUTPUT_READY; /* no empty frame available */
    }
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DPBInitBuffer final return AVC_SUCCESS");
    return AVC_SUCCESS;
}

/* Initialize video->currFS / video->currPic for the picture about to be decoded:
   record POC/frame_num, clear the reference/output flags, and point the three
   pixel planes (Sl/Scb/Scr) into the bound DPB buffer, with an edge-padding
   offset when video->padded_size is set. */
OSCL_EXPORT_REF void DPBInitPic(AVCCommonObj *video, int CurrPicNum)
{
    int offset = 0;
    int offsetc = 0;
    int luma_framesize;
    /* this part has to be set here, assuming that slice header and POC have been decoded. */
    /* used in GetOutput API */
    video->currFS->PicOrderCnt = video->PicOrderCnt;
    video->currFS->FrameNum = video->sliceHdr->frame_num;
    video->currFS->FrameNumWrap = CurrPicNum;    // MC_FIX
    /* initialize everything to zero */
    video->currFS->IsOutputted = 0;
    video->currFS->IsReference = 0;
    video->currFS->IsLongTerm = 0;
    video->currFS->frame.isReference = FALSE;
    video->currFS->frame.isLongTerm = FALSE;

    /* initialize the pixel pointer to NULL */
    video->currFS->frame.Sl = video->currFS->frame.Scb = video->currFS->frame.Scr = NULL;

    /* determine video->currPic */
    /* assign dbp->base_dpb to fs[i]->frame.Sl, Scb, Scr .*/
    /* For PicSizeInMbs, see DecodeSliceHeader() */
    video->currPic = &(video->currFS->frame);

    video->currPic->padded = 0; // reset this flag to not-padded

    if (video->padded_size)
    {
        /* 16-pixel border on every side: skip 16 padded rows plus 16 columns */
        offset = ((video->PicWidthInSamplesL + 32) << 4) + 16; // offset to the origin
        offsetc = (offset >> 2) + 4;                           // chroma origin (quarter area, 8-pixel border)
        luma_framesize = (int)((((video->FrameHeightInMbs + 2) * (video->PicWidthInMbs + 2)) << 8));
    }
    else
        luma_framesize = video->PicSizeInMbs << 8; /* 256 luma samples per macroblock */

    video->currPic->Sl = video->currFS->base_dpb + offset;
    video->currPic->Scb = video->currFS->base_dpb + luma_framesize + offsetc;
    video->currPic->Scr = video->currPic->Scb + (luma_framesize >> 2);
    video->currPic->pitch = video->PicWidthInSamplesL + (video->padded_size == 0 ? 0 : 32);
    video->currPic->height = video->PicHeightInSamplesL;
    video->currPic->width = video->PicWidthInSamplesL;
    video->currPic->PicNum = CurrPicNum;
}

/* to release skipped frame after encoding */
OSCL_EXPORT_REF void DPBReleaseCurrentFrame(AVCHandle *avcHandle, AVCCommonObj *video)
{
    AVCDecPicBuffer *dpb = video->decPicBuf;
    int ii;

    video->currFS->IsOutputted = 3; // return this buffer.
#ifdef PV_MEMORY_POOL /* for non-memory pool, no need to do anything */
    /* search for current frame index */
    ii = dpb->num_fs;
    while (ii--)
    {
        if (dpb->fs[ii] == video->currFS)
        {
            avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);
            break;
        }
    }
#endif
    return ;
}

/* see subclause 8.2.5.1 */
/* Decoded reference picture marking: for IDR pictures flush/relabel the whole
   DPB; otherwise apply sliding-window or adaptive marking, then verify the
   short+long term reference count stays within num_ref_frames. */
OSCL_EXPORT_REF AVCStatus StorePictureInDPB(AVCHandle *avcHandle, AVCCommonObj *video)
{
    AVCStatus status;
    AVCDecPicBuffer *dpb = video->decPicBuf;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    int ii, num_ref;

    /* number 1 of 8.2.5.1, we handle gaps in frame_num differently without using the memory */
    /* to be done!!!! */

    /* number 3 of 8.2.5.1 */
    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        for (ii = 0; ii < dpb->num_fs; ii++)
        {
            if (dpb->fs[ii] != video->currFS) /* not current frame */
            {
                dpb->fs[ii]->IsReference = 0; /* mark as unused for reference */
                dpb->fs[ii]->IsLongTerm = 0;  /* but still used until output */
                dpb->fs[ii]->IsOutputted |= 0x02;
#ifdef PV_MEMORY_POOL
                if (dpb->fs[ii]->IsOutputted == 3)
                {
                    avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);
                }
#endif
            }
        }
        video->currPic->isReference = TRUE;
        video->currFS->IsReference = 3;
        if (sliceHdr->long_term_reference_flag == 0)
        {
            video->currPic->isLongTerm = FALSE;
            video->currFS->IsLongTerm = 0;
            video->MaxLongTermFrameIdx = -1;
        }
        else
        {
            video->currPic->isLongTerm = TRUE;
            video->currFS->IsLongTerm = 3;
            video->currFS->LongTermFrameIdx = 0;
            video->MaxLongTermFrameIdx = 0;
        }
        if (sliceHdr->no_output_of_prior_pics_flag)
        {
            for (ii = 0; ii < dpb->num_fs; ii++)
            {
                if (dpb->fs[ii] != video->currFS) /* not current frame */
                {
                    dpb->fs[ii]->IsOutputted = 3;
#ifdef PV_MEMORY_POOL
                    avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);
#endif
                }
            }
        }
        video->mem_mgr_ctrl_eq_5 = TRUE; /* flush reference frames  MC_FIX */
    }
    else
    {
        if (video->currPic->isReference == TRUE)
        {
            if (sliceHdr->adaptive_ref_pic_marking_mode_flag == 0)
            {
                status = sliding_window_process(avcHandle, video, dpb); /* we may have to do this after adaptive_memory_marking */
            }
            else
            {
                status = adaptive_memory_marking(avcHandle, video, dpb, sliceHdr);
            }
            if (status != AVC_SUCCESS)
            {
                return status;
            }
        }
    }
    /* number 4 of 8.2.5.1 */
    /* This basically says every frame must be at least used for short-term ref. */
    /* Need to be revisited!!! */
    /* look at insert_picture_in_dpb() */
    if (video->nal_unit_type != AVC_NALTYPE_IDR && video->currPic->isLongTerm == FALSE)
    {
        if (video->currPic->isReference)
        {
            video->currFS->IsReference = 3;
        }
        else
        {
            video->currFS->IsReference = 0;
        }
        video->currFS->IsLongTerm = 0;
    }

    /* check if number of reference frames doesn't exceed num_ref_frames */
    num_ref = 0;
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii]->IsReference)
        {
            num_ref++;
        }
    }

    if (num_ref > (int)video->currSeqParams->num_ref_frames)
    {
        return AVC_FAIL; /* out of range */
    }

    return AVC_SUCCESS;
}

/* Sliding-window marking (subclause 8.2.5.3): while the DPB holds at least
   num_ref_frames references (excluding the current frame), evict the
   short-term reference with the smallest FrameNumWrap. */
AVCStatus sliding_window_process(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb)
{
    int ii, numShortTerm, numLongTerm;
    int32 MinFrameNumWrap;
    int MinIdx;

    numShortTerm = 0;
    numLongTerm = 0;
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii] != video->currFS) /* do not count the current frame */
        {
            if (dpb->fs[ii]->IsLongTerm)
            {
                numLongTerm++;
            }
            else if (dpb->fs[ii]->IsReference)
            {
                numShortTerm++;
            }
        }
    }

    /* Remove this check to allow certain corrupted content to pass.
    Can re-enable it if it turns out to cause undesirable effect.
    if (numShortTerm <= 0)
    {
        return AVC_FAIL;
    }
    */
    while (numShortTerm + numLongTerm >= (int)video->currSeqParams->num_ref_frames)
    {
        /* get short-term ref frame with smallest PicOrderCnt */
        /* this doesn't work for all I-slice clip since PicOrderCnt will not be initialized */
        MinFrameNumWrap = 0x7FFFFFFF;
        MinIdx = -1;
        for (ii = 0; ii < dpb->num_fs; ii++)
        {
            if (dpb->fs[ii]->IsReference && !dpb->fs[ii]->IsLongTerm)
            {
                if (dpb->fs[ii]->FrameNumWrap < MinFrameNumWrap)
                {
                    MinFrameNumWrap = dpb->fs[ii]->FrameNumWrap;
                    MinIdx = ii;
                }
            }
        }
        if (MinIdx < 0) /* something wrong, impossible */
        {
            return AVC_FAIL;
        }

        /* mark the frame with smallest PicOrderCnt to be unused for reference */
        dpb->fs[MinIdx]->IsReference = 0;
        dpb->fs[MinIdx]->IsLongTerm = 0;
        dpb->fs[MinIdx]->frame.isReference = FALSE;
        dpb->fs[MinIdx]->frame.isLongTerm = FALSE;
        dpb->fs[MinIdx]->IsOutputted |= 0x02;
#ifdef PV_MEMORY_POOL
        if (dpb->fs[MinIdx]->IsOutputted == 3)
        {
            avcHandle->CBAVC_FrameUnbind(avcHandle->userData, MinIdx);
        }
#endif
        numShortTerm--;
    }
    return AVC_SUCCESS;
}

/* see subclause 8.2.5.4 */
/* Dispatch each memory_management_control_operation from the slice header
   (terminated by op 0) to its MemMgrCtrlOpN handler. */
AVCStatus adaptive_memory_marking(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, AVCSliceHeader *sliceHdr)
{
    int ii;

    ii = 0;
    while (ii < MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[ii] != 0)
    {
        switch (sliceHdr->memory_management_control_operation[ii])
        {
            case 1:
                MemMgrCtrlOp1(avcHandle, video, dpb, sliceHdr->difference_of_pic_nums_minus1[ii]);
                //      update_ref_list(dpb);
                break;
            case 2:
                MemMgrCtrlOp2(avcHandle, dpb, sliceHdr->long_term_pic_num[ii]);
                break;
            case 3:
                MemMgrCtrlOp3(avcHandle, video, dpb, sliceHdr->difference_of_pic_nums_minus1[ii], sliceHdr->long_term_frame_idx[ii]);
                break;
            case 4:
                MemMgrCtrlOp4(avcHandle, video, dpb, sliceHdr->max_long_term_frame_idx_plus1[ii]);
                break;
            case 5:
                MemMgrCtrlOp5(avcHandle, video, dpb);
                video->currFS->FrameNum = 0;
                //  video->currFS->PicOrderCnt = 0;
                break;
            case 6:
                MemMgrCtrlOp6(avcHandle, video, dpb, sliceHdr->long_term_frame_idx[ii]);
                break;
        }
        ii++;
    }
    if (ii == MAX_DEC_REF_PIC_MARKING)
    {
        return AVC_FAIL; /* exceed the limit */
    }

    return AVC_SUCCESS;
}

/* see subclause 8.2.5.4.1, mark short-term picture as "unused for reference" */
void MemMgrCtrlOp1(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, int difference_of_pic_nums_minus1)
{
    int picNumX, ii;

    picNumX = video->CurrPicNum - (difference_of_pic_nums_minus1 + 1);

    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii]->IsReference == 3 && dpb->fs[ii]->IsLongTerm == 0)
        {
            if (dpb->fs[ii]->frame.PicNum == picNumX)
            {
                unmark_for_reference(avcHandle, dpb, ii);
                return ;
            }
        }
    }
    return ;
}

/* see subclause 8.2.5.4.2 mark long-term picture as "unused for reference" */
void MemMgrCtrlOp2(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, int long_term_pic_num)
{
    int ii;
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii]->IsLongTerm == 3)
        {
            if (dpb->fs[ii]->frame.LongTermPicNum == long_term_pic_num)
            {
                unmark_for_reference(avcHandle, dpb, ii);
            }
        }
    }
}

/* see subclause 8.2.5.4.3 assign LongTermFrameIdx to a short-term ref picture */
void MemMgrCtrlOp3(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint difference_of_pic_nums_minus1, uint long_term_frame_idx)
{
    int picNumX, ii;

    picNumX = video->CurrPicNum - (difference_of_pic_nums_minus1 + 1);

    /* look for fs[i] with long_term_frame_idx */
    unmark_long_term_frame_for_reference_by_frame_idx(avcHandle, dpb, long_term_frame_idx);

    /* now mark the picture with picNumX to long term frame idx */
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii]->IsReference == 3)
        {
            if ((dpb->fs[ii]->frame.isLongTerm == FALSE) && (dpb->fs[ii]->frame.PicNum == picNumX))
            {
                dpb->fs[ii]->LongTermFrameIdx = long_term_frame_idx;
                dpb->fs[ii]->frame.LongTermPicNum = long_term_frame_idx;
                dpb->fs[ii]->frame.isLongTerm = TRUE;
                dpb->fs[ii]->IsLongTerm = 3;
                return;
            }
        }
    }
}

/* see subclause 8.2.5.4.4, MaxLongTermFrameIdx */
void MemMgrCtrlOp4(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint max_long_term_frame_idx_plus1)
{
    int ii;

    video->MaxLongTermFrameIdx = max_long_term_frame_idx_plus1 - 1;

    /* then mark long term frame with exceeding LongTermFrameIdx to unused for reference. */
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii]->IsLongTerm && dpb->fs[ii] != video->currFS)
        {
            if (dpb->fs[ii]->LongTermFrameIdx > video->MaxLongTermFrameIdx)
            {
                unmark_for_reference(avcHandle, dpb, ii);
            }
        }
    }
}

/* see subclause 8.2.5.4.5 mark all reference picture as "unused for reference" and setting
   MaxLongTermFrameIdx to "no long-term frame indices" */
void MemMgrCtrlOp5(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb)
{
    int ii;
    video->MaxLongTermFrameIdx = -1;
    for (ii = 0; ii < dpb->num_fs; ii++) /* including the current frame ??????*/
    {
        if (dpb->fs[ii] != video->currFS) // MC_FIX
        {
            unmark_for_reference(avcHandle, dpb, ii);
        }
    }
    video->mem_mgr_ctrl_eq_5 = TRUE;
}

/* see subclause 8.2.5.4.6 assing long-term frame index to the current picture */
void MemMgrCtrlOp6(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_idx)
{
    unmark_long_term_frame_for_reference_by_frame_idx(avcHandle, dpb, long_term_frame_idx);
    video->currFS->IsLongTerm = 3;
    video->currFS->IsReference = 3;
    video->currPic->isLongTerm = TRUE;
    video->currPic->isReference = TRUE;
    video->currFS->LongTermFrameIdx = long_term_frame_idx;
}

/* Clear all reference marks on frame store idx; once it is both output and
   unreferenced (IsOutputted == 3) the pool buffer is unbound. */
void unmark_for_reference(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint idx)
{
    AVCFrameStore *fs = dpb->fs[idx];
    fs->frame.isReference = FALSE;
    fs->frame.isLongTerm = FALSE;
    fs->IsLongTerm = 0;
    fs->IsReference = 0;
    fs->IsOutputted |= 0x02;
#ifdef PV_MEMORY_POOL
    if (fs->IsOutputted == 3)
    {
        avcHandle->CBAVC_FrameUnbind(avcHandle->userData, idx);
    }
#endif
    return ;
}

/* Unmark every long-term frame store whose LongTermFrameIdx matches. */
void unmark_long_term_frame_for_reference_by_frame_idx(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint long_term_frame_idx)
{
    int ii;
    for (ii = 0; ii < dpb->num_fs; ii++)
    {
        if (dpb->fs[ii]->IsLongTerm && (dpb->fs[ii]->LongTermFrameIdx == (int)long_term_frame_idx))
{
            unmark_for_reference(avcHandle, dpb, ii);
        }
    }
}

================================================
FILE: RtspCamera/jni/avc_h264/common/src/fmo.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avclib_common.h"
#include "oscl_mem.h"

/* see subclause 8.2.2 Decoding process for macroblock to slice group map */
/* Fill video->MbToSliceGroupMap according to the PPS slice_group_map_type;
   with a single slice group the map is simply all zeros. */
OSCL_EXPORT_REF AVCStatus FMOInit(AVCCommonObj *video)
{
    AVCPicParamSet *currPPS = video->currPicParams;
    int *MbToSliceGroupMap = video->MbToSliceGroupMap;
    int PicSizeInMapUnits = video->PicSizeInMapUnits;
    int PicWidthInMbs = video->PicWidthInMbs;

    if (currPPS->num_slice_groups_minus1 == 0)
    {
        oscl_memset(video->MbToSliceGroupMap, 0, video->PicSizeInMapUnits*sizeof(uint));
    }
    else
    {
        switch (currPPS->slice_group_map_type)
        {
            case 0:
                FmoGenerateType0MapUnitMap(MbToSliceGroupMap, currPPS->run_length_minus1, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);
                break;
            case 1:
                FmoGenerateType1MapUnitMap(MbToSliceGroupMap, PicWidthInMbs, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);
                break;
            case 2:
                FmoGenerateType2MapUnitMap(currPPS, MbToSliceGroupMap, PicWidthInMbs, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);
                break;
            case 3:
                FmoGenerateType3MapUnitMap(video, currPPS, MbToSliceGroupMap, PicWidthInMbs);
                break;
            case 4:
                FmoGenerateType4MapUnitMap(MbToSliceGroupMap, video->MapUnitsInSliceGroup0, currPPS->slice_group_change_direction_flag, PicSizeInMapUnits);
                break;
            case 5:
                FmoGenerateType5MapUnitMap(MbToSliceGroupMap, video, currPPS->slice_group_change_direction_flag, PicSizeInMapUnits);
                break;
            case 6:
                FmoGenerateType6MapUnitMap(MbToSliceGroupMap, (int*)currPPS->slice_group_id, PicSizeInMapUnits);
                break;
            default:
                return AVC_FAIL; /* out of range, shouldn't come this far */
        }
    }
    return AVC_SUCCESS;
}

/* see subclause 8.2.2.1 interleaved slice group map type*/
void FmoGenerateType0MapUnitMap(int *mapUnitToSliceGroupMap, uint *run_length_minus1, uint num_slice_groups_minus1, uint PicSizeInMapUnits)
{
    uint iGroup, j;
    uint i = 0;
    do
    {
        /* assign runs of run_length_minus1[g]+1 units to each group in turn,
           cycling until the whole picture is covered */
        for (iGroup = 0;
                (iGroup <= num_slice_groups_minus1) && (i < PicSizeInMapUnits);
                i += run_length_minus1[iGroup++] + 1)
        {
            for (j = 0; j <= run_length_minus1[ iGroup ] && i + j < PicSizeInMapUnits; j++)
                mapUnitToSliceGroupMap[i+j] = iGroup;
        }
    }
    while (i < PicSizeInMapUnits);
}

/* see subclause 8.2.2.2 dispersed slice group map type*/
void FmoGenerateType1MapUnitMap(int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits)
{
    uint i;
    for (i = 0; i < PicSizeInMapUnits; i++)
    {
        mapUnitToSliceGroupMap[i] = ((i % PicWidthInMbs) + (((i / PicWidthInMbs) * (num_slice_groups_minus1 + 1)) / 2)) % (num_slice_groups_minus1 + 1);
    }
}

/* see subclause 8.2.2.3 foreground with left-over slice group map type */
/* Rectangles are painted from the last group down to group 0 so that lower
   group numbers take precedence; everything else stays in the last group. */
void FmoGenerateType2MapUnitMap(AVCPicParamSet *pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits)
{
    int iGroup;
    uint i, x, y;
    uint yTopLeft, xTopLeft, yBottomRight, xBottomRight;

    for (i = 0; i < PicSizeInMapUnits; i++)
    {
        mapUnitToSliceGroupMap[ i ] = num_slice_groups_minus1;
    }

    for (iGroup = num_slice_groups_minus1 - 1 ; iGroup >= 0; iGroup--)
    {
        yTopLeft = pps->top_left[ iGroup ] / PicWidthInMbs;
        xTopLeft = pps->top_left[ iGroup ] % PicWidthInMbs;
        yBottomRight = pps->bottom_right[ iGroup ] / PicWidthInMbs;
        xBottomRight = pps->bottom_right[ iGroup ] % PicWidthInMbs;
        for (y = yTopLeft; y <= yBottomRight; y++)
        {
            for (x = xTopLeft; x <= xBottomRight; x++)
            {
                mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] = iGroup;
            }
        }
    }
}

/* see subclause 8.2.2.4 box-out slice group map type */
/* follow the text rather than the JM, it's quite different. */
void FmoGenerateType3MapUnitMap(AVCCommonObj *video, AVCPicParamSet* pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs)
{
    uint i, k;
    int leftBound, topBound, rightBound, bottomBound;
    int x, y, xDir, yDir;
    int mapUnitVacant;
    uint PicSizeInMapUnits = video->PicSizeInMapUnits;
    uint MapUnitsInSliceGroup0 = video->MapUnitsInSliceGroup0;

    for (i = 0; i < PicSizeInMapUnits; i++)
    {
        mapUnitToSliceGroupMap[ i ] = 1;
    }

    /* spiral outward from the picture center, claiming vacant units for group 0 */
    x = (PicWidthInMbs - pps->slice_group_change_direction_flag) / 2;
    y = (video->PicHeightInMapUnits - pps->slice_group_change_direction_flag) / 2;

    leftBound = x;
    topBound = y;
    rightBound = x;
    bottomBound = y;

    xDir = pps->slice_group_change_direction_flag - 1;
    yDir = pps->slice_group_change_direction_flag;

    for (k = 0; k < MapUnitsInSliceGroup0; k += mapUnitVacant)
    {
        mapUnitVacant = (mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] == 1);

        if (mapUnitVacant)
        {
            mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] = 0;
        }

        if (xDir == -1 && x == leftBound)
        {
            leftBound = AVC_MAX(leftBound - 1, 0);
            x = leftBound;
            xDir = 0;
            yDir = 2 * pps->slice_group_change_direction_flag - 1;
        }
        else if (xDir == 1 && x == rightBound)
        {
            rightBound = AVC_MIN(rightBound + 1, (int)PicWidthInMbs - 1);
            x = rightBound;
            xDir = 0;
            yDir = 1 - 2 * pps->slice_group_change_direction_flag;
        }
        else if (yDir == -1 && y == topBound)
        {
            topBound = AVC_MAX(topBound - 1, 0);
            y = topBound;
            xDir = 1 - 2 * pps->slice_group_change_direction_flag;
            yDir = 0;
        }
        else if (yDir == 1 && y == bottomBound)
        {
            bottomBound = AVC_MIN(bottomBound + 1, (int)video->PicHeightInMapUnits - 1);
            y = bottomBound;
            xDir = 2 * pps->slice_group_change_direction_flag - 1;
            yDir = 0;
        }
        else
        {
            x = x + xDir;
            y = y + yDir;
        }
    }
}

/* see subclause 8.2.2.5 raster scan slice group map types */
void FmoGenerateType4MapUnitMap(int *mapUnitToSliceGroupMap, int MapUnitsInSliceGroup0, int slice_group_change_direction_flag, uint PicSizeInMapUnits)
{
    uint sizeOfUpperLeftGroup = slice_group_change_direction_flag ? (PicSizeInMapUnits - MapUnitsInSliceGroup0) : MapUnitsInSliceGroup0;
    uint i;

    for (i = 0; i < PicSizeInMapUnits; i++)
        if (i < sizeOfUpperLeftGroup)
            mapUnitToSliceGroupMap[ i ] = 1 - slice_group_change_direction_flag;
        else
            mapUnitToSliceGroupMap[ i ] = slice_group_change_direction_flag;
}

/* see subclause 8.2.2.6, wipe slice group map type. */
/* Note: columns are walked in the outer loop (column-major wipe). */
void FmoGenerateType5MapUnitMap(int *mapUnitToSliceGroupMap, AVCCommonObj *video, int slice_group_change_direction_flag, uint PicSizeInMapUnits)
{
    int PicWidthInMbs = video->PicWidthInMbs;
    int PicHeightInMapUnits = video->PicHeightInMapUnits;
    int MapUnitsInSliceGroup0 = video->MapUnitsInSliceGroup0;
    int sizeOfUpperLeftGroup = slice_group_change_direction_flag ? (PicSizeInMapUnits - MapUnitsInSliceGroup0) : MapUnitsInSliceGroup0;
    int i, j, k = 0;

    for (j = 0; j < PicWidthInMbs; j++)
    {
        for (i = 0; i < PicHeightInMapUnits; i++)
        {
            if (k++ < sizeOfUpperLeftGroup)
            {
                mapUnitToSliceGroupMap[ i * PicWidthInMbs + j ] = 1 - slice_group_change_direction_flag;
            }
            else
            {
                mapUnitToSliceGroupMap[ i * PicWidthInMbs + j ] = slice_group_change_direction_flag;
            }
        }
    }
}

/* see subclause 8.2.2.7, explicit slice group map */
void FmoGenerateType6MapUnitMap(int *mapUnitToSliceGroupMap, int *slice_group_id, uint PicSizeInMapUnits)
{
    uint i;
    for (i = 0; i < PicSizeInMapUnits; i++)
    {
        mapUnitToSliceGroupMap[i] = slice_group_id[i];
    }
}

================================================
FILE: RtspCamera/jni/avc_h264/common/src/mb_access.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avclib_common.h"
#include "oscl_mem.h"

/* Compute the macroblock addresses of neighbors A (left), B (top), C
   (top-right) and D (top-left) of mbNum, and their availability flags:
   a neighbor is available only if it lies inside the picture edge
   (mb_x / mb_y checks) and belongs to the same slice. Frame coding only. */
OSCL_EXPORT_REF void InitNeighborAvailability(AVCCommonObj *video, int mbNum)
{
    int PicWidthInMbs = video->PicWidthInMbs;

    // do frame-only and postpone intraAvail calculattion
    video->mbAddrA = mbNum - 1;
    video->mbAddrB = mbNum - PicWidthInMbs;
    video->mbAddrC = mbNum - PicWidthInMbs + 1;
    video->mbAddrD = mbNum - PicWidthInMbs - 1;

    video->mbAvailA = video->mbAvailB = video->mbAvailC = video->mbAvailD = 0;
    if (video->mb_x)
    {
        video->mbAvailA = (video->mblock[video->mbAddrA].slice_id == video->currMB->slice_id);
        if (video->mb_y)
        {
            video->mbAvailD = (video->mblock[video->mbAddrD].slice_id == video->currMB->slice_id);
        }
    }

    if (video->mb_y)
    {
        video->mbAvailB = (video->mblock[video->mbAddrB].slice_id == video->currMB->slice_id);
        if (video->mb_x < (PicWidthInMbs - 1))
        {
            video->mbAvailC = (video->mblock[video->mbAddrC].slice_id == video->currMB->slice_id);
        }
    }
    return ;
}

/* TRUE when mbAddr is a valid in-picture address in the same slice as currMbAddr. */
bool mb_is_available(AVCMacroblock *mblock, uint PicSizeInMbs, int mbAddr, int currMbAddr)
{
    if (mbAddr < 0 || mbAddr >= (int)PicSizeInMbs)
    {
        return FALSE;
    }

    if (mblock[mbAddr].slice_id != mblock[currMbAddr].slice_id)
    {
        return FALSE;
    }

    return TRUE;
}

/* Predict the number of nonzero coefficients for luma 4x4 block (i,j) from
   the left and top neighboring blocks (used by CAVLC nC derivation). */
OSCL_EXPORT_REF int predict_nnz(AVCCommonObj *video, int i, int j)
{
    int pred_nnz = 0;
    int cnt      = 1;
    AVCMacroblock *tempMB;

    /* left block */
    /*getLuma4x4Neighbour(video, mb_nr, i, j, -1, 0, &pix);
      leftMB = video->mblock + pix.mb_addr; */
    /* replace the above with below (won't work for field decoding), 1/19/04 */
    if (i)
    {
        pred_nnz = video->currMB->nz_coeff[(j<<2)+i-1];
    }
    else
    {
        if (video->mbAvailA)
        {
            tempMB = video->mblock + video->mbAddrA;
            pred_nnz = tempMB->nz_coeff[(j<<2)+3];
        }
        else
        {
            cnt = 0;
        }
    }

    /* top block */
    /*getLuma4x4Neighbour(video, mb_nr, i, j, 0, -1, &pix);
      topMB = video->mblock + pix.mb_addr;*/
    /* replace the above with below (won't work for field decoding), 1/19/04 */
    if (j)
    {
        pred_nnz += video->currMB->nz_coeff[((j-1)<<2)+i];
        cnt++;
    }
    else
    {
        if (video->mbAvailB)
        {
            tempMB = video->mblock + video->mbAddrB;
            pred_nnz += tempMB->nz_coeff[12+i];
            cnt++;
        }
    }

    if (cnt == 2)
    {
        pred_nnz = (pred_nnz + 1) >> 1; /* average, rounded up */
    }

    return pred_nnz;
}

/* Chroma counterpart of predict_nnz; (i,j) index into the chroma part of
   the nz_coeff array (rows 4..5 — hence the 20+i top-row offset). */
OSCL_EXPORT_REF int predict_nnz_chroma(AVCCommonObj *video, int i, int j)
{
    int pred_nnz = 0;
    int cnt      = 1;
    AVCMacroblock *tempMB;

    /* left block */
    /*getChroma4x4Neighbour(video, mb_nr, i%2, j-4, -1, 0, &pix);
      leftMB = video->mblock + pix.mb_addr;*/
    /* replace the above with below (won't work for field decoding), 1/19/04 */
    if (i&1)
    {
        pred_nnz = video->currMB->nz_coeff[(j<<2)+i-1];
    }
    else
    {
        if (video->mbAvailA)
        {
            tempMB = video->mblock + video->mbAddrA;
            pred_nnz = tempMB->nz_coeff[(j<<2)+i+1];
        }
        else
        {
            cnt = 0;
        }
    }

    /* top block */
    /*getChroma4x4Neighbour(video, mb_nr, i%2, j-4, 0, -1, &pix);
      topMB = video->mblock + pix.mb_addr;*/
    /* replace the above with below (won't work for field decoding), 1/19/04 */
    if (j&1)
    {
        pred_nnz += video->currMB->nz_coeff[((j-1)<<2)+i];
        cnt++;
    }
    else
    {
        if (video->mbAvailB)
        {
            tempMB = video->mblock + video->mbAddrB;
            pred_nnz += tempMB->nz_coeff[20+i];
            cnt++;
        }
    }

    if (cnt == 2)
    {
        pred_nnz = (pred_nnz + 1) >> 1;
    }

    return pred_nnz;
}

/* Motion vector prediction (subclause 8.4.1.3): for every (sub-)partition of
   the current MB derive the median/directional predictor from neighbors
   A/B/C (falling back to D), then either compute the residual MVs into
   video->mvd_l0 (encFlag) or reconstruct currMB->mvL0 from the residuals. */
OSCL_EXPORT_REF void GetMotionVectorPredictor(AVCCommonObj *video, int encFlag)
{
    AVCMacroblock *currMB = video->currMB;
    AVCMacroblock *MB_A, *MB_B, *MB_C, *MB_D;
    int block_x, block_y, block_x_1, block_y_1, new_block_x;
    int mbPartIdx, subMbPartIdx, offset_indx;
    int16 *mv, pmv_x, pmv_y;
    int nmSubMbHeight, nmSubMbWidth, mbPartIdx_X, mbPartIdx_Y;
    int avail_a, avail_b, avail_c;
    /* bitmask of 4x4 positions whose top-right neighbor lies inside currMB */
    const static uint32 C = 0x5750;
    int i, j, offset_MbPart_indx, refIdxLXA, refIdxLXB, refIdxLXC = 0, curr_ref_idx;
    int pmv_A_x, pmv_B_x, pmv_C_x = 0, pmv_A_y, pmv_B_y, pmv_C_y = 0;

    /* we have to take care of Intra/skip blocks somewhere, i.e. set MV to 0 and set ref to -1! */
    /* we have to populate refIdx as well */

    MB_A = &video->mblock[video->mbAddrA];
    MB_B = &video->mblock[video->mbAddrB];

    if (currMB->mbMode == AVC_SKIP /* && !encFlag */) /* only for decoder */
    {
        currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] = currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = 0;
        if (video->mbAvailA && video->mbAvailB)
        {
            if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||
                    (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))
            {
                oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);
                return;
            }
        }
        else
        {
            oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);
            return;
        }
        video->mvd_l0[0][0][0] = 0;
        video->mvd_l0[0][0][1] = 0;
    }

    MB_C = &video->mblock[video->mbAddrC];
    MB_D = &video->mblock[video->mbAddrD];

    offset_MbPart_indx = 0;
    for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
    {
        offset_indx = 0;
        nmSubMbHeight = currMB->SubMbPartHeight[mbPartIdx] >> 2;
        nmSubMbWidth = currMB->SubMbPartWidth[mbPartIdx] >> 2;
        mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1) << 1;
        mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) & 2;

        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
        {
            block_x = mbPartIdx_X + ((subMbPartIdx + offset_indx) & 1);
            block_y = mbPartIdx_Y + (((subMbPartIdx + offset_indx) >> 1) & 1);

            block_x_1 = block_x - 1;
            block_y_1 = block_y - 1;
            refIdxLXA = refIdxLXB = refIdxLXC = -1;
            pmv_A_x = pmv_A_y = pmv_B_x = pmv_B_y = pmv_C_x = pmv_C_y = 0;

            /* neighbor A: left 4x4 block (inside currMB or in MB_A) */
            if (block_x)
            {
                avail_a = 1;
                refIdxLXA = currMB->ref_idx_L0[(block_y & 2) + (block_x_1 >> 1)];
                mv = (int16*)(currMB->mvL0 + (block_y << 2) + block_x_1);
                pmv_A_x = *mv++;
                pmv_A_y = *mv;
            }
            else
            {
                avail_a = video->mbAvailA;
                if (avail_a)
                {
                    refIdxLXA = MB_A->ref_idx_L0[(block_y & 2) + 1];
                    mv = (int16*)(MB_A->mvL0 + (block_y << 2) + 3);
                    pmv_A_x = *mv++;
                    pmv_A_y = *mv;
                }
            }

            /* neighbor B: top 4x4 block (inside currMB or in MB_B) */
            if (block_y)
            {
                avail_b = 1;
                refIdxLXB = currMB->ref_idx_L0[(block_y_1 & 2) + (block_x >> 1)];
                mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + block_x);
                pmv_B_x = *mv++;
                pmv_B_y = *mv;
            }
            else
            {
                avail_b = video->mbAvailB;
                if (avail_b)
                {
                    refIdxLXB = MB_B->ref_idx_L0[2 + (block_x >> 1)];
                    mv = (int16*)(MB_B->mvL0 + 12 + block_x);
                    pmv_B_x = *mv++;
                    pmv_B_y = *mv;
                }
            }

            /* neighbor C: top-right of the partition; fall back to D when absent */
            new_block_x = block_x + (currMB->SubMbPartWidth[mbPartIdx] >> 2) - 1;
            avail_c = (C >> ((block_y << 2) + new_block_x)) & 0x1;
            if (avail_c)
            {
                /* it guaranteed that block_y > 0 && new_block_x<3 ) */
                refIdxLXC = currMB->ref_idx_L0[(block_y_1 & 2) + ((new_block_x+1) >> 1)];
                mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + (new_block_x + 1));
                pmv_C_x = *mv++;
                pmv_C_y = *mv;
            }
            else
            {
                if (block_y == 0 && new_block_x < 3)
                {
                    avail_c = video->mbAvailB;
                    if (avail_c)
                    {
                        refIdxLXC = MB_B->ref_idx_L0[2 + ((new_block_x+1)>>1)];
                        mv = (int16*)(MB_B->mvL0 + 12 + (new_block_x + 1));
                        pmv_C_x = *mv++;
                        pmv_C_y = *mv;
                    }
                }
                else if (block_y == 0 && new_block_x == 3)
                {
                    avail_c = video->mbAvailC;
                    if (avail_c)
                    {
                        refIdxLXC = MB_C->ref_idx_L0[2];
                        mv = (int16*)(MB_C->mvL0 + 12);
                        pmv_C_x = *mv++;
                        pmv_C_y = *mv;
                    }
                }
                if (avail_c == 0)
                {
                    /* check D */
                    if (block_x && block_y)
                    {
                        avail_c = 1;
                        refIdxLXC = currMB->ref_idx_L0[(block_y_1 & 2) + (block_x_1 >> 1)];
                        mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + block_x_1);
                        pmv_C_x = *mv++;
                        pmv_C_y = *mv;
                    }
                    else if (block_y)
                    {
                        avail_c = video->mbAvailA;
                        if (avail_c)
                        {
                            refIdxLXC = MB_A->ref_idx_L0[(block_y_1 & 2) + 1];
                            mv = (int16*)(MB_A->mvL0 + (block_y_1 << 2) + 3);
                            pmv_C_x = *mv++;
                            pmv_C_y = *mv;
                        }
                    }
                    else if (block_x)
                    {
                        avail_c = video->mbAvailB;
                        if (avail_c)
                        {
                            refIdxLXC = MB_B->ref_idx_L0[2 + (block_x_1 >> 1)];
                            mv = (int16*)(MB_B->mvL0 + 12 + block_x_1);
                            pmv_C_x = *mv++;
                            pmv_C_y = *mv;
                        }
                    }
                    else
                    {
                        avail_c = video->mbAvailD;
                        if (avail_c)
                        {
                            refIdxLXC = MB_D->ref_idx_L0[3];
                            mv = (int16*)(MB_D->mvL0 + 15);
                            pmv_C_x = *mv++;
                            pmv_C_y = *mv;
                        }
                    }
                }
            }

            offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;

            curr_ref_idx = currMB->ref_idx_L0[(block_y & 2) + (block_x >> 1)];

            /* predictor selection: A-only, unique matching refIdx, or median */
            if (avail_a && !(avail_b || avail_c))
            {
                pmv_x = pmv_A_x;
                pmv_y = pmv_A_y;
            }
            else if (((curr_ref_idx == refIdxLXA) + (curr_ref_idx == refIdxLXB) + (curr_ref_idx == refIdxLXC)) == 1)
            {
                if (curr_ref_idx == refIdxLXA)
                {
                    pmv_x = pmv_A_x;
                    pmv_y = pmv_A_y;
                }
                else if (curr_ref_idx == refIdxLXB)
                {
                    pmv_x = pmv_B_x;
                    pmv_y = pmv_B_y;
                }
                else
                {
                    pmv_x = pmv_C_x;
                    pmv_y = pmv_C_y;
                }
            }
            else
            {
                pmv_x = AVC_MEDIAN(pmv_A_x, pmv_B_x, pmv_C_x);
                pmv_y = AVC_MEDIAN(pmv_A_y, pmv_B_y, pmv_C_y);
            }

            /* overwrite if special case */
            /* 16x8 / 8x16 partitions use directional predictors (subclause 8.4.1.3) */
            if (currMB->NumMbPart == 2)
            {
                if (currMB->MbPartWidth == 16)
                {
                    if (mbPartIdx == 0)
                    {
                        if (refIdxLXB == curr_ref_idx)
                        {
                            pmv_x = pmv_B_x;
                            pmv_y = pmv_B_y;
                        }
                    }
                    else if (refIdxLXA == curr_ref_idx)
                    {
                        pmv_x = pmv_A_x;
                        pmv_y = pmv_A_y;
                    }
                }
                else
                {
                    if (mbPartIdx == 0)
                    {
                        if (refIdxLXA == curr_ref_idx)
                        {
                            pmv_x = pmv_A_x;
                            pmv_y = pmv_A_y;
                        }
                    }
                    else if (refIdxLXC == curr_ref_idx)
                    {
                        pmv_x = pmv_C_x;
                        pmv_y = pmv_C_y;
                    }
                }
            }

            mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));
            if (encFlag) /* calculate residual MV  video->mvd_l0 */
            {
                video->mvd_l0[mbPartIdx][subMbPartIdx][0] = *mv++ - pmv_x;
                video->mvd_l0[mbPartIdx][subMbPartIdx][1] = *mv++ - pmv_y;
            }
            else /* calculate original MV  currMB->mvL0 */
            {
                pmv_x += video->mvd_l0[mbPartIdx][subMbPartIdx][0];
                pmv_y += video->mvd_l0[mbPartIdx][subMbPartIdx][1];

                /* replicate the reconstructed MV over the whole sub-partition */
                for (i = 0; i < nmSubMbHeight; i++)
                {
                    for (j = 0; j < nmSubMbWidth; j++)
                    {
                        *mv++ = pmv_x;
                        *mv++ = pmv_y;
                    }
                    mv += (8 - (j << 1));
                }
            }
        }
        offset_MbPart_indx = currMB->MbPartWidth >> 4;
    }
}

================================================
FILE: RtspCamera/jni/avc_h264/common/src/reflist.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avclib_common.h"

// xxx pa
#define LOG_TAG "reflist"
#include "android/log.h"

/** see subclause 8.2.4 Decoding process for reference picture lists construction. */
/* Build RefPicList0 for the current slice: compute FrameNumWrap/PicNum for all
   short-term references, append long-term references, and sort per 8.2.4.2.1.
   I slices only refresh the picture numbers; B-list (RefPicList1) handling is
   limited to the identical-list swap below. */
OSCL_EXPORT_REF void RefListInit(AVCCommonObj *video)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "RefListInit");
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCDecPicBuffer *dpb = video->decPicBuf;
    int slice_type = video->slice_type;
    int i, list0idx;
    AVCPictureData *tmp_s;

    list0idx = 0;

    if (slice_type == AVC_I_SLICE)
    {
        video->refList0Size = 0;
        video->refList1Size = 0;

        /* we still have to calculate FrameNumWrap to make sure that all I-slice clip
        can perform sliding_window_operation properly. */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if ((dpb->fs[i]->IsReference == 3) && (!dpb->fs[i]->IsLongTerm))
            {
                /* subclause 8.2.4.1 Decoding process for picture numbers. */
                if (dpb->fs[i]->FrameNum > (int)sliceHdr->frame_num)
                {
                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum - video->MaxFrameNum;
                }
                else
                {
                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum;
                }
                dpb->fs[i]->frame.PicNum = dpb->fs[i]->FrameNumWrap;
            }
        }
        return ;
    }

    if (slice_type == AVC_P_SLICE)
    {
        /* Calculate FrameNumWrap and PicNum */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if ((dpb->fs[i]->IsReference == 3) && (!dpb->fs[i]->IsLongTerm))
            {
                /* subclause 8.2.4.1 Decoding process for picture numbers. */
                if (dpb->fs[i]->FrameNum > (int)sliceHdr->frame_num)
                {
                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum - video->MaxFrameNum;
                }
                else
                {
                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum;
                }
                dpb->fs[i]->frame.PicNum = dpb->fs[i]->FrameNumWrap;
                video->RefPicList0[list0idx++] = &(dpb->fs[i]->frame);
            }
        }

        if (list0idx == 0)
        {
            /* no short-term reference found: fall back to frame store 0 so the
               list is never empty (error-resilience measure) */
            dpb->fs[0]->IsReference = 3;
            video->RefPicList0[0] = &(dpb->fs[0]->frame);
            list0idx = 1;
        }
        /* order list 0 by PicNum from max to min, see subclause 8.2.4.2.1 */
        SortPicByPicNum(video->RefPicList0, list0idx);
        video->refList0Size = list0idx;

        /* long term handling */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if (dpb->fs[i]->IsLongTerm == 3)
            {
                /* subclause 8.2.4.1 Decoding process for picture numbers. */
                dpb->fs[i]->frame.LongTermPicNum = dpb->fs[i]->LongTermFrameIdx;
                video->RefPicList0[list0idx++] = &(dpb->fs[i]->frame);
            }
        }
        /* order PicNum from min to max, see subclause 8.2.4.2.1 */
        SortPicByPicNumLongTerm(&(video->RefPicList0[video->refList0Size]), list0idx - video->refList0Size);
        video->refList0Size = list0idx;

        video->refList1Size = 0;
    }

    if ((video->refList0Size == video->refList1Size) && (video->refList0Size > 1))
    {
        /* check if lists are identical, if yes swap first two elements of listX[1] */
        /* last paragraph of subclause 8.2.4.2.4 */
        for (i = 0; i < video->refList0Size; i++)
        {
            if (video->RefPicList0[i] != video->RefPicList1[i])
            {
                break;
            }
        }
        if (i == video->refList0Size)
        {
            tmp_s = video->RefPicList1[0];
            video->RefPicList1[0] = video->RefPicList1[1];
            video->RefPicList1[1] = tmp_s;
        }
    }

    /* set max size */
    video->refList0Size = AVC_MIN(video->refList0Size, (int)video->sliceHdr->num_ref_idx_l0_active_minus1 + 1);
    video->refList1Size = AVC_MIN(video->refList1Size, (int)video->sliceHdr->num_ref_idx_l1_active_minus1 + 1);

    return ;
}

/* see subclause 8.2.4.3 */
/* Apply the slice header's list-0 reordering commands (if signaled); fails
   if list 0 ends up empty for a non-I slice. */
OSCL_EXPORT_REF AVCStatus ReOrderList(AVCCommonObj *video)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "ReOrderList");
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCStatus status = AVC_SUCCESS;
    int slice_type = video->slice_type;

    if (slice_type != AVC_I_SLICE)
    {
        if (sliceHdr->ref_pic_list_reordering_flag_l0)
        {
            status = ReorderRefPicList(video, 0);
            if (status != AVC_SUCCESS)
                return status;
        }
        if (video->refList0Size == 0)
        {
            return AVC_FAIL;
        }
    }
    return status;
}

/* Walk the reordering_of_pic_nums_idc syntax (terminated by idc == 3) for
   list 0 (isL1 == 0) or list 1, moving the targeted short-term or long-term
   picture to the front as per subclauses 8.2.4.3.1 / 8.2.4.3.2. */
AVCStatus ReorderRefPicList(AVCCommonObj *video, int isL1)
{
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCStatus status;
    int *list_size;
    int num_ref_idx_lX_active_minus1;
    uint *remapping_of_pic_nums_idc;
    int *abs_diff_pic_num_minus1;
    int *long_term_pic_idx;
    int i;
    int maxPicNum, currPicNum, picNumLXNoWrap, picNumLXPred, picNumLX;
    int refIdxLX = 0;
    void* tmp;

    if (!isL1) /* list 0 */
    {
        list_size = &(video->refList0Size);
        num_ref_idx_lX_active_minus1 = sliceHdr->num_ref_idx_l0_active_minus1;
        remapping_of_pic_nums_idc = sliceHdr->reordering_of_pic_nums_idc_l0;
        tmp = (void*)sliceHdr->abs_diff_pic_num_minus1_l0;
        abs_diff_pic_num_minus1 = (int*) tmp;
        tmp = (void*)sliceHdr->long_term_pic_num_l0;
        long_term_pic_idx = (int*) tmp;
    }
    else
    {
        list_size = &(video->refList1Size);
        num_ref_idx_lX_active_minus1 = sliceHdr->num_ref_idx_l1_active_minus1;
        remapping_of_pic_nums_idc = sliceHdr->reordering_of_pic_nums_idc_l1;
        tmp = (void*) sliceHdr->abs_diff_pic_num_minus1_l1;
        abs_diff_pic_num_minus1 = (int*) tmp;
        tmp = (void*) sliceHdr->long_term_pic_num_l1;
        long_term_pic_idx = (int*)tmp;
    }

    maxPicNum = video->MaxPicNum;
    currPicNum = video->CurrPicNum;

    picNumLXPred = currPicNum; /* initial value */

    for (i = 0; remapping_of_pic_nums_idc[i] != 3; i++)
    {
        if ((remapping_of_pic_nums_idc[i] > 3) || (i >= MAX_REF_PIC_LIST_REORDERING))
        {
            return AVC_FAIL; /* out of range */
        }
        /* see subclause 8.2.4.3.1 */
        if (remapping_of_pic_nums_idc[i] < 2)
        {
            /* idc 0 subtracts, idc 1 adds, both modulo maxPicNum */
            if (remapping_of_pic_nums_idc[i] == 0)
            {
                if (picNumLXPred - (abs_diff_pic_num_minus1[i] + 1) < 0)
                    picNumLXNoWrap = picNumLXPred - (abs_diff_pic_num_minus1[i] + 1) + maxPicNum;
                else
                    picNumLXNoWrap = picNumLXPred - (abs_diff_pic_num_minus1[i] + 1);
            }
            else /* (remapping_of_pic_nums_idc[i] == 1) */
            {
                if (picNumLXPred + (abs_diff_pic_num_minus1[i] + 1) >= maxPicNum)
                    picNumLXNoWrap = picNumLXPred + (abs_diff_pic_num_minus1[i] + 1) - maxPicNum;
                else
                    picNumLXNoWrap = picNumLXPred + (abs_diff_pic_num_minus1[i] + 1);
            }
            picNumLXPred = picNumLXNoWrap; /* prediction for the next one */

            if (picNumLXNoWrap > currPicNum)
                picNumLX = picNumLXNoWrap - maxPicNum;
            else
                picNumLX = picNumLXNoWrap;

            status = ReorderShortTerm(video, picNumLX, &refIdxLX, isL1);
            if (status != AVC_SUCCESS)
            {
                return status;
            }
        }
        else /* (remapping_of_pic_nums_idc[i] == 2), subclause 8.2.4.3.2 */
        {
            status = ReorderLongTerm(video, long_term_pic_idx[i], &refIdxLX, isL1);
            if (status != AVC_SUCCESS)
            {
                return status;
            }
        }
    }

    /* that's a definition */
    *list_size = num_ref_idx_lX_active_minus1 + 1;

    return AVC_SUCCESS;
}

/* see subclause 8.2.4.3.1 */
/* Shift the list right from *refIdxLX, insert the short-term picture with
   picNumLX there, then compact out the duplicate occurrence. */
AVCStatus ReorderShortTerm(AVCCommonObj *video, int picNumLX, int *refIdxLX, int isL1)
{
    int cIdx, nIdx;
    int num_ref_idx_lX_active_minus1;
    AVCPictureData *picLX, **RefPicListX;

    if (!isL1) /* list 0 */
    {
        RefPicListX = video->RefPicList0;
        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l0_active_minus1;
    }
    else
    {
        RefPicListX = video->RefPicList1;
        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l1_active_minus1;
    }

    picLX = GetShortTermPic(video, picNumLX);

    if (picLX == NULL)
    {
        return AVC_FAIL;
    }
    /* Note RefPicListX has to access element number num_ref_idx_lX_active */
    /* There could be access violation here. */
    if (num_ref_idx_lX_active_minus1 + 1 >= MAX_REF_PIC_LIST)
    {
        return AVC_FAIL;
    }

    for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--)
    {
        RefPicListX[ cIdx ] = RefPicListX[ cIdx - 1];
    }

    RefPicListX[(*refIdxLX)++ ] = picLX;

    nIdx = *refIdxLX;

    for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; cIdx++)
    {
        if (RefPicListX[ cIdx ])
        {
            if ((RefPicListX[ cIdx ]->isLongTerm) || ((int)RefPicListX[ cIdx ]->PicNum != picNumLX))
            {
                RefPicListX[ nIdx++ ] = RefPicListX[ cIdx ];
            }
        }
    }

    return AVC_SUCCESS;
}

/* see subclause 8.2.4.3.2 */
/* Long-term counterpart of ReorderShortTerm, keyed by LongTermPicNum. */
AVCStatus ReorderLongTerm(AVCCommonObj *video, int LongTermPicNum, int *refIdxLX, int isL1)
{
    AVCPictureData **RefPicListX;
    int num_ref_idx_lX_active_minus1;
    int cIdx, nIdx;
    AVCPictureData *picLX;

    if (!isL1) /* list 0 */
    {
        RefPicListX = video->RefPicList0;
        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l0_active_minus1;
    }
    else
    {
        RefPicListX = video->RefPicList1;
        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l1_active_minus1;
    }

    picLX = GetLongTermPic(video, LongTermPicNum);

    if (picLX == NULL)
    {
        return AVC_FAIL;
    }
    /* Note RefPicListX has to access element number num_ref_idx_lX_active */
    /* There could be access violation here.
*/ if (num_ref_idx_lX_active_minus1 + 1 >= MAX_REF_PIC_LIST) { return AVC_FAIL; } for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--) RefPicListX[ cIdx ] = RefPicListX[ cIdx - 1]; RefPicListX[(*refIdxLX)++ ] = picLX; nIdx = *refIdxLX; for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; cIdx++) { if ((!RefPicListX[ cIdx ]->isLongTerm) || ((int)RefPicListX[ cIdx ]->LongTermPicNum != LongTermPicNum)) { RefPicListX[ nIdx++ ] = RefPicListX[ cIdx ]; } } return AVC_SUCCESS; } AVCPictureData* GetShortTermPic(AVCCommonObj *video, int picNum) { int i; AVCDecPicBuffer *dpb = video->decPicBuf; for (i = 0; i < dpb->num_fs; i++) { if (dpb->fs[i]->IsReference == 3) { if ((dpb->fs[i]->frame.isLongTerm == FALSE) && (dpb->fs[i]->frame.PicNum == picNum)) { return &(dpb->fs[i]->frame); } } } return NULL; } AVCPictureData* GetLongTermPic(AVCCommonObj *video, int LongtermPicNum) { AVCDecPicBuffer *dpb = video->decPicBuf; int i; for (i = 0; i < dpb->num_fs; i++) { if (dpb->fs[i]->IsReference == 3) { if ((dpb->fs[i]->frame.isLongTerm == TRUE) && (dpb->fs[i]->frame.LongTermPicNum == LongtermPicNum)) { return &(dpb->fs[i]->frame); } } } return NULL; } int is_short_ref(AVCPictureData *s) { return ((s->isReference) && !(s->isLongTerm)); } int is_long_ref(AVCPictureData *s) { return ((s->isReference) && (s->isLongTerm)); } /* sort by PicNum, descending order */ void SortPicByPicNum(AVCPictureData *data[], int num) { int i, j; AVCPictureData *temp; for (i = 0; i < num - 1; i++) { for (j = i + 1; j < num; j++) { if (data[j]->PicNum > data[i]->PicNum) { temp = data[j]; data[j] = data[i]; data[i] = temp; } } } return ; } /* sort by PicNum, ascending order */ void SortPicByPicNumLongTerm(AVCPictureData *data[], int num) { int i, j; AVCPictureData *temp; for (i = 0; i < num - 1; i++) { for (j = i + 1; j < num; j++) { if (data[j]->LongTermPicNum < data[i]->LongTermPicNum) { temp = data[j]; data[j] = data[i]; data[i] = temp; } } } return ; } /* sort by FrameNumWrap, 
descending order */
/* Selection-style sort of AVCFrameStore pointers: largest FrameNumWrap first.
   O(n^2), fine for DPB-sized arrays. */
void SortFrameByFrameNumWrap(AVCFrameStore *data[], int num)
{
    int i, j;
    AVCFrameStore *temp;

    for (i = 0; i < num - 1; i++)
    {
        for (j = i + 1; j < num; j++)
        {
            /* keep the larger FrameNumWrap at position i */
            if (data[j]->FrameNumWrap > data[i]->FrameNumWrap)
            {
                temp = data[j];
                data[j] = data[i];
                data[i] = temp;
            }
        }
    }

    return ;
}

/* sort frames by LongTermFrameIdx, ascending order */
void SortFrameByLTFrameIdx(AVCFrameStore *data[], int num)
{
    int i, j;
    AVCFrameStore *temp;

    for (i = 0; i < num - 1; i++)
    {
        for (j = i + 1; j < num; j++)
        {
            /* keep the smaller LongTermFrameIdx at position i */
            if (data[j]->LongTermFrameIdx < data[i]->LongTermFrameIdx)
            {
                temp = data[j];
                data[j] = data[i];
                data[i] = temp;
            }
        }
    }

    return ;
}

/* sort PictureData by POC in descending order */
/* descending selects ">" ordering, otherwise "<" — same swap loop twice. */
void SortPicByPOC(AVCPictureData *data[], int num, int descending)
{
    int i, j;
    AVCPictureData *temp;

    if (descending)
    {
        for (i = 0; i < num - 1; i++)
        {
            for (j = i + 1; j < num; j++)
            {
                if (data[j]->PicOrderCnt > data[i]->PicOrderCnt)
                {
                    temp = data[j];
                    data[j] = data[i];
                    data[i] = temp;
                }
            }
        }
    }
    else
    {
        for (i = 0; i < num - 1; i++)
        {
            for (j = i + 1; j < num; j++)
            {
                if (data[j]->PicOrderCnt < data[i]->PicOrderCnt)
                {
                    temp = data[j];
                    data[j] = data[i];
                    data[i] = temp;
                }
            }
        }
    }

    return ;
}

/* sort PictureData by LongTermPicNum in ascending order */
void SortPicByLTPicNum(AVCPictureData *data[], int num)
{
    int i, j;
    AVCPictureData *temp;

    for (i = 0; i < num - 1; i++)
    {
        for (j = i + 1; j < num; j++)
        {
            if (data[j]->LongTermPicNum < data[i]->LongTermPicNum)
            {
                temp = data[j];
                data[j] = data[i];
                data[i] = temp;
            }
        }
    }

    return ;
}

/* sort by PicOrderCnt, descending order */
/* AVCFrameStore variant of SortPicByPOC; tail of this function lies past
   this span and is untouched. */
void SortFrameByPOC(AVCFrameStore *data[], int num, int descending)
{
    int i, j;
    AVCFrameStore *temp;

    if (descending)
    {
        for (i = 0; i < num - 1; i++)
        {
            for (j = i + 1; j < num; j++)
            {
                if (data[j]->PicOrderCnt > data[i]->PicOrderCnt)
                {
                    temp = data[j];
                    data[j] = data[i];
                    data[i] = temp;
                }
            }
        }
    }
    else
    {
        for (i = 0; i < num - 1; i++)
        {
            for (j = i + 1; j < num; j++)
            {
                if (data[j]->PicOrderCnt < data[i]->PicOrderCnt)
                {
                    temp = data[j];
                    data[j] =
data[i]; data[i] = temp; } } } } return ; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/Android.mk ================================================ # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This makefile supplies the rules for building a library of JNI code for # use by our example platform shared library. LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_MODULE_TAGS := optional # This is the target being built. LOCAL_MODULE:= libH264Decoder # All of the source files that we will compile. LOCAL_SRC_FILES:= \ src/3GPVideoParser.cpp \ src/avc_bitstream.cpp \ src/avcdec_api.cpp \ src/header.cpp \ src/itrans.cpp \ src/pred_inter.cpp \ src/pred_intra.cpp \ src/residual.cpp \ src/slice.cpp \ src/vlc.cpp \ src/yuv2rgb.cpp \ src/pvavcdecoder.cpp \ src/NativeH264Decoder.cpp \ ../common/src/deblock.cpp \ ../common/src/dpb.cpp \ ../common/src/fmo.cpp \ ../common/src/mb_access.cpp \ ../common/src/reflist.cpp # All of the shared libraries we link against. LOCAL_SHARED_LIBRARIES := # No static libraries. LOCAL_STATIC_LIBRARIES := # Also need the JNI headers. LOCAL_C_INCLUDES += \ $(JNI_H_INCLUDE)\ $(LOCAL_PATH)/src \ $(LOCAL_PATH)/include \ $(AVC_ROOT)/oscl \ $(AVC_ROOT)/common/include # No specia compiler flags. LOCAL_CFLAGS += # Link libs (ex logs) LOCAL_LDLIBS := -llog # Don't prelink this library. 
For more efficient code, you may want # to add this library to the prelink map and set this to true. LOCAL_PRELINK_MODULE := false include $(BUILD_SHARED_LIBRARY) ================================================ FILE: RtspCamera/jni/avc_h264/dec/include/avcdec_api.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains application function interfaces to the AVC decoder library and necessary type defitionitions and enumerations. @publishedAll */ #ifndef _AVCDEC_API_H_ #define _AVCDEC_API_H_ #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #include "avcapi_common.h" /** This enumeration is used for the status returned from the library interface. */ typedef enum { /** The followings are fail with details. Their values are negative. */ AVCDEC_NO_DATA = -4, AVCDEC_NOT_SUPPORTED = -3, /** Fail information */ AVCDEC_NO_BUFFER = -2, /* no output picture buffer available */ AVCDEC_MEMORY_FAIL = -1, /* memory allocation failed */ AVCDEC_FAIL = 0, /** Generic success value */ AVCDEC_SUCCESS = 1, AVCDEC_PICTURE_OUTPUT_READY = 2, AVCDEC_PICTURE_READY = 3, /** The followings are success with warnings. Their values are positive integers. 
*/ AVCDEC_NO_NEXT_SC = 4, AVCDEC_REDUNDANT_FRAME = 5, AVCDEC_CONCEALED_FRAME = 6 /* detect and conceal the error */ } AVCDec_Status; /** This structure contains sequence parameters information. */ typedef struct tagAVCDecSPSInfo { int FrameWidth; int FrameHeight; uint frame_only_flag; int frame_crop_left; int frame_crop_right; int frame_crop_top; int frame_crop_bottom; int num_frames; // minimal number of YUV frame buffers required } AVCDecSPSInfo; #ifdef __cplusplus extern "C" { #endif /** THE FOLLOWINGS ARE APIS */ /** This function parses one NAL unit from byte stream format input according to Annex B. \param "bitstream" "Pointer to the bitstream buffer." \param "nal_unit" "Point to pointer and the location of the start of the first NAL unit found in bitstream." \param "size" "As input, the pointer to the size of bitstream in bytes. As output, the value is changed to be the size of the found NAL unit." \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL if no first start code is found, AVCDEC_NO_NEX_SC if the first start code is found, but the second start code is missing (potential partial NAL)." */ OSCL_IMPORT_REF AVCDec_Status PVAVCAnnexBGetNALUnit(uint8 *bitstream, uint8 **nal_unit, int *size); /** This function sniffs the nal_unit_type such that users can call corresponding APIs. \param "bitstream" "Pointer to the beginning of a NAL unit (start with forbidden_zero_bit, etc.)." \param "size" "size of the bitstream (NumBytesInNALunit + 1)." \param "nal_unit_type" "Pointer to the return value of nal unit type." \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL otherwise." */ OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc); /** This function decodes the sequence parameters set, initializes related parameters and allocates memory (reference frames list), must also be compliant with Annex A. It is equivalent to decode VOL header of MPEG4. \param "avcHandle" "Handle to the AVC decoder library object." 
\param "nal_unit" "Pointer to the buffer containing single NAL unit. The content will change due to EBSP-to-RBSP conversion." \param "nal_size" "size of the bitstream NumBytesInNALunit." \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL if profile and level is not supported, AVCDEC_MEMORY_FAIL if memory allocations return null." */ OSCL_IMPORT_REF AVCDec_Status PVAVCDecSeqParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size); /** This function returns sequence parameters such as dimension and field flag of the most recently decoded SPS. More can be added later or grouped together into a structure. This API can be called after PVAVCInitSequence. If no sequence parameter has been decoded yet, it will return AVCDEC_FAIL. \param "avcHandle" "Handle to the AVC decoder library object." \param "seqInfo" "Pointer to the AVCDecSeqParamInfo structure." \return "AVCDEC_SUCCESS if success and AVCDEC_FAIL if fail." \note "This API returns the SPS Info of the most recently decoded SPS (to be used right after PVAVCDecSeqParamSet)." */ OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetSeqInfo(AVCHandle *avcHandle, AVCDecSPSInfo *seqInfo); /** This function decodes the picture parameters set and initializes related parameters. Note thate the PPS may not be present for every picture. \param "avcHandle" "Handle to the AVC decoder library object." \param "nal_unit" "Pointer to the buffer containing single NAL unit. The content will change due to EBSP-to-RBSP conversion." \param "nal_size" "size of the bitstream NumBytesInNALunit." \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL if profile and level is not supported." */ OSCL_IMPORT_REF AVCDec_Status PVAVCDecPicParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size); /** This function decodes one NAL unit of bitstream. The type of nal unit is one of the followings, 1, 5. (for now, no data partitioning, type 2,3,4). \param "avcHandle" "Handle to the AVC decoder library object." 
\param "nal_unit" "Pointer to the buffer containing a single or partial NAL unit. The content will change due to EBSP-to-RBSP conversion." \param "buf_size" "Size of the buffer (less than or equal nal_size)." \param "nal_size" "size of the current NAL unit NumBytesInNALunit." \return "AVCDEC_PICTURE_READY for success and an output is ready, AVCDEC_SUCCESS for success but no output is ready, AVCDEC_PACKET_LOSS is GetData returns AVCDEC_PACKET_LOSS, AVCDEC_FAIL if syntax error is detected, AVCDEC_MEMORY_FAIL if memory is corrupted. AVCDEC_NO_PICTURE if no frame memory to write to (users need to get output and/or return picture). AVCDEC_REDUNDANT_PICTURE if error has been detected in the primary picture and redundant picture is available, AVCDEC_CONCEALED_PICTURE if error has been detected and decoder has concealed it." */ OSCL_IMPORT_REF AVCDec_Status PVAVCDecSEI(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size); OSCL_IMPORT_REF AVCDec_Status PVAVCDecodeSlice(AVCHandle *avcHandle, uint8 *buffer, int buf_size); /** Check the availability of the decoded picture in decoding order (frame_num). The AVCFrameIO also provide displaying order information such that the application can re-order the frame for display. A picture can be retrieved only once. \param "avcHandle" "Handle to the AVC decoder library object." \param "output" "Pointer to the AVCOutput structure. Note that decoder library will not re-used the pixel memory in this structure until it has been returned thru PVAVCReleaseOutput API." \return "AVCDEC_SUCCESS for success, AVCDEC_FAIL if no picture is available to be displayed, AVCDEC_PICTURE_READY if there is another picture to be displayed." */ OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetOutput(AVCHandle *avcHandle, int *indx, int *release_flag, AVCFrameIO *output); /** This function resets the decoder and expects to see the next IDR slice. \param "avcHandle" "Handle to the AVC decoder library object." 
*/ OSCL_IMPORT_REF void PVAVCDecReset(AVCHandle *avcHandle); /** This function performs clean up operation including memory deallocation. \param "avcHandle" "Handle to the AVC decoder library object." */ OSCL_IMPORT_REF void PVAVCCleanUpDecoder(AVCHandle *avcHandle); //AVCDec_Status EBSPtoRBSP(uint8 *nal_unit,int *size); /** CALLBACK FUNCTION TO BE IMPLEMENTED BY APPLICATION */ /** In AVCHandle structure, userData is a pointer to an object with the following member functions. */ AVCDec_Status CBAVCDec_GetData(uint32 *userData, unsigned char **buffer, unsigned int *size); #ifdef __cplusplus } #endif #endif /* _AVCDEC_API_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/dec/include/pvavcdecoder.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef PVAVCDECODER_H_INCLUDED #define PVAVCDECODER_H_INCLUDED #ifndef PVAVCDECODERINTERFACE_H_INCLUDED #include "pvavcdecoderinterface.h" #endif #ifndef AVCDEC_API_H_INCLUDED #include "avcdec_api.h" #endif // AVC video decoder class PVAVCDecoder : public PVAVCDecoderInterface { public: static PVAVCDecoder* New(void); virtual ~PVAVCDecoder(); virtual int AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers); virtual void AVC_FrameUnbind(int indx); virtual int AVC_FrameBind(int indx, uint8** yuv); virtual void CleanUpAVCDecoder(void); virtual void ResetAVCDecoder(void); virtual int32 DecodeSPS(uint8 *bitstream, int32 buffer_size); virtual int32 DecodePPS(uint8 *bitstream, int32 buffer_size); virtual int32 DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size); virtual bool GetDecOutput(int *indx, int *release, AVCFrameIO* output); virtual void GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right); private: PVAVCDecoder(); bool Construct(void); AVCHandle iAvcHandle; uint8* iDPB; bool* iFrameUsed; uint8** iFramePtr; int iNumFrames; }; #endif ================================================ FILE: RtspCamera/jni/avc_h264/dec/include/pvavcdecoder_factory.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef PVAVCDECODER_FACTORY_H_INCLUDED #define PVAVCDECODER_FACTORY_H_INCLUDED #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef OSCL_MEM_H_INCLUDED #include "oscl_mem.h" #endif class PVAVCDecoderInterface; class PVAVCDecoderFactory { public: /** * Creates an instance of a PVAVCDecoder. If the creation fails, this function will leave. * * @returns A pointer to an instance of PVAVCDecoder as PVAVCDecoderInterface reference or leaves if instantiation fails **/ OSCL_IMPORT_REF static PVAVCDecoderInterface* CreatePVAVCDecoder(void); /** * Deletes an instance of PVAVCDecoder and reclaims all allocated resources. * * @param aVideoDec The PVAVCDecoder instance to be deleted * @returns A status code indicating success or failure of deletion **/ OSCL_IMPORT_REF static bool DeletePVAVCDecoder(PVAVCDecoderInterface* aVideoDec); }; #endif ================================================ FILE: RtspCamera/jni/avc_h264/dec/include/pvavcdecoderinterface.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef PVAVCDECODERINTERFACE_H_INCLUDED #define PVAVCDECODERINTERFACE_H_INCLUDED // includes #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef AVCDEC_API_H_INCLUDED #include "avcdec_api.h" #endif typedef void (*FunctionType_Unbind)(void *, int); typedef int (*FunctionType_Alloc)(void *, int, uint8 **); typedef int (*FunctionType_SPS)(void *, uint, uint); typedef int (*FunctionType_Malloc)(void *, int32, int); typedef void(*FunctionType_Free)(void *, int); // PVAVCDecoderInterface pure virtual interface class class PVAVCDecoderInterface { public: virtual ~PVAVCDecoderInterface() {}; virtual void CleanUpAVCDecoder(void) = 0; virtual void ResetAVCDecoder(void) = 0; virtual int32 DecodeSPS(uint8 *bitstream, int32 buffer_size) = 0; virtual int32 DecodePPS(uint8 *bitstream, int32 buffer_size) = 0; virtual int32 DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size) = 0; virtual bool GetDecOutput(int *indx, int *release, AVCFrameIO* output) = 0; virtual void GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right) = 0; }; #endif // PVAVCDECODERINTERFACE_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/3GPVideoParser.cpp ================================================ /* * Copyright (C) 2009 OrangeLabs * 3GPVideoParser.cpp * * Created on: 12 août 2009 * Author: rglt1266 */ #define LOG_TAG "3GPPSampleReader" #include #include #include #include #include #include "3GPVideoParser.h" /* Variables */ FILE* f = NULL; // File to parse uint32 TimeScale = 0; // Ticks per second uint32 VideoLength = 0; // Video length (time) uint32 VideoWidth = 0; uint32 VideoHeight = 0; char VideoCodec[5]; // Codec type: d263/mp4v.... 
uint32 moovAtomPtr = 0; uint32 moovAtomSize = 0; uint32 trakAtomPtr = 0; uint32 trakAtomSize = 0; /* Buffers and pointers*/ uint8* moovBuff = 0; uint8* sttsPtr = 0; uint8* stcoPtr = 0; uint8* stszPtr = 0; uint8* stscPtr = 0; uint8* stsdPtr = 0; Sample* samplePtr = 0; /** * Endien convert */ uint32 EndienConvert (uint32 input){ return ((input & 0xFF) << 24) | ((input & 0xFF00) << 8) | ((uint32)(input & 0xFF0000) >> 8) | ((uint32)(input & 0xFF000000) >> 24); } /** * Get a uint32 value at a precised position in a uint8 buffer */ uint32 getUint32FromUint8Buffer (uint8* buffer,uint32 offset){ return ( ((buffer[offset]<<24)& 0xff000000) | ((buffer[offset+1]<<16)& 0xff0000) | ((buffer[offset+2]<<8)& 0xff00) | ((buffer[offset+3])& 0xff)); } /** * Find a particular value in a uint8 buffer reading uint32 */ int32 findAtom (uint8* buffer,uint32 bufferSize, uint32 valueToFind){ uint32 tmp; uint32 i = 0; for (i=0;i<(bufferSize-4);i++){ tmp = getUint32FromUint8Buffer(buffer,i); if (tmp == valueToFind){ return i-4; } } return VPAtomError; } /** * Find a particular value in a uint32 buffer */ int32 findAtom (uint32* buffer,uint32 bufferSize, uint32 valueToFind){ uint32 i = 0; for (i=0;i<(bufferSize);i++){ if (EndienConvert(buffer[i]) == valueToFind){ return i; } } return VPAtomError; } /** * Cleanup the parser * * @return error code */ int cleanupParser(void){ /* Clean atom info */ free(moovBuff); VideoWidth = 0; VideoHeight = 0; VideoCodec[0] = '\0'; VideoLength = 0; return VPAtomSucces; } /** * Init the parser * * @param filePath path of the file to read * @param width check if the video width is correct * @param heigth check if the video height is correct * @return error code */ int Init3GPVideoParser (char *filePath){ uint32 anAtomSize = 0; uint32 anAtomType = 0; uint32 trakOffset = 0; int32 pos = 0; int32 fileSize; /* Load file */ f = fopen(filePath,"r"); if (f == NULL) { return VPAtomError; } fseek( f, 0L, SEEK_END ); fileSize = ftell( f ); if (fileSize <= 8 ) return 
VPAtomError; // File is too small ! /* Check if file format is correct ie it's a 3gp file*/ fseek(f,4,SEEK_SET); fread(&anAtomType,sizeof(uint32),1,f); anAtomType = EndienConvert(anAtomType); if (anAtomType != AtomFtyp) return VPAtomError; /* Start parsing from begining*/ rewind (f); // Find Moov Atom while (ftell(f) 0) { int32 trakSize = getUint32FromUint8Buffer(moovBuff,pos); if (findAtom(moovBuff+pos,trakSize,AtomVmhd)){ trakAtomPtr = moovAtomPtr+pos; trakAtomSize = trakSize; break; } else { // This is not the videotrack } // Trying to find new trak pos = findAtom(moovBuff+pos,moovAtomSize-pos,AtomTrak); } if (trakAtomPtr == 0) { return VPAtomError; } trakOffset = trakAtomPtr - moovAtomPtr; // Find MDHD pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomMdhd); if (pos > 0){ uint8* Ptr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name TimeScale = getUint32FromUint8Buffer(Ptr,4); VideoLength = getUint32FromUint8Buffer(Ptr,8); } else { return VPAtomError; } // Find STTS pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStts); if (pos > 0){ sttsPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name } else { return VPAtomError; } // Find STSZ pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsz); if (pos > 0){ stszPtr = moovBuff + trakOffset + pos + 20; // Skip Atom size and Atom name } else { return VPAtomError; } // Find STCO pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStco); if (pos > 0){ stcoPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ... } else { return VPAtomError; } // Find STSC pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsc); if (pos > 0){ stscPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ... 
} else { return VPAtomError; } // Find STSD pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsd); if (pos > 0){ stsdPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name VideoWidth = (getUint32FromUint8Buffer(stsdPtr,32)>>16) & 0xFFFF; VideoHeight = getUint32FromUint8Buffer(stsdPtr,32) & 0xFFFF; VideoCodec[0] = *(stsdPtr+90); VideoCodec[1] = *(stsdPtr+91); VideoCodec[2] = *(stsdPtr+92); VideoCodec[3] = *(stsdPtr+93); VideoCodec[4]= '\0'; } else { return VPAtomError; } /** * Prepare Sample list */ uint32 countChunk = 0; // Total number of chunk uint32 currChunk=0; // Counter for current chunk uint32 currChunkInStsc=0; // Current chunk described in stsc Atom uint32 ChunkAddr = 0; // Current chunk offset uint32 countSample = 0; // Counter for sample in a chunk uint32 currSample = 0; // Counter for current sample (/total sample in file) uint32 SamplePerChunk = 0; // Value sample per chunk uint32 currStscPos = 0; // Current stsc table uint32 Offset = 0; // Offset from ChunkAddr to sample data start int32 currSttsPos = 0; uint32 SameTimestampCount = 0; // For case where n sample have the same timestamp uint32 temp; Sample* currSamplePtr = 0; // Pointer to current Sample Sample* aSample = 0; // Current Sample element bool initList = false; // Boolean changed after first sample is read /* Get "Number of entries" field of stco atom */ countChunk = getUint32FromUint8Buffer(stcoPtr-4,0); /* Init currChunk */ currChunkInStsc = getUint32FromUint8Buffer(stscPtr,currStscPos*12); for (currChunk=0;currChunksize = getUint32FromUint8Buffer(stszPtr,currSample*4); currSample++; /* Get sample addr */ aSample->addr = ChunkAddr + Offset; Offset = Offset + aSample->size; /* Get sample timestamp */ if (SameTimestampCount == 0){ // Read new stts element SameTimestampCount = getUint32FromUint8Buffer(sttsPtr,currSttsPos*8); currSttsPos++; } temp = getUint32FromUint8Buffer(sttsPtr,(currSttsPos-1)*8+4); aSample->timestamp = (uint32)((temp*1000)/TimeScale); 
SameTimestampCount--; /* Set next to NULL */ aSample->next = NULL; /* Update the sample list */ if (initList == false){ samplePtr = aSample; currSamplePtr = aSample; initList = true; } else { currSamplePtr->next = aSample; currSamplePtr = aSample; currSamplePtr->next = NULL; } } } return VPAtomSucces; } /** * Get Videoframe * * @param aOutBuffer buffer to write the videoframe * @param aBufferSize size of the buffer * @param aTimestamp timestamp * @return error code for overrun buffer */ int getFrame (uint8* aOutBuffer,uint32* aBufferSize, uint32* aTimestamp){ // Temp sample to free data Sample* tmp; if (samplePtr != NULL){ if (aOutBuffer == NULL || f==NULL){ return VPAtomError; } fseek(f,samplePtr->addr,SEEK_SET); if (fread(aOutBuffer,1,samplePtr->size,f) != samplePtr->size){ return VPAtomError; } *aTimestamp = samplePtr->timestamp; *aBufferSize = samplePtr->size; /* Free the sample */ tmp = samplePtr; samplePtr = samplePtr->next; free(tmp); return VPAtomSucces; } else { aOutBuffer = NULL; *aBufferSize = 0; *aTimestamp = 0; return VPAtomError; } } /** * Release file by closing it * * @return error code */ int release(){ if (f != NULL){ fclose(f); } return cleanupParser(); } /** * Get the video duration * * @return video duration in seconds ( last 3 digits are ms) */ uint32 getVideoDuration (){ uint32 retValue = 0; retValue = ((VideoLength/TimeScale)*1000)+(VideoLength%TimeScale); return retValue; } /** * Get the video codec * * @return video codec string */ char* getVideoCodec (){ return VideoCodec; } /** * Get video width * * @return video width */ uint32 getVideoWidth (){ return VideoWidth; } /** * Get the video height * * @return video height */ uint32 getVideoHeight(){ return VideoHeight; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/3GPVideoParser.h ================================================ /* * Copyright (C) 2009 OrangeLabs * 3GPVideoParser.h * * Created on: 12 août 2009 * Author: rglt1266 */ #ifndef 
_3GPVIDEOPARSER_H_ #define _3GPVIDEOPARSER_H_ /* Define new types */ typedef unsigned char uint8; typedef unsigned short uint16; typedef short int16; typedef unsigned long uint32; typedef long int32; #define DEBUG 1; /* Define important atoms 4Bytes code (char)*/ #define AtomFtyp 0x66747970 /* File type compatibility atom */ #define AtomMdat 0x6D646174 /* Movie sample data atom */ #define AtomMoov 0x6D6F6F76 /* Movie ressource metadata atom */ #define AtomMdhd 0x6D646864 /* Video media information header atom */ #define AtomMvhd 0x6D766864 /* Video media information header atom */ #define AtomStts 0x73747473 /* Time-to-sample atom */ #define AtomStco 0x7374636F /* Sample-to-chunck atom */ #define AtomTrak 0x7472616B /* Trak atom */ #define AtomStsz 0x7374737A /* Sample size atom */ #define AtomStsc 0x73747363 /* Nb of sample per chunck */ #define AtomStsd 0x73747364 /* Nb of sample per chunck */ #define AtomVmhd 0x766D6864 /* Identifier of a video track */ /* Define error codes */ #define VPAtomError 0 #define VPAtomSucces 1 typedef struct { uint32 ptr; uint32 size; } Atom; struct sample { uint32 addr; uint32 size; uint32 timestamp; struct sample *next; }; typedef struct sample Sample; int Init3GPVideoParser (char *); int release(); int getFrame (uint8*,uint32*, uint32*); uint32 getVideoDuration(); uint32 getVideoWidth(); uint32 getVideoHeight(); char* getVideoCodec(); #endif /* 3GPVIDEOPARSER_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/NativeH264Decoder.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 2009 OrangeLabs * * Author: Alexis Gilabert Senar * Date: 2009-07-01 * ------------------------------------------------------------------- */ #define LOG_TAG "NativeDec" #include "android/log.h" #include "NativeH264Decoder.h" #include "pvavcdecoder.h" #include "3GPVideoParser.h" #include "yuv2rgb.h" // xxx pa try to read nal unit 
type #include "avcdec_api.h" int iSrcWidth = 352; int iSrcHeight = 288; #define MB_BASED_DEBLOCK typedef enum { SPS, PPS, SLICE } DEC_STATE; /* * Global variables * */ PVAVCDecoder *decoder; int parserInitialized = 0; int decoderInitialized = 0; uint8* aOutBuffer; uint8* aInputBuf; DEC_STATE state; AVCFrameIO outVid; /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: InitDecoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitDecoder (JNIEnv * env, jclass clazz){ state = SPS; aOutBuffer = (uint8*)malloc(iSrcWidth*iSrcHeight*3/2); decoder = PVAVCDecoder::New(); return (decoder!=NULL)?1:0; } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: DeinitDecoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitDecoder (JNIEnv * env, jclass clazz){ state = SPS; free(aOutBuffer); delete(decoder); return 1; } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: DecodeAndConvert * Signature: ([B[IJ)[I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DecodeAndConvert (JNIEnv *env, jclass clazz, jbyteArray h264Frame, jintArray decoded) { int32 size = 0; int32 status; int indexFrame; int releaseFrame; /* Set volbuf with h263Frame data*/ jint len = env->GetArrayLength(h264Frame); jbyte data[len]; env->GetByteArrayRegion(h264Frame, 0, len, data); aInputBuf = (uint8*)malloc(len); memcpy(aInputBuf,(uint8*)data,len); size = len; // xxx pa try to read nal unit type int nal_unit_type = (AVCNalUnitType)(aInputBuf[0] & 0x1F); __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "nal_unit_type : %d", nal_unit_type); // xxx pa new switch based on incoming NAL Units instead of fixed state machine 
// this is the only approach to react on SPS/PPS which are part of in-band parameter settings (sended in between) switch (nal_unit_type){ case AVC_NALTYPE_SPS: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: AVC_NALTYPE_SPS"); // xxx pa Reset decoder, prepare it for a new IDR frame. // decoder->ResetAVCDecoder(); // ===========> if (decoder->DecodeSPS(aInputBuf,size)==AVCDEC_SUCCESS){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: state: SPS->PPS"); state = PPS; } else { return 0; } break; case AVC_NALTYPE_PPS: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: AVC_NALTYPE_PPS"); if (state != PPS) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "BREAK not in state: PPS"); break; } // ===========> if (decoder->DecodePPS(aInputBuf,size)==AVCDEC_SUCCESS){ state = SLICE; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: state: PPS->SLICE"); } else { // xxx pa reset state to SPS lookup __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: reset state: PPS->SPS"); state = SPS; return 0; } break; case AVC_NALTYPE_IDR : // xxx pa Reset decoder, prepare it for a new IDR frame. decoder->ResetAVCDecoder(); // don't break case AVC_NALTYPE_SLICE : __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: AVC_NALTYPE_SLICE or AVC_NALTYPE_IDR"); // ===========> if ((status=decoder->DecodeAVCSlice(aInputBuf,&size))>AVCDEC_FAIL) { // ===========> // decoder->GetDecOutput(&indexFrame,&releaseFrame,&outVid); // xxx pa react on dbp:: DPBInitBuffer return AVC_NO_BUFFER failures (which should not happen) if ((decoder->GetDecOutput(&indexFrame,&releaseFrame,&outVid) == AVC_NO_BUFFER)) { __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "decode: GetDecOutput failed"); return 0; } if (releaseFrame == 1){ // ===========> decoder->AVC_FrameUnbind(indexFrame); } /* Copy result to YUV array ! 
*/ memcpy(aOutBuffer,outVid.YCbCr[0],iSrcWidth*iSrcHeight); memcpy(aOutBuffer+(iSrcWidth*iSrcHeight),outVid.YCbCr[1],(iSrcWidth*iSrcHeight)/4); memcpy(aOutBuffer+(iSrcWidth*iSrcHeight)+((iSrcWidth*iSrcHeight)/4),outVid.YCbCr[2],(iSrcWidth*iSrcHeight)/4); /* Create the output buffer */ uint32* resultBuffer= (uint32*) malloc(iSrcWidth*iSrcHeight*sizeof(uint32)); if (resultBuffer == NULL) return 0; /********** Convert to rgb ***********/ convert(iSrcWidth,iSrcHeight,aOutBuffer,resultBuffer); /* Return Bitmap image */ (env)->SetIntArrayRegion(decoded, 0, iSrcWidth*iSrcHeight, (const jint*)resultBuffer); free(resultBuffer); } else { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "status: %ld",status); } break; default: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "decode: UNKNOWN NAL unit type: %d", nal_unit_type); } return 1; } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: InitParser * Signature: (Ljava/lang/String;)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitParser (JNIEnv *env, jclass clazz, jstring pathToFile){ return 0; } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: DeinitParser * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitParser (JNIEnv *env, jclass clazz){ parserInitialized = 0; return release(); } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoLength * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoLength (JNIEnv *env, jclass clazz) { jint videoLength = getVideoDuration(); return videoLength; } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoWidth * Signature: ()I */ JNIEXPORT 
jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoWidth (JNIEnv *env, jclass clazz) { return getVideoWidth(); } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoHeight * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoHeight (JNIEnv *env, jclass clazz) { return getVideoHeight(); } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoCoding * Signature: ()Ljava/lang/String; */ JNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoCoding (JNIEnv *env, jclass clazz) { char* charVideoCoding = getVideoCodec(); jstring stringVideoCoding = (env)->NewStringUTF(charVideoCoding); return stringVideoCoding; } /** * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoSample * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/VideoSample */ JNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoSample (JNIEnv *env, jclass clazz, jintArray Decoded) { jobject object = NULL; // Return created object return object; } /** * This is called by the VM when the shared library is first loaded. 
*/ jint JNI_OnLoad(JavaVM* vm, void* reserved) { JNIEnv* env = NULL; jint result = -1; if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) { goto bail; } /* success -- return valid version number */ result = JNI_VERSION_1_4; bail: return result; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/NativeH264Decoder.h ================================================ /* DO NOT EDIT THIS FILE - it is machine generated */ #include /* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder */ #ifndef _Included_NativeH264Decoder #define _Included_NativeH264Decoder #ifdef __cplusplus extern "C" { #endif /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: InitDecoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitDecoder (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: DeinitDecoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitDecoder (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: DecodeAndConvert * Signature: ([B[IJ)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DecodeAndConvert (JNIEnv *, jclass, jbyteArray, jintArray); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: InitParser * Signature: (Ljava/lang/String;)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitParser (JNIEnv *, jclass, jstring); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: 
DeinitParser * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitParser (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: getVideoLength * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoLength (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: getVideoWidth * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoWidth (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: getVideoHeight * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoHeight (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: getVideoCoding * Signature: ()Ljava/lang/String; */ JNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoCoding (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder * Method: getVideoSample * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/decoder/VideoSample; */ JNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoSample (JNIEnv *, jclass, jintArray); #ifdef __cplusplus } #endif #endif ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/avc_bitstream.cpp ================================================ /* ------------------------------------------------------------------ * 
Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "avcdec_bitstream.h" /* Swapping may not be needed anymore since we read one byte at a time and perform EBSP to RBSP conversion in bitstream. */ #ifdef LITTLE_ENDIAN #if (WORD_SIZE==32) /* this can be replaced with assembly instructions */ #define SWAP_BYTES(x) ((((x)&0xFF)<<24) | (((x)&0xFF00)<<8) | (((x)&0xFF0000)>>8) | (((x)&0xFF000000)>>24)) #else /* for 16-bit */ #define SWAP_BYTES(x) ((((x)&0xFF)<<8) | (((x)&0xFF00)>>8)) #endif #else #define SWAP_BYTES(x) (x) #endif /* array for trailing bit pattern as function of number of bits */ /* the first one is unused. */ const static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80}; /* ======================================================================== */ /* Function : BitstreamInit() */ /* Date : 11/4/2003 */ /* Purpose : Populate bitstream structure with bitstream buffer and size */ /* it also initializes internal data */ /* In/out : */ /* Return : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if failed. 
*/ /* Modified : */ /* ======================================================================== */ /* |--------|--------|----~~~~~-----|---------|---------|---------| ^ ^read_pos ^data_end_pos bitstreamBuffer <---------> current_word |xxxxxxxxxxxxx----| = current_word 32 or 16 bits <------------> bit_left ======================================================================== */ /* ======================================================================== */ /* Function : BitstreamNextWord() */ /* Date : 12/4/2003 */ /* Purpose : Read up to machine word. */ /* In/out : */ /* Return : Next word with emulation prevention code removed. Everything in the bitstream structure got modified except current_word */ /* Modified : */ /* ======================================================================== */ AVCDec_Status BitstreamInit(AVCDecBitstream *stream, uint8 *buffer, int size) { EBSPtoRBSP(buffer, &size); stream->incnt = 0; stream->incnt_next = 0; stream->bitcnt = 0; stream->curr_word = stream->next_word = 0; stream->read_pos = 0; stream->bitstreamBuffer = buffer; stream->data_end_pos = size; stream->nal_size = size; return AVCDEC_SUCCESS; } /* ======================================================================== */ /* Function : AVC_BitstreamFillCache() */ /* Date : 1/1/2005 */ /* Purpose : Read up to machine word. 
*/ /* In/out : */ /* Return : Read in 4 bytes of input data */ /* Modified : */ /* ======================================================================== */ AVCDec_Status AVC_BitstreamFillCache(AVCDecBitstream *stream) { uint8 *bitstreamBuffer = stream->bitstreamBuffer; uint8 *v; int num_bits, i; stream->curr_word |= (stream->next_word >> stream->incnt); // stream->incnt cannot be 32 stream->next_word <<= (31 - stream->incnt); stream->next_word <<= 1; num_bits = stream->incnt_next + stream->incnt; if (num_bits >= 32) { stream->incnt_next -= (32 - stream->incnt); stream->incnt = 32; return AVCDEC_SUCCESS; } /* this check can be removed if there is additional extra 4 bytes at the end of the bitstream */ v = bitstreamBuffer + stream->read_pos; if (stream->read_pos > stream->data_end_pos - 4) { if (stream->data_end_pos <= stream->read_pos) { stream->incnt = num_bits; stream->incnt_next = 0; return AVCDEC_SUCCESS; } stream->next_word = 0; for (i = 0; i < stream->data_end_pos - stream->read_pos; i++) { stream->next_word |= (v[i] << ((3 - i) << 3)); } stream->read_pos = stream->data_end_pos; stream->curr_word |= (stream->next_word >> num_bits); // this is safe stream->next_word <<= (31 - num_bits); stream->next_word <<= 1; num_bits = i << 3; stream->incnt += stream->incnt_next; stream->incnt_next = num_bits - (32 - stream->incnt); if (stream->incnt_next < 0) { stream->incnt += num_bits; stream->incnt_next = 0; } else { stream->incnt = 32; } return AVCDEC_SUCCESS; } stream->next_word = ((uint32)v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3]; stream->read_pos += 4; stream->curr_word |= (stream->next_word >> num_bits); // this is safe stream->next_word <<= (31 - num_bits); stream->next_word <<= 1; stream->incnt_next += stream->incnt; stream->incnt = 32; return AVCDEC_SUCCESS; } /* ======================================================================== */ /* Function : BitstreamReadBits() */ /* Date : 11/4/2003 */ /* Purpose : Read up to machine word. 
*/
/* In/out   :                                                               */
/* Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits    */
/*            is greater than the word-size, AVCDEC_PACKET_LOSS or          */
/*            AVCDEC_NO_DATA if callback to get data fails.                 */
/* Modified :                                                               */
/* ======================================================================== */
/* Reads nBits from the 32-bit cache word (refilling it if short) and
   advances the read position. NOTE(review): despite the header above, the
   code has no runtime check that nBits <= 32 -- caller must guarantee it. */
AVCDec_Status BitstreamReadBits(AVCDecBitstream *stream, int nBits, uint *code)
{
    if (stream->incnt < nBits)
    {
        /* frame-based decoding */
        AVC_BitstreamFillCache(stream);
    }
    /* take the top nBits of the cache word */
    *code = stream->curr_word >> (32 - nBits);
    BitstreamFlushBits(stream, nBits);

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/* Function : BitstreamShowBits()                                           */
/* Date     : 11/4/2003                                                     */
/* Purpose  : Show up to machine word without advancing the pointer.        */
/* In/out   :                                                               */
/* Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits    */
/*            is greater than the word-size, AVCDEC_NO_DATA if it needs     */
/*            to callback to get data.                                      */
/* Modified :                                                               */
/* ======================================================================== */
/* Same as BitstreamReadBits but does not flush: peeks without consuming. */
AVCDec_Status BitstreamShowBits(AVCDecBitstream *stream, int nBits, uint *code)
{
    if (stream->incnt < nBits)
    {
        /* frame-based decoding */
        AVC_BitstreamFillCache(stream);
    }

    *code = stream->curr_word >> (32 - nBits);

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/* Function : BitstreamRead1Bit()                                           */
/* Date     : 11/4/2003                                                     */
/* Purpose  : Read 1 bit from the bitstream.                                */
/* In/out   :                                                               */
/* Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits    */
/*            is greater than the word-size, AVCDEC_PACKET_LOSS or          */
/*            AVCDEC_NO_DATA if callback to get data fails.                 */
/* Modified :                                                               */
/* ======================================================================== */
/* Specialised single-bit read: returns the MSB of the cache word. */
AVCDec_Status BitstreamRead1Bit(AVCDecBitstream *stream, uint *code)
{
    if (stream->incnt < 1)
    {
        /* frame-based decoding */
        AVC_BitstreamFillCache(stream);
    }
    *code = stream->curr_word >> 31;
    BitstreamFlushBits(stream, 1);

    return AVCDEC_SUCCESS;
}

/* Skips forward to the next byte boundary, discarding the stuffing bits.
   NOTE(review): relies on stream->incnt being a signed type -- the "< 0"
   branch would be dead code otherwise; confirm in AVCDecBitstream. */
AVCDec_Status BitstreamByteAlign(AVCDecBitstream *stream)
{
    uint n_stuffed;

    /* number of bits to the next byte boundary (0..7) */
    n_stuffed = (8 - (stream->bitcnt & 0x7)) & 0x7; /*  07/05/01 */

    stream->bitcnt += n_stuffed;
    stream->incnt -= n_stuffed;

    if (stream->incnt < 0)
    {
        /* consumed more than the cache held; clamp and roll back bitcnt */
        stream->bitcnt += stream->incnt;
        stream->incnt = 0;
    }
    stream->curr_word <<= n_stuffed;

    return AVCDEC_SUCCESS;
}

/* check whether there are more RBSP data. */
/* ignore the emulation prevention code, assume it has been taken out. */
/* Returns FALSE once only the rbsp_trailing_bits pattern (a 1 bit followed
   by zeros) remains in the cache; TRUE while payload bits are left. */
bool more_rbsp_data(AVCDecBitstream *stream)
{
    int total_bit_left;
    uint code;

    if (stream->read_pos >= stream->nal_size)
    {
        total_bit_left = stream->incnt_next + stream->incnt;
        if (total_bit_left <= 0)
        {
            return FALSE;
        }
        else if (total_bit_left <= 8)
        {
            /* compare against the precomputed trailing-bit pattern table */
            BitstreamShowBits(stream, total_bit_left, &code);
            if (code == trailing_bits[total_bit_left])
            {
                return FALSE;
            }
        }
    }
    return TRUE;
}


================================================
FILE: RtspCamera/jni/avc_h264/dec/src/avcdec_api.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ /** This file contains application function interfaces to the AVC decoder library. @publishedAll */ #include "oscl_types.h" #include "oscl_mem.h" #include "avcdec_api.h" #include "avcdec_lib.h" #include "avcdec_bitstream.h" // xxx pa #define LOG_TAG "avcdec_api" #include "android/log.h" /* ======================================================================== */ /* Function : EBSPtoRBSP() */ /* Date : 11/4/2003 */ /* Purpose : Convert EBSP to RBSP and overwrite it. */ /* Assuming that forbidden_zero, nal_ref_idc and nal_unit_type */ /* (first byte), has been taken out of the nal_unit. */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ /** @pseudocode " NumBytesInRBSP = 0; for(i=0:i< *size; i++){ if(i+2 < *size && next_bits(24)==0x000003){ rbsp_byte[NumBytesInRBSP++]; rbsp_byte[NumBytesInRBSP++]; i+=2; emulation_prevention_three_byte (0x03) } else rbsp_byte[NumBytesInRBSP++]; }" */ AVCDec_Status EBSPtoRBSP(uint8 *nal_unit, int *size) { int i, j; int count = 0; /* This code is based on EBSPtoRBSP of JM */ j = 0; for (i = 0; i < *size; i++) { if (count == 2 && nal_unit[i] == 0x03) { i++; count = 0; } nal_unit[j] = nal_unit[i]; if (nal_unit[i] == 0x00) count++; else count = 0; j++; } *size = j; return AVCDEC_SUCCESS; } /* ======================================================================== */ /* Function : PVAVCAnnexBGetNALUnit() */ /* Date : 11/3/2003 */ /* Purpose : Parse a NAL from byte stream format. */ /* In/out : */ /* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. 
*/
/* Modified :                                                               */
/* ======================================================================== */
/**
@pseudocode "
    byte_stream_nal_unit(NumBytesInNalunit){
        while(next_bits(24) != 0x000001)
            zero_byte
        if(more_data_in_byte_stream()){
            start_code_prefix_one_3bytes // equal 0x000001
            nal_unit(NumBytesInNALunit)
        }
    }"
*/
/* Locates the next Annex B NAL unit: skips leading zero bytes, expects a
   0x000001 start code, then scans for the following start code to delimit
   the NAL. On success *nal_unit points at the NAL payload and *size is its
   length; AVCDEC_NO_NEXT_SC means no second start code was found. */
OSCL_EXPORT_REF AVCDec_Status PVAVCAnnexBGetNALUnit(uint8 *bitstream, uint8 **nal_unit, int *size)
{
    int i, j, FoundStartCode = 0;
    int end;

    /* find the first non-zero byte.
       Fixed: test the bound BEFORE dereferencing bitstream[i]; the original
       "bitstream[i] == 0 && i < *size" read one byte past the buffer whenever
       the input consisted entirely of zero bytes. */
    i = 0;
    while (i < *size && bitstream[i] == 0)
    {
        i++;
    }
    if (i >= *size)
    {
        *nal_unit = bitstream;
        return AVCDEC_FAIL; /* cannot find any start_code_prefix. */
    }
    else if (bitstream[i] != 0x1)
    {
        i = -1;  /* start_code_prefix is not at the beginning, continue */
    }

    i++;
    *nal_unit = bitstream + i; /* point to the beginning of the NAL unit */

    j = end = i;
    while (!FoundStartCode)
    {
        while ((j + 1 < *size) && (bitstream[j] != 0 || bitstream[j+1] != 0)) /* see 2 consecutive zero bytes */
        {
            j++;
        }
        end = j;   /* stop and check for start code */
        while (j + 2 < *size && bitstream[j+2] == 0) /* keep reading for zero byte */
        {
            j++;
        }
        if (j + 2 >= *size)
        {
            *size -= i;
            return AVCDEC_NO_NEXT_SC;  /* cannot find the second start_code_prefix */
        }
        if (bitstream[j+2] == 0x1)
        {
            FoundStartCode = 1;
        }
        else
        {
            /* could be emulation code 0x3 */
            j += 2; /* continue the search */
        }
    }

    *size = end - i;
    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/* Function : PVAVCGetNALType()                                             */
/* Date     : 11/4/2003                                                     */
/* Purpose  : Sniff NAL type from the bitstream                             */
/* In/out   :                                                               */
/* Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail.
*/ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc) { int forbidden_zero_bit; if (size > 0) { forbidden_zero_bit = bitstream[0] >> 7; if (forbidden_zero_bit != 0) return AVCDEC_FAIL; *nal_ref_idc = (bitstream[0] & 0x60) >> 5; *nal_type = bitstream[0] & 0x1F; return AVCDEC_SUCCESS; } return AVCDEC_FAIL; } /* ======================================================================== */ /* Function : PVAVCDecSeqParamSet() */ /* Date : 11/4/2003 */ /* Purpose : Initialize sequence, memory allocation if necessary. */ /* In/out : */ /* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCDec_Status PVAVCDecSeqParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet"); AVCDec_Status status; AVCDecObject *decvid; AVCCommonObj *video; AVCDecBitstream *bitstream; void *userData = avcHandle->userData; bool first_seq = FALSE; int i; DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "PVAVCDecSeqParamSet", -1, -1); if (avcHandle->AVCObject == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (AVCObject == NULL)"); first_seq = TRUE; //avcHandle->memory_usage = 0; /* allocate AVCDecObject */ avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecObject), 0/*DEFAULT_ATTR*/); if (avcHandle->AVCObject == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (AVCObject == NULL) return: AVCDEC_MEMORY_FAIL"); return AVCDEC_MEMORY_FAIL; } decvid = (AVCDecObject*) avcHandle->AVCObject; oscl_memset(decvid, 0, sizeof(AVCDecObject)); decvid->common = (AVCCommonObj*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), 0); if (decvid->common == NULL) { __android_log_print(ANDROID_LOG_INFO, 
LOG_TAG, "PVAVCDecSeqParamSet (common == NULL) return: AVCDEC_MEMORY_FAIL"); return AVCDEC_MEMORY_FAIL; } video = decvid->common; oscl_memset(video, 0, sizeof(AVCCommonObj)); video->seq_parameter_set_id = 9999; /* set it to some illegal value */ decvid->bitstream = (AVCDecBitstream *) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecBitstream), 1/*DEFAULT_ATTR*/); if (decvid->bitstream == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (bitstream == NULL) return: AVCDEC_MEMORY_FAIL"); return AVCDEC_MEMORY_FAIL; } decvid->bitstream->userData = avcHandle->userData; /* callback for more data */ decvid->avcHandle = avcHandle; decvid->debugEnable = avcHandle->debugEnable; } decvid = (AVCDecObject*) avcHandle->AVCObject; video = decvid->common; bitstream = decvid->bitstream; /* check if we can reuse the memory without re-allocating it. */ /* always check if(first_seq==TRUE) */ /* Conversion from EBSP to RBSP */ video->forbidden_bit = nal_unit[0] >> 7; if (video->forbidden_bit) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (forbidden_bit) return: AVCDEC_FAIL"); return AVCDEC_FAIL; } video->nal_ref_idc = (nal_unit[0] & 0x60) >> 5; video->nal_unit_type = (AVCNalUnitType)(nal_unit[0] & 0x1F); if (video->nal_unit_type != AVC_NALTYPE_SPS) /* not a SPS NAL */ { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (nal_unit_type != AVC_NALTYPE_SPS) return: AVCDEC_FAIL"); return AVCDEC_FAIL; } /* Initialize bitstream structure*/ BitstreamInit(bitstream, nal_unit + 1, nal_size - 1); /* if first_seq == TRUE, allocate the following memory */ if (first_seq == TRUE) { video->currSeqParams = NULL; /* initialize it to NULL */ video->currPicParams = NULL; /* There are 32 pointers to sequence param set, seqParams. 
There are 255 pointers to picture param set, picParams.*/ for (i = 0; i < 32; i++) decvid->seqParams[i] = NULL; for (i = 0; i < 256; i++) decvid->picParams[i] = NULL; video->MbToSliceGroupMap = NULL; video->mem_mgr_ctrl_eq_5 = FALSE; video->newPic = TRUE; video->newSlice = TRUE; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet video->newSlice = TRUE"); video->currPic = NULL; video->currFS = NULL; video->prevRefPic = NULL; video->mbNum = 0; // MC_Conceal /* Allocate sliceHdr. */ video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), 5/*DEFAULT_ATTR*/); if (video->sliceHdr == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (sliceHdr == NULL) return: AVCDEC_FAIL"); return AVCDEC_MEMORY_FAIL; } video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), 3/*DEFAULT_ATTR*/); if (video->decPicBuf == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (decPicBuf == NULL) return: AVCDEC_FAIL"); return AVCDEC_MEMORY_FAIL; } oscl_memset(video->decPicBuf, 0, sizeof(AVCDecPicBuffer)); } /* Decode SPS, allocate video->seqParams[i] and assign video->currSeqParams */ status = DecodeSPS(decvid, bitstream); if (status != AVCDEC_SUCCESS) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet (status != AVCDEC_SUCCESS) return: status: %d", status); return status; } __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecSeqParamSet return: status: AVCDEC_SUCCESS"); return AVCDEC_SUCCESS; } /* ======================================================================== */ /* Function : PVAVCDecGetSeqInfo() */ /* Date : 11/4/2003 */ /* Purpose : Get sequence parameter info of the last decoded SPS */ /* In/out : */ /* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */ /* Modified : */ /* 12/20/03: change input argument, use structure instead. 
*/
/* ======================================================================== */
/**
Report frame geometry derived from the most recently decoded SPS: frame size in
pixels, cropping rectangle (first/last pixel indices, per H.264 subclause 7.4.2.1)
and a DPB-size-based estimate of the number of frame buffers needed.
Fails if no SPS has been decoded yet.
*/
OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetSeqInfo(AVCHandle *avcHandle, AVCDecSPSInfo *seqInfo)
{
    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    int PicWidthInMbs, PicHeightInMapUnits, FrameHeightInMbs;

    if (decvid == NULL || decvid->lastSPS == NULL)
    {
        return AVCDEC_FAIL;
    }

    video = decvid->common;

    PicWidthInMbs = decvid->lastSPS->pic_width_in_mbs_minus1 + 1;
    PicHeightInMapUnits = decvid->lastSPS->pic_height_in_map_units_minus1 + 1 ;
    FrameHeightInMbs = (2 - decvid->lastSPS->frame_mbs_only_flag) * PicHeightInMapUnits ;

    /* dimensions in pixels: macroblock units are 16x16 */
    seqInfo->FrameWidth = PicWidthInMbs << 4;
    seqInfo->FrameHeight = FrameHeightInMbs << 4;

    seqInfo->frame_only_flag = decvid->lastSPS->frame_mbs_only_flag;

    if (decvid->lastSPS->frame_cropping_flag)
    {
        /* crop offsets are in 2-pixel (frame) or 4-pixel (field-coded vertical) units;
           crop_right/crop_bottom here are LAST visible pixel indices, not widths */
        seqInfo->frame_crop_left = 2 * decvid->lastSPS->frame_crop_left_offset;
        seqInfo->frame_crop_right = seqInfo->FrameWidth - (2 * decvid->lastSPS->frame_crop_right_offset + 1);

        if (seqInfo->frame_only_flag)
        {
            seqInfo->frame_crop_top = 2 * decvid->lastSPS->frame_crop_top_offset;
            seqInfo->frame_crop_bottom = seqInfo->FrameHeight - (2 * decvid->lastSPS->frame_crop_bottom_offset + 1);
            /* Note in 7.4.2.1, there is a constraint on the value of frame_crop_left and
            frame_crop_top such that they have to be less than or equal to
            frame_crop_right/2 and frame_crop_bottom/2, respectively. */
        }
        else /* field-coded: vertical crop offsets count frame lines in pairs */
        {
            seqInfo->frame_crop_top = 4 * decvid->lastSPS->frame_crop_top_offset;
            seqInfo->frame_crop_bottom = seqInfo->FrameHeight - (4 * decvid->lastSPS->frame_crop_bottom_offset + 1);
            /* Note in 7.4.2.1, there is a constraint on the value of frame_crop_left and
            frame_crop_top such that they have to be less than or equal to
            frame_crop_right/2 and frame_crop_bottom/4, respectively. */
        }
    }
    else /* no cropping flag, just give the first and last pixel */
    {
        seqInfo->frame_crop_bottom = seqInfo->FrameHeight - 1;
        seqInfo->frame_crop_right = seqInfo->FrameWidth - 1;
        seqInfo->frame_crop_top = seqInfo->frame_crop_left = 0;
    }

    /* number of frame buffers = level-dependent max DPB size (bytes, via MaxDPBX2
       lookup) divided by the size of one YCbCr 4:2:0 frame (3/2 * W*H, in MB units),
       plus one for the current frame; clamped to MAX_FS */
    seqInfo->num_frames = (uint32)(MaxDPBX2[(uint32)mapLev2Idx[decvid->lastSPS->level_idc]] << 2) / (3 * PicWidthInMbs * PicHeightInMapUnits) + 1;

    if (seqInfo->num_frames >= MAX_FS)
    {
        seqInfo->num_frames = MAX_FS;
    }

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecPicParamSet() */
/*  Date     : 11/4/2003 */
/*  Purpose  : Initialize picture */
/*             create reference picture list. */
/*  In/out   : */
/*  Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */
/*  Modified : */
/* ======================================================================== */
/**
Since PPS doesn't contain much data, most of the picture initialization will
be done after decoding the slice header in PVAVCDecodeSlice. */
OSCL_EXPORT_REF AVCDec_Status PVAVCDecPicParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecPicParamSet");
    AVCDec_Status status;
    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    AVCDecBitstream *bitstream;

    if (decvid == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecPicParamSet return: AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    video = decvid->common;
    bitstream = decvid->bitstream;

    /* 1. Convert EBSP to RBSP. Create bitstream structure */
    /* parse the one-byte NAL header (forbidden_zero_bit, nal_ref_idc, nal_unit_type) */
    video->forbidden_bit = nal_unit[0] >> 7;
    video->nal_ref_idc = (nal_unit[0] & 0x60) >> 5;
    video->nal_unit_type = (AVCNalUnitType)(nal_unit[0] & 0x1F);

    if (video->nal_unit_type != AVC_NALTYPE_PPS) /* not a PPS NAL */
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecPicParamSet (nal_unit_type != AVC_NALTYPE_PPS) return: AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    /* 2. Initialize bitstream structure (skip the NAL header byte) */
    BitstreamInit(bitstream, nal_unit + 1, nal_size - 1);

    /* 2. Decode pic_parameter_set_rbsp syntax. Allocate video->picParams[i] and assign to currPicParams */
    status = DecodePPS(decvid, video, bitstream);
    if (status != AVCDEC_SUCCESS)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecPicParamSet (status != AVCDEC_SUCCESS) return: status: %d", status);
        return status;
    }

    video->SliceGroupChangeRate = video->currPicParams->slice_group_change_rate_minus1 + 1 ;

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecPicParamSet return: status: AVCDEC_SUCCESS");
    return AVCDEC_SUCCESS;
}

/* SEI NAL units are accepted but ignored by this decoder. */
OSCL_EXPORT_REF AVCDec_Status PVAVCDecSEI(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size)
{
    OSCL_UNUSED_ARG(avcHandle);
    OSCL_UNUSED_ARG(nal_unit);
    OSCL_UNUSED_ARG(nal_size);

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecodeSlice() */
/*  Date     : 11/4/2003 */
/*  Purpose  : Decode one NAL unit. */
/*  In/out   : */
/*  Return   : See enum AVCDec_Status for return values.
*/
/*  Modified : */
/* ======================================================================== */
/**
Decode one slice NAL unit. Handles: NAL header parsing and type filtering,
slice header decoding, new-picture detection, concealment of missing
macroblocks / frame_num gaps, reference list setup and slice data decoding.
Returns AVCDEC_PICTURE_READY when the picture is complete,
AVCDEC_PICTURE_OUTPUT_READY when a (possibly concealed) picture must be
retrieved via PVAVCDecGetOutput before decoding can continue.
*/
OSCL_EXPORT_REF AVCDec_Status PVAVCDecodeSlice(AVCHandle *avcHandle, uint8 *buffer, int buf_size)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice");
    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    AVCDecBitstream *bitstream;
    AVCDec_Status status;

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --1");

    if (decvid == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (decvid == NULL) return: AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    video = decvid->common;
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --2 video->newSlice: %d", video->newSlice);
    bitstream = decvid->bitstream;

    /* memory_management_control_operation 5 pending: caller must drain output first */
    if (video->mem_mgr_ctrl_eq_5)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (video->mem_mgr_ctrl_eq_5) return: AVCDEC_PICTURE_OUTPUT_READY");
        return AVCDEC_PICTURE_OUTPUT_READY; // to flushout frame buffers
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --3");

    if (video->newSlice)
    {
        /* 2. Check NAL type */
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice 2. Check NAL type");
        if (buffer == NULL)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (buffer == NULL) return: AVCDEC_FAIL");
            return AVCDEC_FAIL;
        }
        video->prev_nal_unit_type = video->nal_unit_type;
        video->forbidden_bit = buffer[0] >> 7;
        video->nal_ref_idc = (buffer[0] & 0x60) >> 5;
        video->nal_unit_type = (AVCNalUnitType)(buffer[0] & 0x1F);

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice nal_unit_type =%d", video->nal_unit_type);

        if (video->nal_unit_type == AVC_NALTYPE_AUD)
        {
            /* access unit delimiter: nothing to decode */
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (nal_unit_type == AVC_NALTYPE_AUD) return: AVCDEC_SUCCESS");
            return AVCDEC_SUCCESS;
        }

        /* only plain coded slices and IDR slices are handled here */
        if (video->nal_unit_type != AVC_NALTYPE_SLICE &&
                video->nal_unit_type != AVC_NALTYPE_IDR)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (nal_unit_type != AVC_NALTYPE_IDR) return: AVCDEC_NOT_SUPPORTED");
            return AVCDEC_NOT_SUPPORTED; /* not supported */
        }

        /* NAL types 2..4 are data-partitioned slices, not supported */
        if (video->nal_unit_type >= 2 && video->nal_unit_type <= 4)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (video->nal_unit_type >= 2 && <= 4) return: AVCDEC_NOT_SUPPORTED");
            return AVCDEC_NOT_SUPPORTED; /* not supported */
        }
        else
        {
            video->slice_data_partitioning = FALSE;
        }

        video->newSlice = FALSE;

        /* Initialize bitstream structure (skip the NAL header byte) */
        BitstreamInit(bitstream, buffer + 1, buf_size - 1);

        /* 2.1 Decode Slice Header (separate function)*/
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice 2.1 Decode Slice Header");
        status = DecodeSliceHeader(decvid, video, bitstream);
        if (status != AVCDEC_SUCCESS)
        {
            video->newSlice = TRUE; /* re-arm header parsing for the next NAL */
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (status != AVCDEC_SUCCESS) return: status: %d", status);
            return status;
        }

        /* new-picture detection: frame_num changed, or first_mb_in_slice went
           backwards under constrained_set1 (slice of a new picture) */
        if (video->sliceHdr->frame_num != video->prevFrameNum ||
                (video->sliceHdr->first_mb_in_slice < (uint)video->mbNum &&
                 video->currSeqParams->constrained_set1_flag == 1))
        {
            video->newPic = TRUE;
            if (video->numMBs > 0)
            {
                /* the previous picture was left unfinished: conceal its missing
                   macroblocks, store it to the DPB under its own NAL type, and
                   tell the caller to fetch it before this slice is decoded */
                // Conceal missing MBs of previously decoded frame
                ConcealSlice(decvid, video->PicSizeInMbs - video->numMBs, video->PicSizeInMbs);  // Conceal
                video->numMBs = 0;

                // DeblockPicture(video); // No need to deblock

                /* 3.2 Decoded frame reference marking. */
                /* 3.3 Put the decoded picture in output buffers */
                /* set video->mem_mge_ctrl_eq_5 */
                AVCNalUnitType temp = video->nal_unit_type;
                video->nal_unit_type = video->prev_nal_unit_type; /* store under the OLD picture's type */
                StorePictureInDPB(avcHandle, video);
                video->nal_unit_type = temp;
                video->mbNum = 0; // MC_Conceal
                __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (video->numMBs > 0) return: AVCDEC_PICTURE_OUTPUT_READY");
                return AVCDEC_PICTURE_OUTPUT_READY;
            }
        }

        if (video->nal_unit_type == AVC_NALTYPE_IDR)
        {
            /* IDR resets the frame numbering */
            video->prevFrameNum = 0;
            video->PrevRefFrameNum = 0;
        }

        if (!video->currSeqParams->gaps_in_frame_num_value_allowed_flag)
        {
            /* no gaps allowed, frame_num has to increase by one only */
            /* if(sliceHdr->frame_num != (video->PrevRefFrameNum + 1)%video->MaxFrameNum) */
            if (video->sliceHdr->frame_num != video->PrevRefFrameNum &&
                    video->sliceHdr->frame_num != (video->PrevRefFrameNum + 1) % video->MaxFrameNum)
            {
                /* a whole reference frame was lost: synthesize a concealed frame
                   for the missing frame_num, store it, and ask the caller to
                   retrieve it before continuing with the current slice */
                // Conceal missing MBs of previously decoded frame
                video->numMBs = 0;
                video->newPic = TRUE;
                video->prevFrameNum++; // FIX
                video->PrevRefFrameNum++;
                AVCNalUnitType temp = video->nal_unit_type;
                video->nal_unit_type = AVC_NALTYPE_SLICE; //video->prev_nal_unit_type;
                status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
                if (status != AVCDEC_SUCCESS)
                {
                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice gaps_in_frame_num_value_allowed_flag DPBInitBuffer (status != AVCDEC_SUCCESS) return: status: %d", status);
                    return status;
                }
                video->currFS->IsOutputted = 0x01;
                video->currFS->IsReference = 3;
                video->currFS->IsLongTerm = 0;

                DecodePOC(video);
                /* find an empty memory from DPB and assigned to currPic */
                DPBInitPic(video, video->PrevRefFrameNum % video->MaxFrameNum);
                RefListInit(video);
                ConcealSlice(decvid, 0, video->PicSizeInMbs);  // Conceal
                video->currFS->IsOutputted |= 0x02; //conceal frame

                /* 3.2 Decoded frame reference marking. */
                /* 3.3 Put the decoded picture in output buffers */
                /* set video->mem_mge_ctrl_eq_5 */
                video->mbNum = 0; // Conceal
                StorePictureInDPB(avcHandle, video);
                video->nal_unit_type = temp;

                __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice gaps_in_frame_num_value_allowed_flag return: AVCDEC_PICTURE_OUTPUT_READY");
                return AVCDEC_PICTURE_OUTPUT_READY;
            }
        }
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --4");

    if (video->newPic == TRUE)
    {
        /* reserve a DPB frame buffer for the new picture */
        status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
        if (status != AVCDEC_SUCCESS)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice DPBInitBuffer->(newPic == TRUE) return: status: %d", status);
            return status;
        }
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --5");

    video->newSlice = TRUE;

    /* function pointer setting at slice-level */
    // OPTIMIZE
    decvid->residual_block = &residual_block_cavlc;

    /* derive picture order count */
    if (video->newPic == TRUE)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --5.1");
        video->numMBs = video->PicSizeInMbs;

        if (video->nal_unit_type != AVC_NALTYPE_IDR && video->currSeqParams->gaps_in_frame_num_value_allowed_flag)
        {
            if (video->sliceHdr->frame_num != (video->PrevRefFrameNum + 1) % video->MaxFrameNum)
            {
                status = fill_frame_num_gap(avcHandle, video);
                if (status != AVCDEC_SUCCESS)
                {
                    video->numMBs = 0;
                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice fill_frame_num_gap->(status != AVCDEC_SUCCESS) return: status: %d", status);
                    return status;
                }

                status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
                if (status != AVCDEC_SUCCESS)
                {
                    video->numMBs = 0;
                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice DPBInitBuffer->(status != AVCDEC_SUCCESS) return: status: %d", status);
                    return status;
                }
            }
        }
        /* if there's gap in the frame_num, we have to fill in the gap with
           imaginary frames that won't get used for short-term ref. */
        /* see fill_frame_num_gap() in JM */

        DecodePOC(video);
        /* find an empty memory from DPB and assigned to currPic */
        DPBInitPic(video, video->CurrPicNum);
        video->currPic->isReference = TRUE; // FIX

        if (video->nal_ref_idc == 0)
        {
            video->currPic->isReference = FALSE;
            video->currFS->IsOutputted |= 0x02; /* The MASK 0x02 means not needed for reference, or returned */
            /* node need to check for freeing of this buffer */
        }

        FMOInit(video);

        if (video->currPic->isReference)
        {
            video->PrevRefFrameNum = video->sliceHdr->frame_num;
        }

        video->prevFrameNum = video->sliceHdr->frame_num;
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice --6");

    video->newPic = FALSE;

    /* Initialize refListIdx for this picture */
    RefListInit(video);

    /* Re-order the reference list according to the ref_pic_list_reordering() */
    status = (AVCDec_Status)ReOrderList(video);
    if (status != AVCDEC_SUCCESS)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice ReOrderList->(status != AVCDEC_SUCCESS) return: status: AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    /* 2.2 Decode Slice. */
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice 2.2 Decode Slice");
    status = (AVCDec_Status)DecodeSlice(decvid);

    video->slice_id++; // slice

    if (status == AVCDEC_PICTURE_READY)
    {
        /* 3. Check complete picture */
#ifndef MB_BASED_DEBLOCK
        /* 3.1 Deblock */
        DeblockPicture(video);
#endif
        /* 3.2 Decoded frame reference marking. */
        /* 3.3 Put the decoded picture in output buffers */
        /* set video->mem_mge_ctrl_eq_5 */
        // xxx pa call to AVC_FRAME_UNBIND
        status = (AVCDec_Status)StorePictureInDPB(avcHandle, video); // CHECK check the return status
        if (status != AVCDEC_SUCCESS)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice StorePictureInDPB->(status != AVCDEC_SUCCESS) return: status: AVCDEC_FAIL");
            return AVCDEC_FAIL;
        }

        /* update POC bookkeeping for the next picture; mmco==5 resets it */
        if (video->mem_mgr_ctrl_eq_5)
        {
            video->PrevRefFrameNum = 0;
            video->prevFrameNum = 0;
            video->prevPicOrderCntMsb = 0;
            video->prevPicOrderCntLsb = video->TopFieldOrderCnt;
            video->prevFrameNumOffset = 0;
        }
        else
        {
            video->prevPicOrderCntMsb = video->PicOrderCntMsb;
            video->prevPicOrderCntLsb = video->sliceHdr->pic_order_cnt_lsb;
            video->prevFrameNumOffset = video->FrameNumOffset;
        }

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (status == AVCDEC_PICTURE_READY) return: status: AVCDEC_PICTURE_READY");
        return AVCDEC_PICTURE_READY;
    }
    else if (status != AVCDEC_SUCCESS)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice (status != AVCDEC_SUCCESS) return: status: AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecodeSlice final return: status: AVCDEC_SUCCESS");
    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecGetOutput() */
/*  Date     : 11/3/2003 */
/*  Purpose  : Get the next picture according to PicOrderCnt.
*/
/*  In/out   : */
/*  Return   : AVCFrameIO structure */
/*  Modified : */
/* ======================================================================== */
/**
Return the next displayable frame (lowest PicOrderCnt among frames not yet
outputted) through `output`, its DPB index through `indx`, and whether the
caller may release the buffer through `release`.
IsOutputted is a 2-bit mask: 0x01 = outputted for display, 0x02 = no longer
needed for reference; a frame store is free only when both bits are set (== 3).
If nothing is outputtable and the DPB is full of references, the oldest
short-term reference is forcibly evicted so decoding can continue.
*/
OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetOutput(AVCHandle *avcHandle, int *indx, int *release, AVCFrameIO *output)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecGetOutput");
    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    AVCDecPicBuffer *dpb;
    AVCFrameStore *oldestFrame = NULL;
    int i, first = 1;
    int count_frame = 0;
    int index = 0;
    int min_poc = 0;

    if (decvid == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "(decvid == NULL) return AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    video = decvid->common;
    dpb = video->decPicBuf;

    if (dpb->num_fs == 0)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "(num_fs == 0) return AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    /* search for the oldest frame_num in dpb */
    /* extension to field decoding, we have to search for every top_field/bottom_field
       within each frame in the dpb. This code only works for frame based.*/
    if (video->mem_mgr_ctrl_eq_5 == FALSE)
    {
        /* normal path: pick the not-yet-outputted frame with the smallest POC */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if ((dpb->fs[i]->IsOutputted & 0x01) == 0)
            {
                count_frame++;
                if (first)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    first = 0;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
                if (dpb->fs[i]->PicOrderCnt < min_poc)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
            }
        }
    }
    else
    {
        /* mmco==5 flush: output everything except the current frame store first */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if ((dpb->fs[i]->IsOutputted & 0x01) == 0 && dpb->fs[i] != video->currFS)
            {
                count_frame++;
                if (first)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    first = 0;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
                if (dpb->fs[i]->PicOrderCnt < min_poc)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
            }
        }

        /* when the flush is (almost) done, clear the mmco==5 pending state */
        if (count_frame < 2 && video->nal_unit_type != AVC_NALTYPE_IDR)
        {
            video->mem_mgr_ctrl_eq_5 = FALSE; // FIX
        }
        else if (count_frame < 1 && video->nal_unit_type == AVC_NALTYPE_IDR)
        {
            /* only the current (IDR) frame remains: output it */
            for (i = 0; i < dpb->num_fs; i++)
            {
                if (dpb->fs[i] == video->currFS && (dpb->fs[i]->IsOutputted & 0x01) == 0)
                {
                    oldestFrame = dpb->fs[i];
                    index = i;
                    break;
                }
            }
            video->mem_mgr_ctrl_eq_5 = FALSE;
        }
    }

    if (oldestFrame == NULL)
    {
        /* Check for Mem_mgmt_operation_5 based forced output */
        for (i = 0; i < dpb->num_fs; i++)
        {
            /* looking for the one not used or not reference and has been outputted */
            if (dpb->fs[i]->IsReference == 0 && dpb->fs[i]->IsOutputted == 3)
            {
                break;
            }
        }
        if (i < dpb->num_fs)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "(i < dpb->num_fs) return AVCDEC_FAIL");
            /* there are frames available for decoding */
            return AVCDEC_FAIL; /* no frame to be outputted */
        }

        /* no free frame available, we have to release one to continue decoding */
        /* evict the short-term reference with the smallest FrameNumWrap (oldest) */
        int MinIdx = 0;
        int32 MinFrameNumWrap = 0x7FFFFFFF;

        for (i = 0; i < dpb->num_fs; i++)
        {
            if (dpb->fs[i]->IsReference && !dpb->fs[i]->IsLongTerm)
            {
                if (dpb->fs[i]->FrameNumWrap < MinFrameNumWrap)
                {
                    MinFrameNumWrap = dpb->fs[i]->FrameNumWrap;
                    MinIdx = i;
                }
            }
        }
        /* mark the frame with smallest FrameNumWrap to be unused for reference */
        dpb->fs[MinIdx]->IsReference = 0;
        dpb->fs[MinIdx]->IsLongTerm = 0;
        dpb->fs[MinIdx]->frame.isReference = FALSE;
        dpb->fs[MinIdx]->frame.isLongTerm = FALSE;
        dpb->fs[MinIdx]->IsOutputted |= 0x02;
#ifdef PV_MEMORY_POOL
        if (dpb->fs[MinIdx]->IsOutputted == 3)
        {
            avcHandle->CBAVC_FrameUnbind(avcHandle->userData, MinIdx);
        }
#endif
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "(oldestFrame == NULL ?!?) return AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    /* MASK 0x01 means the frame is outputted (for display). A frame gets freed
       when it is outputted (0x01) and not needed for reference (0x02) */
    oldestFrame->IsOutputted |= 0x01;

    if (oldestFrame->IsOutputted == 3)
    {
        *release = 1; /* flag to release the buffer */
    }
    else
    {
        *release = 0;
    }
    /* do not release buffer here, release it after it is sent to the sink node */

    output->YCbCr[0] = oldestFrame->frame.Sl;
    output->YCbCr[1] = oldestFrame->frame.Scb;
    output->YCbCr[2] = oldestFrame->frame.Scr;
    output->height = oldestFrame->frame.height;
    output->pitch = oldestFrame->frame.width;
    output->disp_order = oldestFrame->PicOrderCnt;
    output->coding_order = oldestFrame->FrameNum;
    output->id = (uint32) oldestFrame->base_dpb; /* use the pointer as the id */
    *indx = index;

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "final return AVCDEC_SUCCESS");
    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecReset() */
/*  Date     : 03/04/2004 */
/*  Purpose  : Reset decoder, prepare it for a new IDR frame.
*/ /* In/out : */ /* Return : void */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF void PVAVCDecReset(AVCHandle *avcHandle) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecReset"); AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject; AVCCommonObj *video; AVCDecPicBuffer *dpb; int i; if (decvid == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecReset decvid == NULL"); return; } video = decvid->common; dpb = video->decPicBuf; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecReset --1 dpb->num_fs: %d", dpb->num_fs); /* reset the DPB */ for (i = 0; i < dpb->num_fs; i++) { dpb->fs[i]->IsLongTerm = 0; dpb->fs[i]->IsReference = 0; dpb->fs[i]->IsOutputted = 3; dpb->fs[i]->frame.isReference = 0; dpb->fs[i]->frame.isLongTerm = 0; // xxx pa like dpb:StorePictureInDPB try to hold iFrameUsed structure in sync! #ifdef PV_MEMORY_POOL avcHandle->CBAVC_FrameUnbind(avcHandle->userData, i); #endif } __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCDecReset --2"); video->mem_mgr_ctrl_eq_5 = FALSE; video->newPic = TRUE; video->newSlice = TRUE; video->currPic = NULL; video->currFS = NULL; video->prevRefPic = NULL; video->prevFrameNum = 0; video->PrevRefFrameNum = 0; video->prevFrameNumOffset = 0; video->FrameNumOffset = 0; video->mbNum = 0; video->numMBs = 0; return ; } /* ======================================================================== */ /* Function : PVAVCCleanUpDecoder() */ /* Date : 11/4/2003 */ /* Purpose : Clean up the decoder, free all memories allocated. 
*/
/*  In/out   : */
/*  Return   : void */
/*  Modified : */
/* ======================================================================== */
/**
Free every allocation owned by the decoder via the application-supplied
CBAVC_Free callback: common object internals (slice-group map, intra-pred
scratch rows, macroblock array, DPB, slice header), all cached PPS/SPS
entries, the bitstream object, and finally the decoder object itself.
NOTE(review): pointers are passed to CBAVC_Free through an (int) cast —
this truncates on LP64 platforms; kept as-is to match the callback signature.
*/
OSCL_EXPORT_REF void PVAVCCleanUpDecoder(AVCHandle *avcHandle)
{
    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    void *userData = avcHandle->userData;
    int i;

    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "PVAVCCleanUpDecoder", -1, -1);

    if (decvid != NULL)
    {
        video = decvid->common;
        if (video != NULL)
        {
            if (video->MbToSliceGroupMap != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->MbToSliceGroupMap);
            }

#ifdef MB_BASED_DEBLOCK
            /* scratch rows used to save neighbor pixels for intra prediction */
            if (video->intra_pred_top != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top);
            }
            if (video->intra_pred_top_cb != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top_cb);
            }
            if (video->intra_pred_top_cr != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top_cr);
            }
#endif
            if (video->mblock != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->mblock);
            }
            if (video->decPicBuf != NULL)
            {
                /* releases the individual frame buffers before the DPB struct */
                CleanUpDPB(avcHandle, video);
                avcHandle->CBAVC_Free(userData, (int)video->decPicBuf);
            }
            if (video->sliceHdr != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->sliceHdr);
            }

            avcHandle->CBAVC_Free(userData, (int)video); /* last thing to do */
        }

        /* free all cached picture parameter sets (and their slice_group_id maps) */
        for (i = 0; i < 256; i++)
        {
            if (decvid->picParams[i] != NULL)
            {
                if (decvid->picParams[i]->slice_group_id != NULL)
                {
                    avcHandle->CBAVC_Free(userData, (int)decvid->picParams[i]->slice_group_id);
                }
                avcHandle->CBAVC_Free(userData, (int)decvid->picParams[i]);
            }
        }

        /* free all cached sequence parameter sets */
        for (i = 0; i < 32; i++)
        {
            if (decvid->seqParams[i] != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)decvid->seqParams[i]);
            }
        }

        if (decvid->bitstream != NULL)
        {
            avcHandle->CBAVC_Free(userData, (int)decvid->bitstream);
        }

        avcHandle->CBAVC_Free(userData, (int)decvid);
    }

    return;
}



================================================
FILE: RtspCamera/jni/avc_h264/dec/src/avcdec_bitstream.h
================================================
/*
------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains bitstream related functions. @publishedAll */ #ifndef _AVCDEC_BITSTREAM_H_ #define _AVCDEC_BITSTREAM_H_ #include "avcdec_lib.h" #define WORD_SIZE 32 /* this can vary, default to 32 bit for now */ #ifndef __cplusplus #define AVC_GETDATA(x,y) userData->AVC_GetData(x,y) #endif #ifdef __cplusplus extern "C" { #endif #define BitstreamFlushBits(A,B) {(A)->bitcnt += (B); (A)->incnt -= (B); (A)->curr_word <<= (B);} AVCDec_Status AVC_BitstreamFillCache(AVCDecBitstream *stream); /** This function populates bitstream structure. \param "stream" "Pointer to bitstream structure." \param "buffer" "Pointer to the bitstream buffer." \param "size" "Size of the buffer." \param "nal_size" "Size of the NAL unit." \param "resetall" "Flag for reset everything." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL for fail." */ AVCDec_Status BitstreamInit(AVCDecBitstream *stream, uint8 *buffer, int size); /** This function reads next aligned word and remove the emulation prevention code if necessary. \param "stream" "Pointer to bitstream structure." \return "Next word." */ uint BitstreamNextWord(AVCDecBitstream *stream); /** This function reads nBits bits from the current position and advance the pointer. \param "stream" "Pointer to bitstream structure." 
\param "nBits" "Number of bits to be read."
\param "code" "Point to the read value."
\return "AVCDEC_SUCCESS if succeeded, AVCDEC_FAIL if number of bits
    is greater than the word-size, AVCDEC_PACKET_LOSS or
    AVCDEC_NO_DATA if callback to get data fails."
*/
AVCDec_Status BitstreamReadBits(AVCDecBitstream *stream, int nBits, uint *code);

/**
This function shows nBits bits from the current position without advancing the pointer.
\param "stream" "Pointer to bitstream structure."
\param "nBits" "Number of bits to be read."
\param "code" "Point to the read value."
\return "AVCDEC_SUCCESS if succeeded, AVCDEC_FAIL if number of bits
    is greater than the word-size, AVCDEC_NO_DATA if it needs
    to callback to get data."
*/
AVCDec_Status BitstreamShowBits(AVCDecBitstream *stream, int nBits, uint *code);

/**
This function flushes nBits bits from the current position.
\param "stream" "Pointer to bitstream structure."
\param "nBits" "Number of bits to be read."
\return "AVCDEC_SUCCESS if succeeded, AVCDEC_FAIL if number of bits
    is greater than the word-size. It will not call back to get more data.
    Users should call BitstreamShowBits to determine how much they want to flush."
*/

/**
This function reads 1 bit from the current position and advances the pointer.
\param "stream" "Pointer to bitstream structure."
\param "nBits" "Number of bits to be read."
\param "code" "Point to the read value."
\return "AVCDEC_SUCCESS if succeeded, AVCDEC_FAIL if number of bits
    is greater than the word-size, AVCDEC_PACKET_LOSS or
    AVCDEC_NO_DATA if callback to get data fails."
*/
AVCDec_Status BitstreamRead1Bit(AVCDecBitstream *stream, uint *code);

/**
This function checks whether the current bit position is byte-aligned or not.
\param "stream" "Pointer to the bitstream structure."
\return "TRUE if byte-aligned, FALSE otherwise."
*/
bool byte_aligned(AVCDecBitstream *stream);

AVCDec_Status BitstreamByteAlign(AVCDecBitstream *stream);

/**
This function checks whether there is more RBSP data before the trailing bits.
\param "stream" "Pointer to the bitstream structure."
\return "TRUE if yes, FALSE otherwise."
*/
bool more_rbsp_data(AVCDecBitstream *stream);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _AVCDEC_BITSTREAM_H_ */



================================================
FILE: RtspCamera/jni/avc_h264/dec/src/avcdec_int.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/**
This file contains application function interfaces to the AVC decoder library
and necessary type definitions and enumerations.
Naming convention for variables:
lower_case_with_under_line  is  syntax element in subclause 7.2 and 7.3
noUnderLine or NoUnderLine  is  derived variables defined somewhere else in the draft
                                or introduced by this decoder library.
@publishedAll
*/

#ifndef _AVCDEC_INT_H_
#define _AVCDEC_INT_H_

#include "avcint_common.h"
#include "avcdec_api.h"


/**
Bitstream structure contains bitstream related parameters such as the pointer
to the buffer, the current byte position and bit position.
@publishedAll
*/
typedef struct tagDecBitstream
{
    uint8 *bitstreamBuffer; /* pointer to buffer memory */
    int nal_size;           /* size of the current NAL unit */
    int data_end_pos;       /* bitstreamBuffer size in bytes */
    int read_pos;           /* next position to read from bitstreamBuffer */
    uint curr_word;         /* byte-swapped (MSB left) current word read from buffer */
    int bit_left;           /* number of bit left in current_word */
    uint next_word;         /* in case for old data in previous buffer hasn't been flushed. */
    int incnt;              /* bit left in the prev_word */
    int incnt_next;         /* bits buffered in next_word — NOTE(review): confirm against AVC_BitstreamFillCache */
    int bitcnt;             /* running count of bits consumed (advanced by BitstreamFlushBits) */
    void *userData;
} AVCDecBitstream;

/**
This structure is the main object for AVC decoder library providing access to all
global variables. It is allocated at PVAVCInitDecoder and freed at PVAVCCleanUpDecoder.
@publishedAll
*/
typedef struct tagDecObject
{
    AVCCommonObj *common;

    AVCDecBitstream *bitstream; /* for current NAL */

    /* sequence parameter set */
    AVCSeqParamSet *seqParams[32]; /* Array of pointers, get allocated at arrival of new seq_id */
    AVCSeqParamSet *lastSPS;       /* point to the most recently decoded SPS, for PVAVCDecGetSeqInfo */

    /* picture parameter set */
    AVCPicParamSet *picParams[256]; /* Array of pointers to picture param set structures */

    /* For internal operation, scratch memory for MV, prediction, transform, etc.*/
    uint ref_idx_l0[4]; /* [mbPartIdx], te(v) */
    uint ref_idx_l1[4];

    /* function pointers */
    AVCDec_Status(*residual_block)(struct tagDecObject*, int, int,
                                   int *, int *, int *);

    /* Application control data */
    AVCHandle *avcHandle;
    void (*AVC_DebugLog)(AVCLogType type, char *string1, char *string2);
    /*bool*/
    uint debugEnable;

} AVCDecObject;

#endif /* _AVCDEC_INT_H_ */



================================================
FILE: RtspCamera/jni/avc_h264/dec/src/avcdec_lib.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may
not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains declarations of internal functions for AVC decoder library. @publishedAll */ #ifndef _AVCDEC_LIB_H_ #define _AVCDEC_LIB_H_ #include "avclib_common.h" #include "avcdec_int.h" /*----------- avcdec_api.c -------------*/ /** This function takes out the emulation prevention bytes from the input to creat RBSP. The result is written over the input bitstream. \param "nal_unit" "(I/O) Pointer to the input buffer." \param "size" "(I/O) Pointer to the size of the input/output buffer." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise." */ AVCDec_Status EBSPtoRBSP(uint8 *nal_unit, int *size); /*------------- pred_intra.c ---------------*/ /** This function is the main entry point to intra prediction operation on a macroblock. \param "video" "Pointer to AVCCommonObj." 
*/ AVCStatus IntraMBPrediction(AVCCommonObj *video); void SaveNeighborForIntraPred(AVCCommonObj *video, int offset); AVCStatus Intra_4x4(AVCCommonObj *video, int component, int SubBlock_indx, uint8 *comp); void Intra_4x4_Vertical(AVCCommonObj *video, int block_offset); void Intra_4x4_Horizontal(AVCCommonObj *video, int pitch, int block_offset); void Intra_4x4_DC(AVCCommonObj *video, int pitch, int block_offset, AVCNeighborAvailability *availability); void Intra_4x4_Down_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability); void Intra_4x4_Diagonal_Down_Right(AVCCommonObj *video, int pitch, int block_offset); void Intra_4x4_Diagonal_Vertical_Right(AVCCommonObj *video, int pitch, int block_offset); void Intra_4x4_Diagonal_Horizontal_Down(AVCCommonObj *video, int pitch, int block_offset); void Intra_4x4_Vertical_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability); void Intra_4x4_Horizontal_Up(AVCCommonObj *video, int pitch, int block_offset); void Intra_16x16_Vertical(AVCCommonObj *video); void Intra_16x16_Horizontal(AVCCommonObj *video, int pitch); void Intra_16x16_DC(AVCCommonObj *video, int pitch); void Intra_16x16_Plane(AVCCommonObj *video, int pitch); void Intra_Chroma_DC(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr); void Intra_Chroma_Horizontal(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr); void Intra_Chroma_Vertical(AVCCommonObj *video, uint8 *predCb, uint8 *predCr); void Intra_Chroma_Plane(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr); /*------------ pred_inter.c ---------------*/ /** This function is the main entrance to inter prediction operation for a macroblock. For decoding, this function also calls inverse transform and compensation. \param "video" "Pointer to AVCCommonObj." \return "void" */ void InterMBPrediction(AVCCommonObj *video); /** This function is called for luma motion compensation. 
\param "ref" "Pointer to the origin of a reference luma." \param "picwidth" "Width of the picture." \param "picheight" "Height of the picture." \param "x_pos" "X-coordinate of the predicted block in quarter pel resolution." \param "y_pos" "Y-coordinate of the predicted block in quarter pel resolution." \param "pred" "Pointer to the output predicted block." \param "pred_pitch" "Width of pred." \param "blkwidth" "Width of the current partition." \param "blkheight" "Height of the current partition." \return "void" */ void LumaMotionComp(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *pred, int pred_pitch, int blkwidth, int blkheight); /** Functions below are special cases for luma motion compensation. LumaFullPelMC is for full pixel motion compensation. LumaBorderMC is for interpolation in only one dimension. LumaCrossMC is for interpolation in one dimension and half point in the other dimension. LumaDiagonalMC is for interpolation in diagonal direction. \param "ref" "Pointer to the origin of a reference luma." \param "picwidth" "Width of the picture." \param "picheight" "Height of the picture." \param "x_pos" "X-coordinate of the predicted block in full pel resolution." \param "y_pos" "Y-coordinate of the predicted block in full pel resolution." \param "dx" "Fraction of x_pos in quarter pel." \param "dy" "Fraction of y_pos in quarter pel." \param "curr" "Pointer to the current partition in the current picture." \param "residue" "Pointer to the current partition for the residue block." \param "blkwidth" "Width of the current partition." \param "blkheight" "Height of the current partition." 
\return "void" */ void CreatePad(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *out, int blkwidth, int blkheight); void FullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch, int blkwidth, int blkheight); void HorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx); void HorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx); void HorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight); void VertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy); void VertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight); void VertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy); void DiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight); void ChromaMotionComp(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *pred, int pred_pitch, int blkwidth, int blkheight); void ChromaFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight) ; void ChromaBorderMC(uint8 *ref, int picwidth, int dx, int dy, uint8 *pred, int pred_pitch, int blkwidth, int blkheight); void ChromaDiagonalMC(uint8 *ref, int picwidth, int dx, int dy, uint8 *pred, int pred_pitch, int blkwidth, int blkheight); void ChromaFullPelMCOutside(uint8 *ref, uint8 *pred, int pred_pitch, int blkwidth, int blkheight, int x_inc, int y_inc0, int y_inc1, int x_mid, int y_mid); void ChromaBorderMCOutside(uint8 *ref, int picwidth, int dx, int dy, uint8 *pred, int pred_pitch, int blkwidth, int blkheight, int x_inc, int z_inc, int y_inc0, int y_inc1, int x_mid, int y_mid); void ChromaDiagonalMCOutside(uint8 *ref, int picwidth, int dx, int dy, uint8 *pred, int pred_pitch, int blkwidth, int blkheight, int x_inc, int z_inc, int y_inc0, int y_inc1, int x_mid, int 
y_mid); void ChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void ChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void ChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void ChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void ChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void ChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void ChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); /*----------- slice.c ---------------*/ /** This function performs the main decoding loop for slice data including INTRA/INTER prediction, transform and quantization and compensation. See decode_frame_slice() in JM. \param "video" "Pointer to AVCDecObject." \return "AVCDEC_SUCCESS for success, AVCDEC_PICTURE_READY for end-of-picture and AVCDEC_FAIL otherwise." */ AVCDec_Status DecodeSlice(AVCDecObject *video); AVCDec_Status ConcealSlice(AVCDecObject *decvid, int mbnum_start, int mbnum_end); /** This function performs the decoding of one macroblock. \param "video" "Pointer to AVCDecObject." \param "prevMbSkipped" "A value derived in 7.3.4." \return "AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise." */ AVCDec_Status DecodeMB(AVCDecObject *video); /** This function performs macroblock prediction type decoding as in subclause 7.3.5.1. \param "video" "Pointer to AVCCommonObj." \param "currMB" "Pointer to the current macroblock." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise." 
*/ AVCDec_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream); /** This function performs sub-macroblock prediction type decoding as in subclause 7.3.5.2. \param "video" "Pointer to AVCCommonObj." \param "currMB" "Pointer to the current macroblock." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise." */ AVCDec_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream); /** This function interprets the mb_type and sets necessary information when the slice type is AVC_I_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "mb_type" "From the syntax bitstream." \return "void" */ void InterpretMBModeI(AVCMacroblock *mblock, uint mb_type); /** This function interprets the mb_type and sets necessary information when the slice type is AVC_P_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "mb_type" "From the syntax bitstream." \return "void" */ void InterpretMBModeP(AVCMacroblock *mblock, uint mb_type); /** This function interprets the mb_type and sets necessary information when the slice type is AVC_B_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "mb_type" "From the syntax bitstream." \return "void" */ void InterpretMBModeB(AVCMacroblock *mblock, uint mb_type); /** This function interprets the mb_type and sets necessary information when the slice type is AVC_SI_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "mb_type" "From the syntax bitstream." \return "void" */ void InterpretMBModeSI(AVCMacroblock *mblock, uint mb_type); /** This function interprets the sub_mb_type and sets necessary information when the slice type is AVC_P_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "sub_mb_type" "From the syntax bitstream." 
\return "void" */ void InterpretSubMBModeP(AVCMacroblock *mblock, uint *sub_mb_type); /** This function interprets the sub_mb_type and sets necessary information when the slice type is AVC_B_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "sub_mb_type" "From the syntax bitstream." \return "void" */ void InterpretSubMBModeB(AVCMacroblock *mblock, uint *sub_mb_type); /** This function decodes the Intra4x4 prediction mode from neighboring information and from the decoded syntax. \param "video" "Pointer to AVCCommonObj." \param "currMB" "Pointer to current macroblock." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status DecodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream); /*----------- vlc.c -------------------*/ /** This function reads and decodes Exp-Golomb codes. \param "bitstream" "Pointer to AVCDecBitstream." \param "codeNum" "Pointer to the value of the codeNum." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status ue_v(AVCDecBitstream *bitstream, uint *codeNum); /** This function reads and decodes signed Exp-Golomb codes. \param "bitstream" "Pointer to AVCDecBitstream." \param "value" "Pointer to syntax element value." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status se_v(AVCDecBitstream *bitstream, int *value); /** This function reads and decodes signed Exp-Golomb codes for 32 bit codeword. \param "bitstream" "Pointer to AVCDecBitstream." \param "value" "Pointer to syntax element value." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status se_v32bit(AVCDecBitstream *bitstream, int32 *value); /** This function reads and decodes truncated Exp-Golomb codes. \param "bitstream" "Pointer to AVCDecBitstream." \param "value" "Pointer to syntax element value." \param "range" "Range of the value as input to determine the algorithm." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." 
*/ AVCDec_Status te_v(AVCDecBitstream *bitstream, uint *value, uint range); /** This function parse Exp-Golomb code from the bitstream. \param "bitstream" "Pointer to AVCDecBitstream." \param "leadingZeros" "Pointer to the number of leading zeros." \param "infobits" "Pointer to the value after leading zeros and the first one. The total number of bits read is 2*leadingZeros + 1." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status GetEGBitstring(AVCDecBitstream *bitstream, int *leadingZeros, int *infobits); /** This function parse Exp-Golomb code from the bitstream for 32 bit codewords. \param "bitstream" "Pointer to AVCDecBitstream." \param "leadingZeros" "Pointer to the number of leading zeros." \param "infobits" "Pointer to the value after leading zeros and the first one. The total number of bits read is 2*leadingZeros + 1." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status GetEGBitstring32bit(AVCDecBitstream *bitstream, int *leadingZeros, uint32 *infobits); /** This function performs CAVLC decoding of the CBP (coded block pattern) of a macroblock by calling ue_v() and then mapping the codeNum to the corresponding CBP value. \param "currMB" "Pointer to the current AVCMacroblock structure." \param "stream" "Pointer to the AVCDecBitstream." \return "void" */ AVCDec_Status DecodeCBP(AVCMacroblock *currMB, AVCDecBitstream *stream); /** This function decodes the syntax for trailing ones and total coefficient. Subject to optimization. \param "stream" "Pointer to the AVCDecBitstream." \param "TrailingOnes" "Pointer to the trailing one variable output." \param "TotalCoeff" "Pointer to the total coefficient variable output." \param "nC" "Context for number of nonzero coefficient (prediction context)." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status ce_TotalCoeffTrailingOnes(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff, int nC); /** This function decodes the syntax for trailing ones and total coefficient for chroma DC block. 
Subject to optimization. \param "stream" "Pointer to the AVCDecBitstream." \param "TrailingOnes" "Pointer to the trailing one variable output." \param "TotalCoeff" "Pointer to the total coefficient variable output." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status ce_TotalCoeffTrailingOnesChromaDC(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff); /** This function decode a VLC table with 2 output. \param "stream" "Pointer to the AVCDecBitstream." \param "lentab" "Table for code length." \param "codtab" "Table for code value." \param "tabwidth" "Width of the table or alphabet size of the first output." \param "tabheight" "Height of the table or alphabet size of the second output." \param "code1" "Pointer to the first output." \param "code2" "Pointer to the second output." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status code_from_bitstream_2d(AVCDecBitstream *stream, int *lentab, int *codtab, int tabwidth, int tabheight, int *code1, int *code2); /** This function decodes the level_prefix VLC value as in Table 9-6. \param "stream" "Pointer to the AVCDecBitstream." \param "code" "Pointer to the output." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status ce_LevelPrefix(AVCDecBitstream *stream, uint *code); /** This function decodes total_zeros VLC syntax as in Table 9-7 and 9-8. \param "stream" "Pointer to the AVCDecBitstream." \param "code" "Pointer to the output." \param "TotalCoeff" "Context parameter." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status ce_TotalZeros(AVCDecBitstream *stream, int *code, int TotalCoeff); /** This function decodes total_zeros VLC syntax for chroma DC as in Table 9-9. \param "stream" "Pointer to the AVCDecBitstream." \param "code" "Pointer to the output." \param "TotalCoeff" "Context parameter." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status ce_TotalZerosChromaDC(AVCDecBitstream *stream, int *code, int TotalCoeff); /** This function decodes run_before VLC syntax as in Table 9-10. 
\param "stream" "Pointer to the AVCDecBitstream." \param "code" "Pointer to the output." \param "zeroLeft" "Context parameter." \return "AVCDEC_SUCCESS for success." */ AVCDec_Status ce_RunBefore(AVCDecBitstream *stream, int *code, int zeroLeft); /*----------- header.c -------------------*/ /** This function parses vui_parameters. \param "decvid" "Pointer to AVCDecObject." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status vui_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCSeqParamSet *currSPS); AVCDec_Status sei_payload(AVCDecObject *decvid, AVCDecBitstream *stream, uint payloadType, uint payloadSize); AVCDec_Status buffering_period(AVCDecObject *decvid, AVCDecBitstream *stream); AVCDec_Status pic_timing(AVCDecObject *decvid, AVCDecBitstream *stream); AVCDec_Status recovery_point(AVCDecObject *decvid, AVCDecBitstream *stream); AVCDec_Status dec_ref_pic_marking_repetition(AVCDecObject *decvid, AVCDecBitstream *stream); AVCDec_Status motion_constrained_slice_group_set(AVCDecObject *decvid, AVCDecBitstream *stream); /** This function parses hrd_parameters. \param "decvid" "Pointer to AVCDecObject." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status hrd_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCHRDParams *HRDParam); /** This function decodes the syntax in sequence parameter set slice and fill up the AVCSeqParamSet structure. \param "decvid" "Pointer to AVCDecObject." \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status DecodeSPS(AVCDecObject *decvid, AVCDecBitstream *stream); /** This function decodes the syntax in picture parameter set and fill up the AVCPicParamSet structure. \param "decvid" "Pointer to AVCDecObject." \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCDecBitstream." 
\return "AVCDEC_SUCCESS or AVCDEC_FAIL." */ AVCDec_Status DecodePPS(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream); AVCDec_Status DecodeSEI(AVCDecObject *decvid, AVCDecBitstream *stream); /** This function decodes slice header, calls related functions such as reference picture list reordering, prediction weight table, decode ref marking. See FirstPartOfSliceHeader() and RestOfSliceHeader() in JM. \param "decvid" "Pointer to AVCDecObject." \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCDecBitstream." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise." */ AVCDec_Status DecodeSliceHeader(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream); /** This function performes necessary operations to create dummy frames when there is a gap in frame_num. \param "video" "Pointer to AVCCommonObj." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise." */ AVCDec_Status fill_frame_num_gap(AVCHandle *avcHandle, AVCCommonObj *video); /** This function decodes ref_pic_list_reordering related syntax and fill up the AVCSliceHeader structure. \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCDecBitstream." \param "sliceHdr" "Pointer to AVCSliceHdr." \param "slice_type" "Value of slice_type - 5 if greater than 5." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise." */ AVCDec_Status ref_pic_list_reordering(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type); /** This function decodes dec_ref_pic_marking related syntax and fill up the AVCSliceHeader structure. \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCDecBitstream." \param "sliceHdr" "Pointer to AVCSliceHdr." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise." 
*/ AVCDec_Status dec_ref_pic_marking(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr); /** This function performs POC related operation prior to decoding a picture \param "video" "Pointer to AVCCommonObj." \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise." See also PostPOC() for initialization of some variables. */ AVCDec_Status DecodePOC(AVCCommonObj *video); /*------------ residual.c ------------------*/ /** This function decodes the intra pcm data and fill it in the corresponding location on the current picture. \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCDecBitstream." */ AVCDec_Status DecodeIntraPCM(AVCCommonObj *video, AVCDecBitstream *stream); /** This function performs residual syntax decoding as well as quantization and transformation of the decoded coefficients. See subclause 7.3.5.3. \param "video" "Pointer to AVCDecObject." \param "currMB" "Pointer to current macroblock." */ AVCDec_Status residual(AVCDecObject *video, AVCMacroblock *currMB); /** This function performs CAVLC syntax decoding to get the run and level information of the coefficients. \param "video" "Pointer to AVCDecObject." \param "type" "One of AVCResidualType for a particular 4x4 block." \param "bx" "Horizontal block index." \param "by" "Vertical block index." \param "level" "Pointer to array of level for output." \param "run" "Pointer to array of run for output." \param "numcoeff" "Pointer to the total number of nonzero coefficients." \return "AVCDEC_SUCCESS for success." 
*/ AVCDec_Status residual_block_cavlc(AVCDecObject *video, int nC, int maxNumCoeff, int *level, int *run, int *numcoeff); #endif /* _AVCDEC_LIB_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/header.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "avcdec_lib.h" #include "avcdec_bitstream.h" #include "oscl_mem.h" #include "avcdec_api.h" /** see subclause 7.4.2.1 */ AVCDec_Status DecodeSPS(AVCDecObject *decvid, AVCDecBitstream *stream) { AVCDec_Status status = AVCDEC_SUCCESS; AVCSeqParamSet *seqParam, tempSeqParam; uint temp; int i; uint profile_idc, constrained_set0_flag, constrained_set1_flag, constrained_set2_flag,constrained_set3_flag; uint level_idc, seq_parameter_set_id; void *userData = decvid->avcHandle->userData; AVCHandle *avcHandle = decvid->avcHandle; DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "DecodeSPS", -1, -1); BitstreamReadBits(stream, 8, &profile_idc); BitstreamRead1Bit(stream, &constrained_set0_flag); // if (profile_idc != 66 && constrained_set0_flag != 1) // { // return AVCDEC_FAIL; // } BitstreamRead1Bit(stream, &constrained_set1_flag); BitstreamRead1Bit(stream, &constrained_set2_flag); BitstreamRead1Bit(stream, &constrained_set3_flag); BitstreamReadBits(stream, 4, &temp); 
BitstreamReadBits(stream, 8, &level_idc); if (level_idc > 51) { return AVCDEC_FAIL; } if (mapLev2Idx[level_idc] == 255) { return AVCDEC_FAIL; } ue_v(stream, &seq_parameter_set_id); if (seq_parameter_set_id > 31) { return AVCDEC_FAIL; } /* Allocate sequence param set for seqParams[seq_parameter_set_id]. */ if (decvid->seqParams[seq_parameter_set_id] == NULL) /* allocate seqParams[id] */ { decvid->seqParams[seq_parameter_set_id] = (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR); if (decvid->seqParams[seq_parameter_set_id] == NULL) { return AVCDEC_MEMORY_FAIL; } } DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "done alloc seqParams", -1, -1); seqParam = &tempSeqParam; // assign to temporary structure first oscl_memset((void*) seqParam, 0, sizeof(AVCSeqParamSet)); // init to 0 seqParam->profile_idc = profile_idc; seqParam->constrained_set0_flag = constrained_set0_flag; seqParam->constrained_set1_flag = constrained_set1_flag; seqParam->constrained_set2_flag = constrained_set2_flag; seqParam->constrained_set3_flag = constrained_set3_flag; seqParam->level_idc = level_idc; seqParam->seq_parameter_set_id = seq_parameter_set_id; /* continue decoding SPS */ ue_v(stream, &(seqParam->log2_max_frame_num_minus4)); if (seqParam->log2_max_frame_num_minus4 > 12) { return AVCDEC_FAIL; } ue_v(stream, &(seqParam->pic_order_cnt_type)); DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 1", seqParam->log2_max_frame_num_minus4, seqParam->pic_order_cnt_type); if (seqParam->pic_order_cnt_type == 0) { ue_v(stream, &(seqParam->log2_max_pic_order_cnt_lsb_minus4)); } else if (seqParam->pic_order_cnt_type == 1) { // MC_CHECK BitstreamRead1Bit(stream, (uint*)&(seqParam->delta_pic_order_always_zero_flag)); se_v32bit(stream, &(seqParam->offset_for_non_ref_pic)); se_v32bit(stream, &(seqParam->offset_for_top_to_bottom_field)); ue_v(stream, &(seqParam->num_ref_frames_in_pic_order_cnt_cycle)); for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++) 
{ se_v32bit(stream, &(seqParam->offset_for_ref_frame[i])); } } ue_v(stream, &(seqParam->num_ref_frames)); if (seqParam->num_ref_frames > 16) { return AVCDEC_FAIL; } DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 2", seqParam->num_ref_frames, -1); BitstreamRead1Bit(stream, (uint*)&(seqParam->gaps_in_frame_num_value_allowed_flag)); ue_v(stream, &(seqParam->pic_width_in_mbs_minus1)); DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "picwidth", seqParam->pic_width_in_mbs_minus1, -1); ue_v(stream, &(seqParam->pic_height_in_map_units_minus1)); DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "picwidth", seqParam->pic_height_in_map_units_minus1, -1); BitstreamRead1Bit(stream, (uint*)&(seqParam->frame_mbs_only_flag)); seqParam->mb_adaptive_frame_field_flag = 0; /* default value */ if (!seqParam->frame_mbs_only_flag) { BitstreamRead1Bit(stream, (uint*)&(seqParam->mb_adaptive_frame_field_flag)); } DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 3", seqParam->frame_mbs_only_flag, -1); BitstreamRead1Bit(stream, (uint*)&(seqParam->direct_8x8_inference_flag)); DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 4", seqParam->direct_8x8_inference_flag, -1); BitstreamRead1Bit(stream, (uint*)&(seqParam->frame_cropping_flag)); seqParam->frame_crop_left_offset = 0; /* default value */ seqParam->frame_crop_right_offset = 0;/* default value */ seqParam->frame_crop_top_offset = 0;/* default value */ seqParam->frame_crop_bottom_offset = 0;/* default value */ if (seqParam->frame_cropping_flag) { ue_v(stream, &(seqParam->frame_crop_left_offset)); ue_v(stream, &(seqParam->frame_crop_right_offset)); ue_v(stream, &(seqParam->frame_crop_top_offset)); ue_v(stream, &(seqParam->frame_crop_bottom_offset)); } DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 5", seqParam->frame_cropping_flag, -1); BitstreamRead1Bit(stream, (uint*)&(seqParam->vui_parameters_present_flag)); if (seqParam->vui_parameters_present_flag) { status = vui_parameters(decvid, stream, seqParam); if (status != AVCDEC_SUCCESS) { return status; 
} } /* now everything is good, copy it */ oscl_memcpy(decvid->seqParams[seq_parameter_set_id], seqParam, sizeof(AVCSeqParamSet)); decvid->lastSPS = decvid->seqParams[seq_parameter_set_id]; /* for PVAVCDecGetSeqInfo */ return status; } AVCDec_Status vui_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCSeqParamSet *currSPS) { uint temp; uint temp32; uint aspect_ratio_idc, overscan_appopriate_flag, video_format, video_full_range_flag; /* aspect_ratio_info_present_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { BitstreamReadBits(stream, 8, &aspect_ratio_idc); if (aspect_ratio_idc == 255) { /* sar_width */ BitstreamReadBits(stream, 16, &temp); /* sar_height */ BitstreamReadBits(stream, 16, &temp); } } /* overscan_info_present */ BitstreamRead1Bit(stream, &temp); if (temp) { BitstreamRead1Bit(stream, &overscan_appopriate_flag); } /* video_signal_type_present_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { BitstreamReadBits(stream, 3, &video_format); BitstreamRead1Bit(stream, &video_full_range_flag); /* colour_description_present_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* colour_primaries */ BitstreamReadBits(stream, 8, &temp); /* transfer_characteristics */ BitstreamReadBits(stream, 8, &temp); /* matrix coefficients */ BitstreamReadBits(stream, 8, &temp); } } /* chroma_loc_info_present_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* chroma_sample_loc_type_top_field */ ue_v(stream, &temp); /* chroma_sample_loc_type_bottom_field */ ue_v(stream, &temp); } /* timing_info_present_flag*/ BitstreamRead1Bit(stream, &temp); if (temp) { /* num_unit_in_tick*/ BitstreamReadBits(stream, 32, &temp32); /* time_scale */ BitstreamReadBits(stream, 32, &temp32); /* fixed_frame_rate_flag */ BitstreamRead1Bit(stream, &temp); } /* nal_hrd_parameters_present_flag */ BitstreamRead1Bit(stream, &temp); currSPS->vui_parameters.nal_hrd_parameters_present_flag = temp; if (temp) { hrd_parameters(decvid, stream, 
&(currSPS->vui_parameters.nal_hrd_parameters)); } /* vcl_hrd_parameters_present_flag*/ BitstreamRead1Bit(stream, &temp); currSPS->vui_parameters.vcl_hrd_parameters_present_flag = temp; if (temp) { hrd_parameters(decvid, stream, &(currSPS->vui_parameters.vcl_hrd_parameters)); } if (currSPS->vui_parameters.nal_hrd_parameters_present_flag || currSPS->vui_parameters.vcl_hrd_parameters_present_flag) { /* low_delay_hrd_flag */ BitstreamRead1Bit(stream, &temp); } /* pic_struct_present_flag */ BitstreamRead1Bit(stream, &temp); currSPS->vui_parameters.pic_struct_present_flag = temp; /* bitstream_restriction_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* motion_vectors_over_pic_boundaries_flag */ BitstreamRead1Bit(stream, &temp); /* max_bytes_per_pic_denom */ ue_v(stream, &temp); /* max_bits_per_mb_denom */ ue_v(stream, &temp); /* log2_max_mv_length_horizontal */ ue_v(stream, &temp); /* log2_max_mv_length_vertical */ ue_v(stream, &temp); /* num_reorder_frames */ ue_v(stream, &temp); /* max_dec_frame_buffering */ ue_v(stream, &temp); } return AVCDEC_SUCCESS; } AVCDec_Status hrd_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCHRDParams *HRDParam) { OSCL_UNUSED_ARG(decvid); uint temp; uint cpb_cnt_minus1; uint i; ue_v(stream, &cpb_cnt_minus1); HRDParam->cpb_cnt_minus1 = cpb_cnt_minus1; /* bit_rate_scale */ BitstreamReadBits(stream, 4, &temp); /* cpb_size_scale */ BitstreamReadBits(stream, 4, &temp); for (i = 0; i <= cpb_cnt_minus1; i++) { /* bit_rate_value_minus1[i] */ ue_v(stream, &temp); /* cpb_size_value_minus1[i] */ ue_v(stream, &temp); /* cbr_flag[i] */ ue_v(stream, &temp); } /* initial_cpb_removal_delay_length_minus1 */ BitstreamReadBits(stream, 5, &temp); /* cpb_removal_delay_length_minus1 */ BitstreamReadBits(stream, 5, &temp); HRDParam->cpb_removal_delay_length_minus1 = temp; /* dpb_output_delay_length_minus1 */ BitstreamReadBits(stream, 5, &temp); HRDParam->dpb_output_delay_length_minus1 = temp; /* time_offset_length */ BitstreamReadBits(stream, 
5, &temp); HRDParam->time_offset_length = temp; return AVCDEC_SUCCESS; } /** see subclause 7.4.2.2 */ AVCDec_Status DecodePPS(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream) { AVCPicParamSet *picParam; AVCDec_Status status; int i, iGroup, numBits; int PicWidthInMbs, PicHeightInMapUnits, PicSizeInMapUnits; uint pic_parameter_set_id, seq_parameter_set_id; uint temp_uint; void *userData = decvid->avcHandle->userData; AVCHandle *avcHandle = decvid->avcHandle; AVCPicParamSet tempPicParam; /* add this to make sure that we don't overwrite good PPS with corrupted new PPS */ ue_v(stream, &pic_parameter_set_id); if (pic_parameter_set_id > 255) { return AVCDEC_FAIL; } ue_v(stream, &seq_parameter_set_id); if (seq_parameter_set_id > 31) { return AVCDEC_FAIL; } picParam = &tempPicParam; /* decode everything into this local structure first */ oscl_memset((void *) picParam, 0, sizeof(AVCPicParamSet)); // init the structure to 0 picParam->slice_group_id = NULL; picParam->seq_parameter_set_id = seq_parameter_set_id; picParam->pic_parameter_set_id = pic_parameter_set_id; BitstreamRead1Bit(stream, (uint*)&(picParam->entropy_coding_mode_flag)); if (picParam->entropy_coding_mode_flag) { status = AVCDEC_NOT_SUPPORTED; goto clean_up; } BitstreamRead1Bit(stream, (uint*)&(picParam->pic_order_present_flag)); ue_v(stream, &(picParam->num_slice_groups_minus1)); if (picParam->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1) { status = AVCDEC_FAIL; goto clean_up; } picParam->slice_group_change_rate_minus1 = 0; /* default value */ if (picParam->num_slice_groups_minus1 > 0) { ue_v(stream, &(picParam->slice_group_map_type)); if (picParam->slice_group_map_type > 6) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } if (picParam->slice_group_map_type == 0) { for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++) { ue_v(stream, &(picParam->run_length_minus1[iGroup])); } } else if (picParam->slice_group_map_type == 2) { // MC_CHECK <= or < for 
(iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++) { ue_v(stream, &(picParam->top_left[iGroup])); ue_v(stream, &(picParam->bottom_right[iGroup])); } } else if (picParam->slice_group_map_type == 3 || picParam->slice_group_map_type == 4 || picParam->slice_group_map_type == 5) { BitstreamRead1Bit(stream, (uint*)&(picParam->slice_group_change_direction_flag)); ue_v(stream, &(picParam->slice_group_change_rate_minus1)); } else if (picParam->slice_group_map_type == 6) { ue_v(stream, &(picParam->pic_size_in_map_units_minus1)); numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */ i = picParam->num_slice_groups_minus1; while (i > 0) { numBits++; i >>= 1; } i = picParam->seq_parameter_set_id; if (decvid->seqParams[i] == NULL) { status = AVCDEC_FAIL; goto clean_up; } PicWidthInMbs = decvid->seqParams[i]->pic_width_in_mbs_minus1 + 1; PicHeightInMapUnits = decvid->seqParams[i]->pic_height_in_map_units_minus1 + 1 ; PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits ; /* information has to be consistent with the seq_param */ if ((int)picParam->pic_size_in_map_units_minus1 != PicSizeInMapUnits - 1) { status = AVCDEC_FAIL; goto clean_up; } if (picParam->slice_group_id) { avcHandle->CBAVC_Free(userData, (int)picParam->slice_group_id); } picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * PicSizeInMapUnits, DEFAULT_ATTR); if (picParam->slice_group_id == NULL) { status = AVCDEC_MEMORY_FAIL; goto clean_up; } for (i = 0; i < PicSizeInMapUnits; i++) { BitstreamReadBits(stream, numBits, &(picParam->slice_group_id[i])); } } } ue_v(stream, &(picParam->num_ref_idx_l0_active_minus1)); if (picParam->num_ref_idx_l0_active_minus1 > 31) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } ue_v(stream, &(picParam->num_ref_idx_l1_active_minus1)); if (picParam->num_ref_idx_l1_active_minus1 > 31) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } BitstreamRead1Bit(stream, (uint*)&(picParam->weighted_pred_flag)); 
BitstreamReadBits(stream, 2, &(picParam->weighted_bipred_idc)); if (picParam->weighted_bipred_idc > 2) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } se_v(stream, &(picParam->pic_init_qp_minus26)); if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } se_v(stream, &(picParam->pic_init_qs_minus26)); if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } se_v(stream, &(picParam->chroma_qp_index_offset)); if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12) { status = AVCDEC_FAIL; /* out of range */ goto clean_up; } BitstreamReadBits(stream, 3, &temp_uint); picParam->deblocking_filter_control_present_flag = temp_uint >> 2; picParam->constrained_intra_pred_flag = (temp_uint >> 1) & 1; picParam->redundant_pic_cnt_present_flag = temp_uint & 1; // add this final check if (decvid->seqParams[picParam->seq_parameter_set_id] == NULL) // associated SPS is not found { status = AVCDEC_FAIL; goto clean_up; } // now that everything is OK - we may want to allocate the structure /* 2.1 if picParams[pic_param_set_id] is NULL, allocate it. 
*/
    /* (tail of DecodePPS, whose start is above this view) allocate the PPS slot
       on first use, then commit the locally-parsed PPS to the decoder state. */
    if (decvid->picParams[pic_parameter_set_id] == NULL)
    {
        decvid->picParams[pic_parameter_set_id] = (AVCPicParamSet*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR);
        if (decvid->picParams[pic_parameter_set_id] == NULL)
        {
            return AVCDEC_MEMORY_FAIL;
        }
        oscl_memset(decvid->picParams[pic_parameter_set_id], 0, sizeof(AVCPicParamSet));
    }
    /* Everything is successful, now copy it to the global structure */
    oscl_memcpy(decvid->picParams[pic_parameter_set_id], picParam, sizeof(AVCPicParamSet));
    video->currPicParams = decvid->picParams[pic_parameter_set_id];
    return AVCDEC_SUCCESS;
clean_up:
    /* parse failed after the slice_group_id map may have been allocated; free it
       before propagating the error status */
    if (picParam->slice_group_id != NULL)
    {
        avcHandle->CBAVC_Free(userData, (int)picParam->slice_group_id);
    }
    return status;
}

/* FirstPartOfSliceHeader(); RestOfSliceHeader() */
/** Parse a slice header from the bitstream, see subclause 7.4.3.
    Resolves the active PPS/SPS from pic_parameter_set_id, derives the
    per-picture geometry variables, and fills video->sliceHdr.
    Returns AVCDEC_SUCCESS, AVCDEC_FAIL on malformed/out-of-range syntax,
    or AVCDEC_NOT_SUPPORTED for B/SP/SI slices, field pictures and
    redundant pictures, which this decoder does not handle. */
AVCDec_Status DecodeSliceHeader(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream)
{
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCPicParamSet *currPPS;
    AVCSeqParamSet *currSPS;
    AVCDec_Status status;
    uint idr_pic_id;
    int slice_type, temp, i;

    ue_v(stream, &(sliceHdr->first_mb_in_slice));
    ue_v(stream, (uint*)&slice_type);

    /* slice_type values 5..9 mean "all slices in this picture have this type";
       a non-first slice must then agree (mod 5) with the first one */
    if (sliceHdr->first_mb_in_slice != 0)
    {
        if ((int)sliceHdr->slice_type >= 5 && (slice_type != (int)sliceHdr->slice_type) && (slice_type != (int)sliceHdr->slice_type - 5))
        {
            return AVCDEC_FAIL; /* slice type doesn't follow the first slice in the picture */
        }
    }
    sliceHdr->slice_type = (AVCSliceType) slice_type;
    if (slice_type > 4)
    {
        slice_type -= 5; /* fold 5..9 down to the base type 0..4 */
    }

    /* only P (0) and I (2) slices are supported by this decoder */
    if (slice_type == 1 || slice_type > 2)
    {
        return AVCDEC_NOT_SUPPORTED;
    }
    video->slice_type = (AVCSliceType) slice_type;

    ue_v(stream, &(sliceHdr->pic_parameter_set_id));
    /* end FirstPartSliceHeader() */

    /* begin RestOfSliceHeader() */
    /* after getting pic_parameter_set_id, we have to load corresponding SPS and PPS */
    if (sliceHdr->pic_parameter_set_id > 255)
    {
        return AVCDEC_FAIL;
    }

    if (decvid->picParams[sliceHdr->pic_parameter_set_id] == NULL)
        return AVCDEC_FAIL; /* PPS doesn't exist */

    currPPS = video->currPicParams = decvid->picParams[sliceHdr->pic_parameter_set_id];

    if (decvid->seqParams[currPPS->seq_parameter_set_id] == NULL)
        return AVCDEC_FAIL; /* SPS doesn't exist */

    currSPS = video->currSeqParams = decvid->seqParams[currPPS->seq_parameter_set_id];

    /* switching to a different SPS requires re-configuring buffers/DPB */
    if (currPPS->seq_parameter_set_id != video->seq_parameter_set_id)
    {
        video->seq_parameter_set_id = currPPS->seq_parameter_set_id;
        status = (AVCDec_Status)AVCConfigureSequence(decvid->avcHandle, video, false);
        if (status != AVCDEC_SUCCESS)
            return status;
        video->level_idc = currSPS->level_idc;
    }

    /* derived variables from SPS */
    video->MaxFrameNum = 1 << (currSPS->log2_max_frame_num_minus4 + 4);
    // MC_OPTIMIZE
    video->PicWidthInMbs = currSPS->pic_width_in_mbs_minus1 + 1;
    video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;
    video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;
    video->PicHeightInMapUnits = currSPS->pic_height_in_map_units_minus1 + 1 ;
    video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;
    video->FrameHeightInMbs = (2 - currSPS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;

    /* derived from PPS */
    video->SliceGroupChangeRate = currPPS->slice_group_change_rate_minus1 + 1;

    /* then we can continue decoding slice header */

    BitstreamReadBits(stream, currSPS->log2_max_frame_num_minus4 + 4, &(sliceHdr->frame_num));

    /* no picture decoded yet but stream does not start at frame_num 0:
       fake the previous frame number so gap-filling logic stays consistent */
    if (video->currFS == NULL && sliceHdr->frame_num != 0)
    {
        video->prevFrameNum = video->PrevRefFrameNum = sliceHdr->frame_num - 1;
    }

    if (!currSPS->frame_mbs_only_flag)
    {
        BitstreamRead1Bit(stream, &(sliceHdr->field_pic_flag));
        if (sliceHdr->field_pic_flag)
        {
            return AVCDEC_NOT_SUPPORTED; /* field coding not supported */
        }
    }

    /* derived variables from slice header*/
    video->PicHeightInMbs = video->FrameHeightInMbs;
    video->PicHeightInSamplesL = video->PicHeightInMbs * 16;
    video->PicHeightInSamplesC = video->PicHeightInMbs * 8;
    video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;

    if (sliceHdr->first_mb_in_slice >= video->PicSizeInMbs)
    {
        return AVCDEC_FAIL;
    }
    video->MaxPicNum = video->MaxFrameNum;
    video->CurrPicNum = sliceHdr->frame_num;

    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        if (sliceHdr->frame_num != 0)
        {
            return AVCDEC_FAIL; /* IDR pictures must have frame_num == 0 */
        }
        ue_v(stream, &idr_pic_id); /* parsed but otherwise unused here */
    }

    sliceHdr->delta_pic_order_cnt_bottom = 0; /* default value */
    sliceHdr->delta_pic_order_cnt[0] = 0;     /* default value */
    sliceHdr->delta_pic_order_cnt[1] = 0;     /* default value */
    if (currSPS->pic_order_cnt_type == 0)
    {
        BitstreamReadBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4, &(sliceHdr->pic_order_cnt_lsb));
        video->MaxPicOrderCntLsb =  1 << (currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4);
        if (sliceHdr->pic_order_cnt_lsb > video->MaxPicOrderCntLsb - 1)
            return AVCDEC_FAIL; /* out of range */

        if (currPPS->pic_order_present_flag)
        {
            se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt_bottom));
        }
    }
    if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)
    {
        se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt[0]));
        if (currPPS->pic_order_present_flag)
        {
            se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt[1]));
        }
    }

    sliceHdr->redundant_pic_cnt = 0; /* default value */
    if (currPPS->redundant_pic_cnt_present_flag)
    {
        // MC_CHECK
        ue_v(stream, &(sliceHdr->redundant_pic_cnt));
        if (sliceHdr->redundant_pic_cnt > 127) /* out of range */
            return AVCDEC_FAIL;

        if (sliceHdr->redundant_pic_cnt > 0) /* redundant picture */
            return AVCDEC_NOT_SUPPORTED; /* not supported */
    }

    /* active reference counts default to the PPS values, may be overridden below */
    sliceHdr->num_ref_idx_l0_active_minus1 = currPPS->num_ref_idx_l0_active_minus1;
    sliceHdr->num_ref_idx_l1_active_minus1 = currPPS->num_ref_idx_l1_active_minus1;

    if (slice_type == AVC_P_SLICE)
    {
        BitstreamRead1Bit(stream, &(sliceHdr->num_ref_idx_active_override_flag));
        if (sliceHdr->num_ref_idx_active_override_flag)
        {
            ue_v(stream, &(sliceHdr->num_ref_idx_l0_active_minus1));
        }
    }
    // check bound
    if (sliceHdr->num_ref_idx_l0_active_minus1 > 15) // ||sliceHdr->num_ref_idx_l1_active_minus1 > 31)
    {
        return AVCDEC_FAIL; /* not allowed */
    }
    /* if MbaffFrameFlag =1, max value of index is num_ref_idx_l0_active_minus1 for frame MBs and
       2*sliceHdr->num_ref_idx_l0_active_minus1 + 1 for field MBs */

    /* ref_pic_list_reordering() */
    status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);
    if (status != AVCDEC_SUCCESS)
    {
        return status;
    }

    if (video->nal_ref_idc != 0)
    {
        dec_ref_pic_marking(video, stream, sliceHdr);
    }

    se_v(stream, &(sliceHdr->slice_qp_delta));

    video->QPy = 26 + currPPS->pic_init_qp_minus26 + sliceHdr->slice_qp_delta;
    if (video->QPy > 51 || video->QPy < 0)
    {
        /* out-of-range QP is clamped rather than rejected */
        video->QPy = AVC_CLIP3(0, 51, video->QPy);
//          return AVCDEC_FAIL;
    }
    video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->QPy + video->currPicParams->chroma_qp_index_offset)];

    /* (x*43)>>8 is an exact integer division by 6 for the 0..51 QP range */
    video->QPy_div_6 = (video->QPy * 43) >> 8;
    video->QPy_mod_6 = video->QPy - 6 * video->QPy_div_6;
    video->QPc_div_6 = (video->QPc * 43) >> 8;
    video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;

    sliceHdr->slice_alpha_c0_offset_div2 = 0;
    sliceHdr->slice_beta_offset_div_2 = 0;
    sliceHdr->disable_deblocking_filter_idc = 0;
    video->FilterOffsetA = video->FilterOffsetB = 0;

    if (currPPS->deblocking_filter_control_present_flag)
    {
        ue_v(stream, &(sliceHdr->disable_deblocking_filter_idc));
        if (sliceHdr->disable_deblocking_filter_idc > 2)
        {
            return AVCDEC_FAIL; /* out of range */
        }
        if (sliceHdr->disable_deblocking_filter_idc != 1)
        {
            se_v(stream, &(sliceHdr->slice_alpha_c0_offset_div2));
            if (sliceHdr->slice_alpha_c0_offset_div2 < -6 ||
                    sliceHdr->slice_alpha_c0_offset_div2 > 6)
            {
                return AVCDEC_FAIL;
            }
            video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;

            se_v(stream, &(sliceHdr->slice_beta_offset_div_2));
            if (sliceHdr->slice_beta_offset_div_2 < -6 ||
                    sliceHdr->slice_beta_offset_div_2 > 6)
            {
                return AVCDEC_FAIL;
            }
            video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;
        }
    }

    if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3
            && currPPS->slice_group_map_type <= 5)
    {
        /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */
        temp = video->PicSizeInMapUnits / video->SliceGroupChangeRate;
        if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)
        {
            temp++;
        }
        i = 0;
        temp++;
        while (temp)
        {
            temp >>= 1;
            i++; /* i = number of bits needed for slice_group_change_cycle */
        }

        BitstreamReadBits(stream, i, &(sliceHdr->slice_group_change_cycle));
        video->MapUnitsInSliceGroup0 =
            AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);
    }

    return AVCDEC_SUCCESS;
}

/** Fill in the DPB for skipped frame numbers, see subclause 8.2.5.2.
    When a gap in frame_num is detected, synthesize "non-existing" short-term
    reference frames for every missing frame number so that reference picture
    lists stay consistent. Temporarily hijacks the slice header fields, which
    are saved here and restored at the end of the function (continues on the
    next block). */
AVCDec_Status fill_frame_num_gap(AVCHandle *avcHandle, AVCCommonObj *video)
{
    AVCDec_Status status;
    int CurrFrameNum;
    int UnusedShortTermFrameNum;
    /* save slice-header state that DecodePOC/StorePictureInDPB will clobber */
    int tmp1 = video->sliceHdr->delta_pic_order_cnt[0];
    int tmp2 = video->sliceHdr->delta_pic_order_cnt[1];
    int tmp3 = video->CurrPicNum;
    int tmp4 = video->sliceHdr->adaptive_ref_pic_marking_mode_flag;

    UnusedShortTermFrameNum = (video->prevFrameNum + 1) % video->MaxFrameNum;
    CurrFrameNum = video->sliceHdr->frame_num;

    video->sliceHdr->delta_pic_order_cnt[0] = 0;
    video->sliceHdr->delta_pic_order_cnt[1] = 0;
    while (CurrFrameNum != UnusedShortTermFrameNum)
    {
        video->CurrPicNum = UnusedShortTermFrameNum;
        video->sliceHdr->frame_num = UnusedShortTermFrameNum;

        status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
        if (status != AVCDEC_SUCCESS) /* no buffer available */
        {
            return status;
        }

        DecodePOC(video);
        DPBInitPic(video, UnusedShortTermFrameNum);

        video->currFS->PicOrderCnt = video->PicOrderCnt;
        video->currFS->FrameNum = video->sliceHdr->frame_num;

        /* initialize everything to zero */
        video->currFS->IsOutputted = 0x01; /* mark as already output: never displayed */
        video->currFS->IsReference = 3;
        video->currFS->IsLongTerm = 0;
        video->currFS->frame.isReference = TRUE;
        video->currFS->frame.isLongTerm = FALSE;

        /* non-existing frames always use sliding-window marking */
        video->sliceHdr->adaptive_ref_pic_marking_mode_flag = 0;
        status = (AVCDec_Status)StorePictureInDPB(avcHandle, video);  // MC_CHECK check the return status
        if (status != AVCDEC_SUCCESS)
        {
            return AVCDEC_FAIL;
        }

        video->prevFrameNum = UnusedShortTermFrameNum;
        UnusedShortTermFrameNum = (UnusedShortTermFrameNum + 1) % video->MaxFrameNum;
    }
    /* restore the real slice-header state */
    video->sliceHdr->frame_num = CurrFrameNum;
    video->CurrPicNum = tmp3;
    video->sliceHdr->delta_pic_order_cnt[0] = tmp1;
    video->sliceHdr->delta_pic_order_cnt[1] = tmp2;
    video->sliceHdr->adaptive_ref_pic_marking_mode_flag = tmp4;
    return AVCDEC_SUCCESS;
}

/** Parse the ref_pic_list_reordering() syntax, see subclause 7.4.3.1.
    Only list 0 is handled (no B slices in this decoder). The parsed
    reordering commands are stored in sliceHdr for later application.
    Returns AVCDEC_FAIL on out-of-range values or a missing end-of-loop
    marker (idc == 3). */
AVCDec_Status ref_pic_list_reordering(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)
{
    int i;

    if (slice_type != AVC_I_SLICE)
    {
        BitstreamRead1Bit(stream, &(sliceHdr->ref_pic_list_reordering_flag_l0));
        if (sliceHdr->ref_pic_list_reordering_flag_l0)
        {
            i = 0;
            do
            {
                ue_v(stream, &(sliceHdr->reordering_of_pic_nums_idc_l0[i]));
                /* idc 0/1: reorder by short-term picture number difference */
                if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||
                        sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)
                {
                    ue_v(stream, &(sliceHdr->abs_diff_pic_num_minus1_l0[i]));
                    if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&
                            sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum / 2 - 1)
                    {
                        return AVCDEC_FAIL; /* out of range */
                    }
                    if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&
                            sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum / 2 - 2)
                    {
                        return AVCDEC_FAIL; /* out of range */
                    }
                }
                else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)
                {
                    /* idc 2: reorder by long-term picture number */
                    ue_v(stream, &(sliceHdr->long_term_pic_num_l0[i]));
                }
                i++;
            }
            while (sliceHdr->reordering_of_pic_nums_idc_l0[i-1] != 3
                    && i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;
            if (sliceHdr->reordering_of_pic_nums_idc_l0[i-1] != 3)  // only way to exit the while loop
            {
                return AVCDEC_FAIL; /* loop ended by count limit, not by idc==3 terminator */
            }
        }
    }

    return AVCDEC_SUCCESS;
}

/** Parse the dec_ref_pic_marking() syntax, see subclause 7.4.3.3.
    For IDR pictures reads the prior-pics/long-term flags; otherwise reads
    the optional adaptive memory-management control operations (MMCO list),
    bounded by MAX_DEC_REF_PIC_MARKING entries. */
AVCDec_Status dec_ref_pic_marking(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr)
{
    int i;

    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        BitstreamRead1Bit(stream, &(sliceHdr->no_output_of_prior_pics_flag));
        BitstreamRead1Bit(stream, &(sliceHdr->long_term_reference_flag));
        if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */
        {
            video->MaxLongTermFrameIdx = -1; /* no long-term frame indx */
        }
        else /* used for long-term */
        {
            video->MaxLongTermFrameIdx = 0;
            video->LongTermFrameIdx = 0;
        }
    }
    else
    {
        BitstreamRead1Bit(stream, &(sliceHdr->adaptive_ref_pic_marking_mode_flag));
        if (sliceHdr->adaptive_ref_pic_marking_mode_flag)
        {
            i = 0;
            /* read MMCO commands until operation 0 (end) or table full */
            do
            {
                ue_v(stream, &(sliceHdr->memory_management_control_operation[i]));
                if (sliceHdr->memory_management_control_operation[i] == 1 ||
                        sliceHdr->memory_management_control_operation[i] == 3)
                {
                    ue_v(stream, &(sliceHdr->difference_of_pic_nums_minus1[i]));
                }
                if (sliceHdr->memory_management_control_operation[i] == 2)
                {
                    ue_v(stream, &(sliceHdr->long_term_pic_num[i]));
                }
                if (sliceHdr->memory_management_control_operation[i] == 3 ||
                        sliceHdr->memory_management_control_operation[i] == 6)
                {
                    ue_v(stream, &(sliceHdr->long_term_frame_idx[i]));
                }
                if (sliceHdr->memory_management_control_operation[i] == 4)
                {
                    ue_v(stream, &(sliceHdr->max_long_term_frame_idx_plus1[i]));
                }
                i++;
            }
            while (sliceHdr->memory_management_control_operation[i-1] != 0 && i < MAX_DEC_REF_PIC_MARKING);
            if (i >= MAX_DEC_REF_PIC_MARKING)
            {
                return AVCDEC_FAIL; /* we're screwed!!, not enough memory */
            }
        }
    }

    return AVCDEC_SUCCESS;
}

/* see subclause 8.2.1 Decoding process for picture order count.
   Computes video->PicOrderCnt (plus Top/BottomFieldOrderCnt) according to the
   SPS pic_order_cnt_type:
     mode 0 - explicit LSBs in the slice header plus derived MSBs (8.2.1.1)
     mode 1 - expected POC from the SPS ref-frame cycle plus deltas (8.2.1.2)
     mode 2 - POC tied directly to decoding order / frame_num (8.2.1.3)
   Returns AVCDEC_FAIL only for an invalid pic_order_cnt_type. */
AVCDec_Status DecodePOC(AVCCommonObj *video)
{
    AVCSeqParamSet *currSPS = video->currSeqParams;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    int i;

    switch (currSPS->pic_order_cnt_type)
    {
        case 0: /* POC MODE 0 , subclause 8.2.1.1 */
            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                video->prevPicOrderCntMsb = 0;
                video->prevPicOrderCntLsb = 0;
            }

            /* Calculate the MSBs of current picture */
            if (sliceHdr->pic_order_cnt_lsb  <  video->prevPicOrderCntLsb &&
                    (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb)  >= (video->MaxPicOrderCntLsb / 2))
                video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb; /* LSB wrapped forward */
            else if (sliceHdr->pic_order_cnt_lsb  >  video->prevPicOrderCntLsb &&
                     (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb)  > (video->MaxPicOrderCntLsb / 2))
                video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb; /* LSB wrapped backward */
            else
                video->PicOrderCntMsb = video->prevPicOrderCntMsb;

            /* JVT-I010 page 81 is different from JM7.3 */
            video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;
            video->BottomFieldOrderCnt = video->TopFieldOrderCnt + sliceHdr->delta_pic_order_cnt_bottom;

            break;
        case 1: /* POC MODE 1, subclause 8.2.1.2 */
            /* calculate FrameNumOffset */
            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                video->prevFrameNumOffset = 0;
                video->FrameNumOffset = 0;
            }
            else if (video->prevFrameNum > sliceHdr->frame_num) /* frame_num wrapped */
            {
                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
            }
            else
            {
                video->FrameNumOffset = video->prevFrameNumOffset;
            }
            /* calculate absFrameNum */
            if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)
            {
                video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;
            }
            else
            {
                video->absFrameNum = 0;
            }

            if (video->absFrameNum > 0 && video->nal_ref_idc == 0)
            {
                video->absFrameNum--;
            }

            /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */
            if (video->absFrameNum > 0)
            {
                video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;
                video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;
            }
            /* derive expectedDeltaPerPicOrderCntCycle */
            video->expectedDeltaPerPicOrderCntCycle = 0;
            for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)
            {
                video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];
            }
            /* derive expectedPicOrderCnt */
            if (video->absFrameNum)
            {
                video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;
                for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)
                {
                    video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];
                }
            }
            else
            {
                video->expectedPicOrderCnt = 0;
            }

            if (video->nal_ref_idc == 0)
            {
                video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;
            }
            /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
            video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
            video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];

            video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);

            break;
        case 2: /* POC MODE 2, subclause 8.2.1.3 */
            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                video->FrameNumOffset = 0;
            }
            else if (video->prevFrameNum > sliceHdr->frame_num) /* frame_num wrapped */
            {
                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
            }
            else
            {
                video->FrameNumOffset = video->prevFrameNumOffset;
            }
            /* derive tempPicOrderCnt, we just use PicOrderCnt */
            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                video->PicOrderCnt = 0;
            }
            else if (video->nal_ref_idc == 0)
            {
                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;
            }
            else
            {
                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);
            }
            video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;
            break;
        default:
            return AVCDEC_FAIL;
    }

    return AVCDEC_SUCCESS;
}

/** SEI NAL units are accepted but ignored by this decoder (payload parsers
    below exist but are not invoked from here). */
AVCDec_Status DecodeSEI(AVCDecObject *decvid, AVCDecBitstream *stream)
{
OSCL_UNUSED_ARG(decvid); OSCL_UNUSED_ARG(stream); return AVCDEC_SUCCESS; } AVCDec_Status sei_payload(AVCDecObject *decvid, AVCDecBitstream *stream, uint payloadType, uint payloadSize) { AVCDec_Status status = AVCDEC_SUCCESS; uint i; switch (payloadType) { case 0: /* buffering period SEI */ status = buffering_period(decvid, stream); break; case 1: /* picture timing SEI */ status = pic_timing(decvid, stream); break; case 2: case 3: case 4: case 5: case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: for (i = 0; i < payloadSize; i++) { BitstreamFlushBits(stream, 8); } break; case 6: /* recovery point SEI */ status = recovery_point(decvid, stream); break; case 7: /* decoded reference picture marking repetition SEI */ status = dec_ref_pic_marking_repetition(decvid, stream); break; case 18: /* motion-constrained slice group set SEI */ status = motion_constrained_slice_group_set(decvid, stream); break; default: /* reserved_sei_message */ for (i = 0; i < payloadSize; i++) { BitstreamFlushBits(stream, 8); } break; } BitstreamByteAlign(stream); return status; } AVCDec_Status buffering_period(AVCDecObject *decvid, AVCDecBitstream *stream) { AVCSeqParamSet *currSPS; uint seq_parameter_set_id; uint temp; uint i; ue_v(stream, &seq_parameter_set_id); if (seq_parameter_set_id > 31) { return AVCDEC_FAIL; } // decvid->common->seq_parameter_set_id = seq_parameter_set_id; currSPS = decvid->seqParams[seq_parameter_set_id]; if (currSPS->vui_parameters.nal_hrd_parameters_present_flag) { for (i = 0; i <= currSPS->vui_parameters.nal_hrd_parameters.cpb_cnt_minus1; i++) { /* initial_cpb_removal_delay[i] */ BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp); /*initial _cpb_removal_delay_offset[i] */ BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp); } } if (currSPS->vui_parameters.vcl_hrd_parameters_present_flag) { for (i = 0; i <= 
currSPS->vui_parameters.vcl_hrd_parameters.cpb_cnt_minus1; i++) { /* initial_cpb_removal_delay[i] */ BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp); /*initial _cpb_removal_delay_offset[i] */ BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp); } } return AVCDEC_SUCCESS; } AVCDec_Status pic_timing(AVCDecObject *decvid, AVCDecBitstream *stream) { AVCSeqParamSet *currSPS; uint temp, NumClockTs = 0, time_offset_length = 24, full_timestamp_flag; uint i; currSPS = decvid->seqParams[decvid->common->seq_parameter_set_id]; if (currSPS->vui_parameters.nal_hrd_parameters_present_flag) { BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp); BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.dpb_output_delay_length_minus1 + 1, &temp); time_offset_length = currSPS->vui_parameters.nal_hrd_parameters.time_offset_length; } else if (currSPS->vui_parameters.vcl_hrd_parameters_present_flag) { BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp); BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.dpb_output_delay_length_minus1 + 1, &temp); time_offset_length = currSPS->vui_parameters.vcl_hrd_parameters.time_offset_length; } if (currSPS->vui_parameters.pic_struct_present_flag) { /* pic_struct */ BitstreamReadBits(stream, 4, &temp); switch (temp) { case 0: case 1: case 2: NumClockTs = 1; break; case 3: case 4: case 7: NumClockTs = 2; break; case 5: case 6: case 8: NumClockTs = 3; break; default: NumClockTs = 0; break; } for (i = 0; i < NumClockTs; i++) { /* clock_timestamp_flag[i] */ BitstreamRead1Bit(stream, &temp); if (temp) { /* ct_type */ BitstreamReadBits(stream, 2, &temp); /* nuit_field_based_flag */ BitstreamRead1Bit(stream, &temp); /* counting_type */ BitstreamReadBits(stream, 5, &temp); /* 
full_timestamp_flag */ BitstreamRead1Bit(stream, &temp); full_timestamp_flag = temp; /* discontinuity_flag */ BitstreamRead1Bit(stream, &temp); /* cnt_dropped_flag */ BitstreamRead1Bit(stream, &temp); /* n_frames */ BitstreamReadBits(stream, 8, &temp); if (full_timestamp_flag) { /* seconds_value */ BitstreamReadBits(stream, 6, &temp); /* minutes_value */ BitstreamReadBits(stream, 6, &temp); /* hours_value */ BitstreamReadBits(stream, 5, &temp); } else { /* seconds_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* seconds_value */ BitstreamReadBits(stream, 6, &temp); /* minutes_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* minutes_value */ BitstreamReadBits(stream, 6, &temp); /* hourss_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* hours_value */ BitstreamReadBits(stream, 5, &temp); } } } } if (time_offset_length) { /* time_offset */ BitstreamReadBits(stream, time_offset_length, &temp); } else { /* time_offset */ temp = 0; } } } } return AVCDEC_SUCCESS; } AVCDec_Status recovery_point(AVCDecObject *decvid, AVCDecBitstream *stream) { OSCL_UNUSED_ARG(decvid); uint temp; /* recover_frame_cnt */ ue_v(stream, &temp); /* exact_match_flag */ BitstreamRead1Bit(stream, &temp); /* broken_link_flag */ BitstreamRead1Bit(stream, &temp); /* changing slic_group_idc */ BitstreamReadBits(stream, 2, &temp); return AVCDEC_SUCCESS; } AVCDec_Status dec_ref_pic_marking_repetition(AVCDecObject *decvid, AVCDecBitstream *stream) { AVCSeqParamSet *currSPS; uint temp; currSPS = decvid->seqParams[decvid->common->seq_parameter_set_id]; /* original_idr_flag */ BitstreamRead1Bit(stream, &temp); /* original_frame_num */ ue_v(stream, &temp); if (currSPS->frame_mbs_only_flag == 0) { /* original_field_pic_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* original_bottom_field_flag */ BitstreamRead1Bit(stream, &temp); } } /* dec_ref_pic_marking(video,stream,sliceHdr); */ return AVCDEC_SUCCESS; } AVCDec_Status motion_constrained_slice_group_set(AVCDecObject *decvid, 
AVCDecBitstream *stream) { OSCL_UNUSED_ARG(decvid); uint temp, i, numBits; /* num_slice_groups_in_set_minus1 */ ue_v(stream, &temp); numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */ i = temp; while (i > 0) { numBits++; i >>= 1; } for (i = 0; i <= temp; i++) { /* slice_group_id */ BitstreamReadBits(stream, numBits, &temp); } /* exact_sample_value_match_flag */ BitstreamRead1Bit(stream, &temp); /* pan_scan_rect_flag */ BitstreamRead1Bit(stream, &temp); if (temp) { /* pan_scan_rect_id */ ue_v(stream, &temp); } return AVCDEC_SUCCESS; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/itrans.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "avclib_common.h" #include "oscl_mem.h" /* input are in the first 16 elements of block, output must be in the location specified in Figure 8-6. 
*/
/* subclause 8.5.6 */
/** Inverse Hadamard transform + dequantization of the 4x4 Intra-16x16 luma DC
    coefficients. Qq is QPy/6, Rq is QPy%6; results are written back in-place
    at stride 4 within the 64-element-per-row coefficient layout. */
void Intra16DCTrans(int16 *block, int Qq, int Rq)
{
    int m0, m1, m2, m3;
    int j, offset;
    int16 *inout;
    int scale = dequant_coefres[Rq][0];

    /* first pass: horizontal butterfly over each row of DC terms */
    inout = block;

    for (j = 0; j < 4; j++)
    {
        m0 = inout[0] + inout[4];
        m1 = inout[0] - inout[4];
        m2 = inout[8] + inout[12];
        m3 = inout[8] - inout[12];

        inout[0] = m0 + m2;
        inout[4] = m0 - m2;
        inout[8] = m1 - m3;
        inout[12] = m1 + m3;
        inout += 64;
    }

    /* second pass: vertical butterfly plus dequant scaling */
    inout = block;

    if (Qq >= 2)  /* this way should be faster than JM */
    {           /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. */
        Qq -= 2;
        for (j = 0; j < 4; j++)
        {
            m0 = inout[0] + inout[64];
            m1 = inout[0] - inout[64];
            m2 = inout[128] + inout[192];
            m3 = inout[128] - inout[192];

            inout[0] = ((m0 + m2) * scale) << Qq;
            inout[64] = ((m0 - m2) * scale) << Qq;
            inout[128] = ((m1 - m3) * scale) << Qq;
            inout[192] = ((m1 + m3) * scale) << Qq;
            inout += 4;
        }
    }
    else
    {
        Qq = 2 - Qq;
        offset = 1 << (Qq - 1); /* rounding term for the right shift */

        for (j = 0; j < 4; j++)
        {
            m0 = inout[0] + inout[64];
            m1 = inout[0] - inout[64];
            m2 = inout[128] + inout[192];
            m3 = inout[128] - inout[192];

            inout[0] = (((m0 + m2) * scale + offset) >> Qq);
            inout[64] = (((m0 - m2) * scale + offset) >> Qq);
            inout[128] = (((m1 - m3) * scale + offset) >> Qq);
            inout[192] = (((m1 + m3) * scale + offset) >> Qq);
            inout += 4;
        }
    }

    return ;
}

/* see subclase 8.5.8 */
/** 4x4 inverse integer transform of a luma residual block; adds the residual
    to the prediction and stores the clipped (0..255) result into cur with
    row stride `width`. With USE_PRED_BLOCK the prediction is read from the
    separate pred buffer (pitch 20), otherwise in-place from cur. */
void itrans(int16 *block, uint8 *pred, uint8 *cur, int width)
{
    int e0, e1, e2, e3; /* note, at every step of the calculation, these values */
    /* shall never exceed 16bit sign value, but we don't check */
    int i;           /* to save the cycles. */
    int16 *inout;

    /* horizontal pass, in-place over 4 rows (row stride 16 in the coeff buffer) */
    inout = block;

    for (i = 4; i > 0; i--)
    {
        e0 = inout[0] + inout[2];
        e1 = inout[0] - inout[2];
        e2 = (inout[1] >> 1) - inout[3];
        e3 = inout[1] + (inout[3] >> 1);

        inout[0] = e0 + e3;
        inout[1] = e1 + e2;
        inout[2] = e1 - e2;
        inout[3] = e0 - e3;

        inout += 16;
    }

    /* vertical pass + reconstruction; ">>6" removes the transform gain,
       "+32" is the rounding term */
    for (i = 4; i > 0; i--)
    {
        e0 = block[0] + block[32];
        e1 = block[0] - block[32];
        e2 = (block[16] >> 1) - block[48];
        e3 = block[16] + (block[48] >> 1);

        e0 += e3;
        e3 = (e0 - (e3 << 1)); /* e0-e3 */
        e1 += e2;
        e2 = (e1 - (e2 << 1)); /* e1-e2 */
        e0 += 32;
        e1 += 32;
        e2 += 32;
        e3 += 32;
#ifdef USE_PRED_BLOCK
        e0 = pred[0] + (e0 >> 6);
        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */
        e1 = pred[20] + (e1 >> 6);
        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */
        e2 = pred[40] + (e2 >> 6);
        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */
        e3 = pred[60] + (e3 >> 6);
        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */
        *cur = e0;
        *(cur += width) = e1;
        *(cur += width) = e2;
        cur[width] = e3;
        cur -= (width << 1);
        cur++;
        pred++;
#else
        OSCL_UNUSED_ARG(pred);
        e0 = *cur + (e0 >> 6);
        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */
        *cur = e0;
        e1 = *(cur += width) + (e1 >> 6);
        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */
        *cur = e1;
        e2 = *(cur += width) + (e2 >> 6);
        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */
        *cur = e2;
        e3 = cur[width] + (e3 >> 6);
        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */
        cur[width] = e3;
        cur -= (width << 1);
        cur++;
#endif
        block++;
    }

    return ;
}

/* see subclase 8.5.8 */
/** Same 4x4 inverse transform/reconstruction as itrans(), but for chroma:
    with USE_PRED_BLOCK the prediction buffer pitch is 12 instead of 20. */
void ictrans(int16 *block, uint8 *pred, uint8 *cur, int width)
{
    int e0, e1, e2, e3; /* note, at every step of the calculation, these values */
    /* shall never exceed 16bit sign value, but we don't check */
    int i;           /* to save the cycles. */
    int16 *inout;

    inout = block;

    for (i = 4; i > 0; i--)
    {
        e0 = inout[0] + inout[2];
        e1 = inout[0] - inout[2];
        e2 = (inout[1] >> 1) - inout[3];
        e3 = inout[1] + (inout[3] >> 1);

        inout[0] = e0 + e3;
        inout[1] = e1 + e2;
        inout[2] = e1 - e2;
        inout[3] = e0 - e3;

        inout += 16;
    }

    for (i = 4; i > 0; i--)
    {
        e0 = block[0] + block[32];
        e1 = block[0] - block[32];
        e2 = (block[16] >> 1) - block[48];
        e3 = block[16] + (block[48] >> 1);

        e0 += e3;
        e3 = (e0 - (e3 << 1)); /* e0-e3 */
        e1 += e2;
        e2 = (e1 - (e2 << 1)); /* e1-e2 */
        e0 += 32;
        e1 += 32;
        e2 += 32;
        e3 += 32;
#ifdef USE_PRED_BLOCK
        e0 = pred[0] + (e0 >> 6);
        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */
        e1 = pred[12] + (e1 >> 6);
        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */
        e2 = pred[24] + (e2 >> 6);
        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */
        e3 = pred[36] + (e3 >> 6);
        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */
        *cur = e0;
        *(cur += width) = e1;
        *(cur += width) = e2;
        cur[width] = e3;
        cur -= (width << 1);
        cur++;
        pred++;
#else
        OSCL_UNUSED_ARG(pred);
        e0 = *cur + (e0 >> 6);
        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */
        *cur = e0;
        e1 = *(cur += width) + (e1 >> 6);
        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */
        *cur = e1;
        e2 = *(cur += width) + (e2 >> 6);
        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */
        *cur = e2;
        e3 = cur[width] + (e3 >> 6);
        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */
        cur[width] = e3;
        cur -= (width << 1);
        cur++;
#endif
        block++;
    }

    return ;
}

/* see subclause 8.5.7 */
/** Inverse 2x2 Hadamard transform + dequantization of the chroma DC
    coefficients, in-place; Qq is QPc/6, Rq is QPc%6. */
void ChromaDCTrans(int16 *block, int Qq, int Rq)
{
    int c00, c01, c10, c11;
    int f0, f1, f2, f3;
    int scale = dequant_coefres[Rq][0];

    c00 = block[0] + block[4];
    c01 = block[0] - block[4];
    c10 = block[64] + block[68];
    c11 = block[64] - block[68];

    f0 = c00 + c10;
    f1 = c01 + c11;
    f2 = c00 - c10;
    f3 = c01 - c11;

    if (Qq >= 1)
    {
        Qq -= 1;
        block[0] = (f0 * scale) << Qq;
        block[4] = (f1 * scale) << Qq;
        block[64] = (f2 * scale) << Qq;
        block[68] = (f3 * scale) << Qq;
    }
    else
    {
        block[0] = (f0 * scale) >> 1;
        block[4] = (f1 * scale) >> 1;
        block[64] = (f2 * scale) >> 1;
        block[68] = (f3 * scale) >> 1;
    }

    return ;
}

/** Copy a 4x4 block from pred (pitch pred_pitch) to cur (pitch width) using
    32-bit word loads/stores.
    NOTE(review): the uint32 casts assume 4-byte-aligned pointers and
    alignment-tolerant loads — verify for the target ABI. */
void copy_block(uint8 *pred, uint8 *cur, int width, int pred_pitch)
{
    uint32 temp;

    temp = *((uint32*)pred);
    pred += pred_pitch;
    *((uint32*)cur) = temp;
    cur += width;
    temp = *((uint32*)pred);
    pred += pred_pitch;
    *((uint32*)cur) = temp;
    cur += width;
    temp = *((uint32*)pred);
    pred += pred_pitch;
    *((uint32*)cur) = temp;
    cur += width;
    temp = *((uint32*)pred);
    *((uint32*)cur) = temp;

    return ;
}



================================================
FILE: RtspCamera/jni/avc_h264/dec/src/pred_inter.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avcdec_lib.h"
#include "oscl_mem.h"

/* clamp x to the 0..255 pixel range without branching on sign */
#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \
                 x = 0xFF & (~(x>>31));}

/* dispatch table indexed by (blkwidth << 2) + (dy << 1) + dx */
static void (*const ChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) =
{
    &ChromaFullMC_SIMD,
    &ChromaHorizontalMC_SIMD,
    &ChromaVerticalMC_SIMD,
    &ChromaDiagonalMC_SIMD,
    &ChromaFullMC_SIMD,
    &ChromaHorizontalMC2_SIMD,
    &ChromaVerticalMC2_SIMD,
    &ChromaDiagonalMC2_SIMD
};
/* Perform motion prediction and compensation with residue if exist.
*/
/** Motion-compensated prediction and residual reconstruction for one inter
    macroblock. Walks the MB partitions/sub-partitions, performs luma and
    chroma motion compensation from RefPicList0 (list 0 only; no B slices),
    then applies the inverse transforms to every 4x4 block flagged in
    video->cbp4x4. Output goes either to the per-MB pred buffer
    (USE_PRED_BLOCK) or directly into the current picture. */
void InterMBPrediction(AVCCommonObj *video)
{
    AVCMacroblock *currMB = video->currMB;
    AVCPictureData *currPic = video->currPic;
    int mbPartIdx, subMbPartIdx;
    int ref_idx;
    int offset_MbPart_indx = 0;
    int16 *mv;
    uint32 x_pos, y_pos;
    uint8 *curL, *curCb, *curCr;
    uint8 *ref_l, *ref_Cb, *ref_Cr;
    uint8 *predBlock, *predCb, *predCr;
    int block_x, block_y, offset_x, offset_y, offsetP, offset;
    int x_position = (video->mb_x << 4);
    int y_position = (video->mb_y << 4);
    int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx;
    int picWidth = currPic->pitch;
    int picHeight = currPic->height;
    int16 *dataBlock;
    uint32 cbp4x4;
    uint32 tmp_word;

    /* locate this MB's luma/chroma top-left sample in the current picture */
    tmp_word = y_position * picWidth;
    curL = currPic->Sl + tmp_word + x_position;
    offset = (tmp_word >> 2) + (x_position >> 1); /* chroma offset: half res in both axes */
    curCb = currPic->Scb + offset;
    curCr = currPic->Scr + offset;

#ifdef USE_PRED_BLOCK
    predBlock = video->pred + 84;
    predCb = video->pred + 452;
    predCr = video->pred + 596;
#else
    predBlock = curL;
    predCb = curCb;
    predCr = curCr;
#endif

    GetMotionVectorPredictor(video, false);

    for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
    {
        MbHeight = currMB->SubMbPartHeight[mbPartIdx];
        MbWidth = currMB->SubMbPartWidth[mbPartIdx];
        mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1);
        mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1;
        ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X];
        offset_indx = 0;

        ref_l = video->RefPicList0[ref_idx]->Sl;
        ref_Cb = video->RefPicList0[ref_idx]->Scb;
        ref_Cr = video->RefPicList0[ref_idx]->Scr;

        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
        {
            block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1);  // check this
            block_y = (mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1);
            mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));
            offset_x = x_position + (block_x << 2);
            offset_y = y_position + (block_y << 2);
            x_pos = (offset_x << 2) + *mv++;   /*quarter pel */
            y_pos = (offset_y << 2) + *mv;     /*quarter pel */

            //offset = offset_y * currPic->width;
            //offsetC = (offset >> 2) + (offset_x >> 1);
#ifdef USE_PRED_BLOCK
            offsetP = (block_y * 80) + (block_x << 2);
            LumaMotionComp(ref_l, picWidth, picHeight, x_pos, y_pos,
                           /*comp_Sl + offset + offset_x,*/
                           predBlock + offsetP, 20, MbWidth, MbHeight);
#else
            offsetP = (block_y << 2) * picWidth + (block_x << 2);
            LumaMotionComp(ref_l, picWidth, picHeight, x_pos, y_pos,
                           /*comp_Sl + offset + offset_x,*/
                           predBlock + offsetP, picWidth, MbWidth, MbHeight);
#endif

#ifdef USE_PRED_BLOCK
            offsetP = (block_y * 24) + (block_x << 1);
            ChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
                             /*comp_Scb +  offsetC,*/
                             predCb + offsetP, 12, MbWidth >> 1, MbHeight >> 1);
            ChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
                             /*comp_Scr +  offsetC,*/
                             predCr + offsetP, 12, MbWidth >> 1, MbHeight >> 1);
#else
            offsetP = (block_y * picWidth) + (block_x << 1);
            ChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
                             /*comp_Scb +  offsetC,*/
                             predCb + offsetP, picWidth >> 1, MbWidth >> 1, MbHeight >> 1);
            ChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
                             /*comp_Scr +  offsetC,*/
                             predCr + offsetP, picWidth >> 1, MbWidth >> 1, MbHeight >> 1);
#endif

            offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;
        }
        offset_MbPart_indx = currMB->MbPartWidth >> 4;
    }

    /* used in decoder, used to be if(!encFlag)  */

    /* transform in raster scan order */
    dataBlock = video->block;
    cbp4x4 = video->cbp4x4; /* one bit per 4x4 block, consumed LSB-first */
    /* luma */
    for (block_y = 4; block_y > 0; block_y--)
    {
        for (block_x = 4; block_x > 0; block_x--)
        {
#ifdef USE_PRED_BLOCK
            if (cbp4x4&1)
            {
                itrans(dataBlock, predBlock, predBlock, 20);
            }
#else
            if (cbp4x4&1)
            {
                itrans(dataBlock, curL, curL, picWidth);
            }
#endif
            cbp4x4 >>= 1;
            dataBlock += 4;
#ifdef USE_PRED_BLOCK
            predBlock += 4;
#else
            curL += 4;
#endif
        }
        dataBlock += 48;
#ifdef USE_PRED_BLOCK
        predBlock += 64;
#else
        curL += ((picWidth << 2) - 16);
#endif
    }
    /* chroma */
    picWidth = (picWidth >> 1);
    for (block_y = 2; block_y > 0; block_y--)
    {
        for (block_x = 2; block_x > 0; block_x--)
        {
#ifdef USE_PRED_BLOCK
            if (cbp4x4&1)
            {
                ictrans(dataBlock, predCb, predCb, 12);
            }
#else
            if (cbp4x4&1)
            {
                ictrans(dataBlock, curCb, curCb, picWidth);
            }
#endif
            cbp4x4 >>= 1;
            dataBlock += 4;
#ifdef USE_PRED_BLOCK
            predCb += 4;
#else
            curCb += 4;
#endif
        }
        for (block_x = 2; block_x > 0; block_x--)
        {
#ifdef USE_PRED_BLOCK
            if (cbp4x4&1)
            {
                ictrans(dataBlock, predCr, predCr, 12);
            }
#else
            if (cbp4x4&1)
            {
                ictrans(dataBlock, curCr, curCr, picWidth);
            }
#endif
            cbp4x4 >>= 1;
            dataBlock += 4;
#ifdef USE_PRED_BLOCK
            predCr += 4;
#else
            curCr += 4;
#endif
        }
        dataBlock += 48;
#ifdef USE_PRED_BLOCK
        predCb += 40;
        predCr += 40;
#else
        curCb += ((picWidth << 2) - 8);
        curCr += ((picWidth << 2) - 8);
#endif
    }

#ifdef MB_BASED_DEBLOCK
    SaveNeighborForIntraPred(video, offset);
#endif
    return ;
}

/* preform the actual motion comp here */
/** Luma quarter-pel motion compensation (continues beyond this view).
    x_pos/y_pos are in quarter-pel units; the low 2 bits (dx,dy) select the
    interpolation case, the rest is the full-pel position. Out-of-picture
    references are handled by CreatePad() edge padding into temp[][]. */
void LumaMotionComp(uint8 *ref, int picwidth, int picheight,
                    int x_pos, int y_pos,
                    uint8 *pred, int pred_pitch,
                    int blkwidth, int blkheight)
{
    int dx, dy;
    uint8 temp[24][24]; /* for padding, make the size multiple of 4 for packing */
    int temp2[21][21]; /* for intermediate results */
    uint8 *ref2;

    dx = x_pos & 3;
    dy = y_pos & 3;
    x_pos = x_pos >> 2;  /* round it to full-pel resolution */
    y_pos = y_pos >> 2;

    /* perform actual motion compensation */
    if (dx == 0 && dy == 0)
    {  /* fullpel position *//* G */
        if (x_pos >= 0 && x_pos + blkwidth <= picwidth && y_pos >= 0 && y_pos + blkheight <= picheight)
        {
            ref += y_pos * picwidth + x_pos;
            FullPelMC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight);
        }
        else
        {
            CreatePad(ref, picwidth, picheight, x_pos, y_pos, &temp[0][0], blkwidth, blkheight);
            FullPelMC(&temp[0][0], 24, pred, pred_pitch, blkwidth, blkheight);
        }
    }   /* other positions */
    else  if (dy == 0)
    { /* no vertical interpolation *//* a,b,c*/
        if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos >= 0 && y_pos + blkheight <= picheight)
        {
            ref += y_pos * picwidth + x_pos;
            HorzInterp1MC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight, dx);
        }
        else  /* need padding */
        {
            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos, &temp[0][0], blkwidth + 5, blkheight);
            HorzInterp1MC(&temp[0][2], 24, pred, pred_pitch, blkwidth, blkheight, dx);
        }
    }
    else if (dx == 0)
    { /*no horizontal interpolation *//* d,h,n */
        if (x_pos >= 0 && x_pos + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)
        {
            ref += y_pos * picwidth + x_pos;
            VertInterp1MC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight, dy);
        }
        else  /* need padding */
        {
            CreatePad(ref, picwidth, picheight, x_pos, y_pos - 2, &temp[0][0], blkwidth, blkheight + 5);
            VertInterp1MC(&temp[2][0], 24, pred, pred_pitch, blkwidth, blkheight, dy);
        }
    }
    else if (dy == 2)
    {  /* horizontal cross *//* i, j, k */
        if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)
        {
            ref += y_pos * picwidth + x_pos - 2; /* move to the left 2 pixels */
            VertInterp2MC(ref, picwidth, &temp2[0][0], 21, blkwidth + 5, blkheight);
            HorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);
        }
        else  /* need padding */
        {
            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5, blkheight + 5);
            VertInterp2MC(&temp[2][0], 24, &temp2[0][0], 21, blkwidth + 5, blkheight);
            HorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);
        }
    }
    else if (dx == 2)
    { /* vertical cross */ /* f,q */
        if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)
        {
            ref += (y_pos - 2) * picwidth + x_pos; /* move to up 2 lines */
            HorzInterp3MC(ref, picwidth, &temp2[0][0], 21, blkwidth, blkheight + 5);
            VertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);
        }
        else  /* need padding */
        {
            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5, blkheight + 5);
            HorzInterp3MC(&temp[0][2], 24, &temp2[0][0], 21, blkwidth, blkheight + 5);
            VertInterp3MC(&temp2[2][0], 21, pred, pred_pitch,
blkwidth, blkheight, dy); } } else { /* diagonal *//* e,g,p,r */ if (x_pos - 2 >= 0 && x_pos + 3 + (dx / 2) + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight + (dy / 2) <= picheight) { ref2 = ref + (y_pos + (dy / 2)) * picwidth + x_pos; ref += (y_pos * picwidth) + x_pos + (dx / 2); DiagonalInterpMC(ref2, ref, picwidth, pred, pred_pitch, blkwidth, blkheight); } else /* need padding */ { CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5 + (dx / 2), blkheight + 5 + (dy / 2)); ref2 = &temp[2 + (dy/2)][2]; ref = &temp[2][2 + (dx/2)]; DiagonalInterpMC(ref2, ref, 24, pred, pred_pitch, blkwidth, blkheight); } } return ; } void CreateAlign(uint8 *ref, int picwidth, int y_pos, uint8 *out, int blkwidth, int blkheight) { int i, j; int offset, out_offset; uint32 prev_pix, result, pix1, pix2, pix4; out_offset = 24 - blkwidth; //switch(x_pos&0x3){ switch (((uint32)ref)&0x3) { case 1: ref += y_pos * picwidth; offset = picwidth - blkwidth - 3; for (j = 0; j < blkheight; j++) { pix1 = *ref++; pix2 = *((uint16*)ref); ref += 2; result = (pix2 << 8) | pix1; for (i = 3; i < blkwidth; i += 4) { pix4 = *((uint32*)ref); ref += 4; prev_pix = (pix4 << 24) & 0xFF000000; /* mask out byte belong to previous word */ result |= prev_pix; *((uint32*)out) = result; /* write 4 bytes */ out += 4; result = pix4 >> 8; /* for the next loop */ } ref += offset; out += out_offset; } break; case 2: ref += y_pos * picwidth; offset = picwidth - blkwidth - 2; for (j = 0; j < blkheight; j++) { result = *((uint16*)ref); ref += 2; for (i = 2; i < blkwidth; i += 4) { pix4 = *((uint32*)ref); ref += 4; prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out byte belong to previous word */ result |= prev_pix; *((uint32*)out) = result; /* write 4 bytes */ out += 4; result = pix4 >> 16; /* for the next loop */ } ref += offset; out += out_offset; } break; case 3: ref += y_pos * picwidth; offset = picwidth - blkwidth - 1; for (j = 0; j < blkheight; j++) { result = *ref++; for (i 
= 1; i < blkwidth; i += 4) { pix4 = *((uint32*)ref); ref += 4; prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out byte belong to previous word */ result |= prev_pix; *((uint32*)out) = result; /* write 4 bytes */ out += 4; result = pix4 >> 24; /* for the next loop */ } ref += offset; out += out_offset; } break; } } void CreatePad(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *out, int blkwidth, int blkheight) { int x_inc0, x_mid; int y_inc, y_inc0, y_inc1, y_mid; int i, j; int offset; if (x_pos < 0) { x_inc0 = 0; /* increment for the first part */ x_mid = ((blkwidth + x_pos > 0) ? -x_pos : blkwidth); /* stopping point */ x_pos = 0; } else if (x_pos + blkwidth > picwidth) { x_inc0 = 1; /* increasing */ x_mid = ((picwidth > x_pos) ? picwidth - x_pos - 1 : 0); /* clip negative to zero, encode fool proof! */ } else /* normal case */ { x_inc0 = 1; x_mid = blkwidth; /* just one run */ } /* boundary for y_pos, taking the result from x_pos into account */ if (y_pos < 0) { y_inc0 = (x_inc0 ? - x_mid : -blkwidth + x_mid); /* offset depending on x_inc1 and x_inc0 */ y_inc1 = picwidth + y_inc0; y_mid = ((blkheight + y_pos > 0) ? -y_pos : blkheight); /* clip to prevent memory corruption */ y_pos = 0; } else if (y_pos + blkheight > picheight) { y_inc1 = (x_inc0 ? - x_mid : -blkwidth + x_mid); /* saturate */ y_inc0 = picwidth + y_inc1; /* increasing */ y_mid = ((picheight > y_pos) ? picheight - 1 - y_pos : 0); } else /* normal case */ { y_inc1 = (x_inc0 ? 
- x_mid : -blkwidth + x_mid); y_inc0 = picwidth + y_inc1; y_mid = blkheight; } /* clip y_pos and x_pos */ if (y_pos > picheight - 1) y_pos = picheight - 1; if (x_pos > picwidth - 1) x_pos = picwidth - 1; ref += y_pos * picwidth + x_pos; y_inc = y_inc0; /* start with top half */ offset = 24 - blkwidth; /* to use in offset out */ blkwidth -= x_mid; /* to use in the loop limit */ if (x_inc0 == 0) { for (j = 0; j < blkheight; j++) { if (j == y_mid) /* put a check here to reduce the code size (for unrolling the loop) */ { y_inc = y_inc1; /* switch to lower half */ } for (i = x_mid; i > 0; i--) /* first or third quarter */ { *out++ = *ref; } for (i = blkwidth; i > 0; i--) /* second or fourth quarter */ { *out++ = *ref++; } out += offset; ref += y_inc; } } else { for (j = 0; j < blkheight; j++) { if (j == y_mid) /* put a check here to reduce the code size (for unrolling the loop) */ { y_inc = y_inc1; /* switch to lower half */ } for (i = x_mid; i > 0; i--) /* first or third quarter */ { *out++ = *ref++; } for (i = blkwidth; i > 0; i--) /* second or fourth quarter */ { *out++ = *ref; } out += offset; ref += y_inc; } } return ; } void HorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx) { uint8 *p_ref; uint32 *p_cur; uint32 tmp, pkres; int result, curr_offset, ref_offset; int j; int32 r0, r1, r2, r3, r4, r5; int32 r13, r6; p_cur = (uint32*)out; /* assume it's word aligned */ curr_offset = (outpitch - blkwidth) >> 2; p_ref = in; ref_offset = inpitch - blkwidth; if (dx&1) { dx = ((dx >> 1) ? 
-3 : -4); /* use in 3/4 pel */ p_ref -= 2; r13 = 0; for (j = blkheight; j > 0; j--) { tmp = (uint32)(p_ref + blkwidth); r0 = p_ref[0]; r1 = p_ref[2]; r0 |= (r1 << 16); /* 0,c,0,a */ r1 = p_ref[1]; r2 = p_ref[3]; r1 |= (r2 << 16); /* 0,d,0,b */ while ((uint32)p_ref < tmp) { r2 = *(p_ref += 4); /* move pointer to e */ r3 = p_ref[2]; r2 |= (r3 << 16); /* 0,g,0,e */ r3 = p_ref[1]; r4 = p_ref[3]; r3 |= (r4 << 16); /* 0,h,0,f */ r4 = r0 + r3; /* c+h, a+f */ r5 = r0 + r1; /* c+d, a+b */ r6 = r2 + r3; /* g+h, e+f */ r5 >>= 16; r5 |= (r6 << 16); /* e+f, c+d */ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */ r4 += 0x100010; /* +16, +16 */ r5 = r1 + r2; /* d+g, b+e */ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */ r4 >>= 5; r13 |= r4; /* check clipping */ r5 = p_ref[dx+2]; r6 = p_ref[dx+4]; r5 |= (r6 << 16); r4 += r5; r4 += 0x10001; r4 = (r4 >> 1) & 0xFF00FF; r5 = p_ref[4]; /* i */ r6 = (r5 << 16); r5 = r6 | (r2 >> 16);/* 0,i,0,g */ r5 += r1; /* d+i, b+g */ /* r5 not free */ r1 >>= 16; r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */ r1 += r2; /* f+g, d+e */ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */ r0 >>= 16; r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */ r0 += r3; /* e+h, c+f */ r5 += 0x100010; /* 16,16 */ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */ r5 >>= 5; r13 |= r5; /* check clipping */ r0 = p_ref[dx+3]; r1 = p_ref[dx+5]; r0 |= (r1 << 16); r5 += r0; r5 += 0x10001; r5 = (r5 >> 1) & 0xFF00FF; r4 |= (r5 << 8); /* pack them together */ *p_cur++ = r4; r1 = r3; r0 = r2; } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */ if (r13&0xFF000700) /* need clipping */ { /* move back to the beginning of the line */ p_ref -= (ref_offset + blkwidth); /* input */ p_cur -= (outpitch >> 2); tmp = (uint32)(p_ref + blkwidth); for (; (uint32)p_ref < tmp;) { r0 = *p_ref++; r1 = *p_ref++; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = 
(r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dx] + 1); pkres = (result >> 1) ; /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dx] + 1); result = (result >> 1); pkres |= (result << 8); /* third pixel */ r1 = *p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dx] + 1); result = (result >> 1); pkres |= (result << 16); /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dx] + 1); result = (result >> 1); pkres |= (result << 24); *p_cur++ = pkres; /* write 4 pixels */ p_ref -= 5; /* offset back to the middle of filter */ } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; /* move to the next line */ } } } else { p_ref -= 2; r13 = 0; for (j = blkheight; j > 0; j--) { tmp = (uint32)(p_ref + blkwidth); r0 = p_ref[0]; r1 = p_ref[2]; r0 |= (r1 << 16); /* 0,c,0,a */ r1 = p_ref[1]; r2 = p_ref[3]; r1 |= (r2 << 16); /* 0,d,0,b */ while ((uint32)p_ref < tmp) { r2 = *(p_ref += 4); /* move pointer to e */ r3 = p_ref[2]; r2 |= (r3 << 16); /* 0,g,0,e */ r3 = p_ref[1]; r4 = p_ref[3]; r3 |= (r4 << 16); /* 0,h,0,f */ r4 = r0 + r3; /* c+h, a+f 
*/ r5 = r0 + r1; /* c+d, a+b */ r6 = r2 + r3; /* g+h, e+f */ r5 >>= 16; r5 |= (r6 << 16); /* e+f, c+d */ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */ r4 += 0x100010; /* +16, +16 */ r5 = r1 + r2; /* d+g, b+e */ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */ r4 >>= 5; r13 |= r4; /* check clipping */ r4 &= 0xFF00FF; /* mask */ r5 = p_ref[4]; /* i */ r6 = (r5 << 16); r5 = r6 | (r2 >> 16);/* 0,i,0,g */ r5 += r1; /* d+i, b+g */ /* r5 not free */ r1 >>= 16; r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */ r1 += r2; /* f+g, d+e */ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */ r0 >>= 16; r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */ r0 += r3; /* e+h, c+f */ r5 += 0x100010; /* 16,16 */ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */ r5 >>= 5; r13 |= r5; /* check clipping */ r5 &= 0xFF00FF; /* mask */ r4 |= (r5 << 8); /* pack them together */ *p_cur++ = r4; r1 = r3; r0 = r2; } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */ if (r13&0xFF000700) /* need clipping */ { /* move back to the beginning of the line */ p_ref -= (ref_offset + blkwidth); /* input */ p_cur -= (outpitch >> 2); tmp = (uint32)(p_ref + blkwidth); for (; (uint32)p_ref < tmp;) { r0 = *p_ref++; r1 = *p_ref++; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres = result; /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 8); /* third pixel */ r1 = *p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + 
r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 16); /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 24); *p_cur++ = pkres; /* write 4 pixels */ p_ref -= 5; } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; } } } return ; } void HorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx) { int *p_ref; uint32 *p_cur; uint32 tmp, pkres; int result, result2, curr_offset, ref_offset; int j, r0, r1, r2, r3, r4, r5; p_cur = (uint32*)out; /* assume it's word aligned */ curr_offset = (outpitch - blkwidth) >> 2; p_ref = in; ref_offset = inpitch - blkwidth; if (dx&1) { dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */ for (j = blkheight; j > 0 ; j--) { tmp = (uint32)(p_ref + blkwidth); for (; (uint32)p_ref < tmp;) { r0 = p_ref[-2]; r1 = p_ref[-1]; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dx] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); pkres = (result >> 1); /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dx] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); pkres |= (result << 8); /* third pixel */ r1 = 
*p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dx] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); pkres |= (result << 16); /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dx] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); pkres |= (result << 24); *p_cur++ = pkres; /* write 4 pixels */ p_ref -= 3; /* offset back to the middle of filter */ } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; /* move to the next line */ } } else { for (j = blkheight; j > 0 ; j--) { tmp = (uint32)(p_ref + blkwidth); for (; (uint32)p_ref < tmp;) { r0 = p_ref[-2]; r1 = p_ref[-1]; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 512) >> 10; CLIP_RESULT(result) pkres = result; /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 512) >> 10; CLIP_RESULT(result) pkres |= (result << 8); /* third pixel */ r1 = *p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 512) >> 10; CLIP_RESULT(result) pkres |= 
(result << 16); /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 512) >> 10; CLIP_RESULT(result) pkres |= (result << 24); *p_cur++ = pkres; /* write 4 pixels */ p_ref -= 3; /* offset back to the middle of filter */ } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; /* move to the next line */ } } return ; } void HorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight) { uint8 *p_ref; int *p_cur; uint32 tmp; int result, curr_offset, ref_offset; int j, r0, r1, r2, r3, r4, r5; p_cur = out; curr_offset = (outpitch - blkwidth); p_ref = in; ref_offset = inpitch - blkwidth; for (j = blkheight; j > 0 ; j--) { tmp = (uint32)(p_ref + blkwidth); for (; (uint32)p_ref < tmp;) { r0 = p_ref[-2]; r1 = p_ref[-1]; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); *p_cur++ = result; /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); *p_cur++ = result; /* third pixel */ r1 = *p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); *p_cur++ = result; /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); *p_cur++ = result; p_ref -= 3; /* move back to the middle of the filter */ } p_cur += curr_offset; /* move to the next line */ p_ref += ref_offset; } return ; } void 
VertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy) { uint8 *p_cur, *p_ref; uint32 tmp; int result, curr_offset, ref_offset; int j, i; int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13; uint8 tmp_in[24][24]; /* not word-aligned */ if (((uint32)in)&0x3) { CreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5); in = &tmp_in[2][0]; inpitch = 24; } p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */ ref_offset = blkheight * inpitch; /* for limit */ curr_offset += 3; if (dy&1) { dy = (dy >> 1) ? 0 : -inpitch; for (j = 0; j < blkwidth; j += 4, in += 4) { r13 = 0; p_ref = in; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */ p_ref += inpitch; r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */ r0 &= 0xFF00FF; r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r7; r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 += 20 * r1; r6 += 20 * r7; r0 += 0x100010; r6 += 0x100010; r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 -= 5 * r1; r6 -= 5 * r7; r0 >>= 5; r6 >>= 5; /* clip */ r13 |= r6; r13 |= r0; //CLIPPACK(r6,result) r1 = *((uint32*)(p_ref + dy)); r2 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r2; r0 += 0x10001; r6 += 0x10001; r0 = (r0 >> 1) & 0xFF00FF; r6 = (r6 >> 1) & 0xFF00FF; r0 |= (r6 << 8); /* pack it back */ *((uint32*)(p_cur += outpitch)) = r0; } p_cur += 
curr_offset; /* offset to the next pixel */ if (r13 & 0xFF000700) /* this column need clipping */ { p_cur -= 4; for (i = 0; i < 4; i++) { p_ref = in + i; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = 
(result >> 1); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += (curr_offset - 3); } } } } else { for (j = 0; j < blkwidth; j += 4, in += 4) { r13 = 0; p_ref = in; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */ p_ref += inpitch; r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */ r0 &= 0xFF00FF; r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r7; r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 += 20 * r1; r6 += 20 * r7; r0 += 0x100010; r6 += 0x100010; r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 -= 5 * r1; r6 -= 5 * r7; r0 >>= 5; r6 >>= 5; /* clip */ r13 |= r6; r13 |= r0; //CLIPPACK(r6,result) r0 &= 0xFF00FF; r6 &= 0xFF00FF; r0 |= (r6 << 8); /* pack it back */ *((uint32*)(p_cur += outpitch)) = r0; } p_cur += curr_offset; /* offset to the next pixel */ if (r13 & 0xFF000700) /* this column need clipping */ { p_cur -= 4; for (i = 0; i < 4; i++) { p_ref = in + i; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 
= (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += (curr_offset - 3); } } } } return ; } void VertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight) { int *p_cur; uint8 *p_ref; uint32 tmp; int result, curr_offset, ref_offset; int j, r0, r1, r2, r3, r4, r5; p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */ ref_offset = blkheight * inpitch; /* for limit */ for (j = 0; j < blkwidth; j++) { p_cur -= outpitch; /* compensate for the first offset */ p_ref = in++; tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; 
result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += curr_offset; } return ; } void VertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy) { uint8 *p_cur; int *p_ref; uint32 tmp; int result, result2, curr_offset, ref_offset; int j, r0, r1, r2, r3, r4, r5; p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */ ref_offset = blkheight * inpitch; /* for limit */ if (dy&1) { dy = (dy >> 1) ? 
-(inpitch << 1) : -(inpitch << 1) - inpitch; for (j = 0; j < blkwidth; j++) { p_cur -= outpitch; /* compensate for the first offset */ p_ref = in++; tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] 
+ 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += curr_offset; } } else { for (j = 0; j < blkwidth; j++) { p_cur -= outpitch; /* compensate for the first offset */ p_ref = in++; tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += curr_offset; } } return ; } void DiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch, uint8 *out, int 
outpitch, int blkwidth, int blkheight) { int j, i; int result; uint8 *p_cur, *p_ref, *p_tmp8; int curr_offset, ref_offset; uint8 tmp_res[24][24], tmp_in[24][24]; uint32 *p_tmp; uint32 tmp, pkres, tmp_result; int32 r0, r1, r2, r3, r4, r5; int32 r6, r7, r8, r9, r10, r13; void *tmp_void; ref_offset = inpitch - blkwidth; p_ref = in1 - 2; /* perform horizontal interpolation */ /* not word-aligned */ /* It is faster to read 1 byte at time to avoid calling CreateAlign */ /* if(((uint32)p_ref)&0x3) { CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight); p_ref = &tmp_in[0][0]; ref_offset = 24-blkwidth; }*/ tmp_void = (void*) & (tmp_res[0][0]); p_tmp = (uint32*) tmp_void; for (j = blkheight; j > 0; j--) { r13 = 0; tmp = (uint32)(p_ref + blkwidth); //r0 = *((uint32*)p_ref); /* d,c,b,a */ //r1 = (r0>>8)&0xFF00FF; /* 0,d,0,b */ //r0 &= 0xFF00FF; /* 0,c,0,a */ /* It is faster to read 1 byte at a time, */ r0 = p_ref[0]; r1 = p_ref[2]; r0 |= (r1 << 16); /* 0,c,0,a */ r1 = p_ref[1]; r2 = p_ref[3]; r1 |= (r2 << 16); /* 0,d,0,b */ while ((uint32)p_ref < tmp) { //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */ //r3 = (r2>>8)&0xFF00FF; /* 0,h,0,f */ //r2 &= 0xFF00FF; /* 0,g,0,e */ /* It is faster to read 1 byte at a time, */ r2 = *(p_ref += 4); r3 = p_ref[2]; r2 |= (r3 << 16); /* 0,g,0,e */ r3 = p_ref[1]; r4 = p_ref[3]; r3 |= (r4 << 16); /* 0,h,0,f */ r4 = r0 + r3; /* c+h, a+f */ r5 = r0 + r1; /* c+d, a+b */ r6 = r2 + r3; /* g+h, e+f */ r5 >>= 16; r5 |= (r6 << 16); /* e+f, c+d */ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */ r4 += 0x100010; /* +16, +16 */ r5 = r1 + r2; /* d+g, b+e */ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */ r4 >>= 5; r13 |= r4; /* check clipping */ r4 &= 0xFF00FF; /* mask */ r5 = p_ref[4]; /* i */ r6 = (r5 << 16); r5 = r6 | (r2 >> 16);/* 0,i,0,g */ r5 += r1; /* d+i, b+g */ /* r5 not free */ r1 >>= 16; r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */ r1 += r2; /* f+g, d+e */ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */ r0 >>= 16; r0 
|= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */ r0 += r3; /* e+h, c+f */ r5 += 0x100010; /* 16,16 */ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */ r5 >>= 5; r13 |= r5; /* check clipping */ r5 &= 0xFF00FF; /* mask */ r4 |= (r5 << 8); /* pack them together */ *p_tmp++ = r4; r1 = r3; r0 = r2; } p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */ if (r13&0xFF000700) /* need clipping */ { /* move back to the beginning of the line */ p_ref -= (ref_offset + blkwidth); /* input */ p_tmp -= 6; /* intermediate output */ tmp = (uint32)(p_ref + blkwidth); while ((uint32)p_ref < tmp) { r0 = *p_ref++; r1 = *p_ref++; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres = result; /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 8); /* third pixel */ r1 = *p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 16); /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 24); *p_tmp++ = pkres; /* write 4 pixel */ p_ref -= 5; } p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */ } } /* 
perform vertical interpolation */ /* not word-aligned */ if (((uint32)in2)&0x3) { CreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5); in2 = &tmp_in[2][0]; inpitch = 24; } p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */ pkres = blkheight * inpitch; /* reuse it for limit */ curr_offset += 3; for (j = 0; j < blkwidth; j += 4, in2 += 4) { r13 = 0; p_ref = in2; p_tmp8 = &(tmp_res[0][j]); /* intermediate result */ p_tmp8 -= 24; /* compensate for the first offset */ p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + pkres); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { /* Read 1 byte at a time is too slow, too many read and pack ops, need to call CreateAlign, */ /*p_ref8 = p_ref-(inpitch<<1); r0 = p_ref8[0]; r1 = p_ref8[2]; r0 |= (r1<<16); r6 = p_ref8[1]; r1 = p_ref8[3]; r6 |= (r1<<16); p_ref+=inpitch; */ r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */ p_ref += inpitch; r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */ r0 &= 0xFF00FF; /*p_ref8 = p_ref+(inpitch<<1); r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16); r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/ r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r7; /*r2 = p_ref[0]; r8 = p_ref[2]; r2 |= (r8<<16); r8 = p_ref[1]; r1 = p_ref[3]; r8 |= (r1<<16);*/ r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; /*p_ref8 = p_ref-inpitch; r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/ r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 += 20 * r1; r6 += 20 * r7; r0 += 0x100010; r6 += 0x100010; /*p_ref8 = p_ref-(inpitch<<1); r2 = p_ref8[0]; r8 = p_ref8[2]; r2 |= (r8<<16); r8 = p_ref8[1]; r1 = p_ref8[3]; r8 |= (r1<<16);*/ r2 = *((uint32*)(p_ref - 
(inpitch << 1))); /* r2, r8, ref[-1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; /*p_ref8 = p_ref+inpitch; r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/ r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 -= 5 * r1; r6 -= 5 * r7; r0 >>= 5; r6 >>= 5; /* clip */ r13 |= r6; r13 |= r0; //CLIPPACK(r6,result) /* add with horizontal results */ r10 = *((uint32*)(p_tmp8 += 24)); r9 = (r10 >> 8) & 0xFF00FF; r10 &= 0xFF00FF; r0 += r10; r0 += 0x10001; r0 = (r0 >> 1) & 0xFF00FF; /* mask to 8 bytes */ r6 += r9; r6 += 0x10001; r6 = (r6 >> 1) & 0xFF00FF; /* mask to 8 bytes */ r0 |= (r6 << 8); /* pack it back */ *((uint32*)(p_cur += outpitch)) = r0; } p_cur += curr_offset; /* offset to the next pixel */ if (r13 & 0xFF000700) /* this column need clipping */ { p_cur -= 4; for (i = 0; i < 4; i++) { p_ref = in2 + i; p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */ p_tmp8 -= 24; /* compensate for the first offset */ p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + pkres); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) tmp_result = *(p_tmp8 += 24); /* modify pointer before loading */ result = (result + tmp_result + 1); /* no clip */ result = (result >> 1); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += 
/* NOTE(review): this chunk resumes mid-way through the scalar clipping
   fallback loop of DiagonalInterpMC: the vertical 6-tap filter output is
   clipped and averaged (round-up) with the horizontal intermediate result
   stored in tmp_res (row stride 24). */
(r1<<2); result = (result + 16) >> 5;
CLIP_RESULT(result)
tmp_result = *(p_tmp8 += 24); /* intermediate result */
result = (result + tmp_result + 1); /* no clip */
result = (result >> 1);
*(p_cur += outpitch) = result;
/* third pixel */
r1 = *(p_ref += inpitch);
result = (r2 + r1);
r2 = (r3 + r0);
result -= (r2 * 5); //result -= r2; result -= (r2<<2);
r2 = (r4 + r5);
result += (r2 * 20); //result += (r2<<4); result += (r2<<2);
result = (result + 16) >> 5;
CLIP_RESULT(result)
tmp_result = *(p_tmp8 += 24); /* intermediate result */
result = (result + tmp_result + 1); /* no clip */
result = (result >> 1);
*(p_cur += outpitch) = result;
/* fourth pixel */
r2 = *(p_ref += inpitch);
result = (r3 + r2);
r3 = (r4 + r1);
result -= (r3 * 5); //result -= r3; result -= (r3<<2);
r3 = (r5 + r0);
result += (r3 * 20); //result += (r3<<4); result += (r3<<2);
result = (result + 16) >> 5;
CLIP_RESULT(result)
tmp_result = *(p_tmp8 += 24); /* intermediate result */
result = (result + tmp_result + 1); /* no clip */
result = (result >> 1);
*(p_cur += outpitch) = result;
p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
}
p_cur += (curr_offset - 3);
}
}
}
return ;
}

/* position G */
/* Full-pel motion compensation: plain block copy from ref to pred buffer.
   Copies blkwidth x blkheight bytes, 4 bytes (one word) per inner iteration;
   blkwidth is assumed to be a multiple of 4 (inner loop steps by 4).
   Two paths: a byte-assembly path when the source pointer is not word-aligned
   (the destination is written with word stores in both paths — presumably
   `out` is always 4-byte aligned; TODO confirm against callers). */
void FullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight)
{
    int i, j;
    int offset_in = inpitch - blkwidth;   /* row advance after copying one row */
    int offset_out = outpitch - blkwidth;
    uint32 temp;
    uint8 byte;

    if (((uint32)in)&3)  /* source not word-aligned: assemble words byte-by-byte */
    {
        for (j = blkheight; j > 0; j--)
        {
            for (i = blkwidth; i > 0; i -= 4)
            {
                temp = *in++;
                byte = *in++;
                temp |= (byte << 8);
                byte = *in++;
                temp |= (byte << 16);
                byte = *in++;
                temp |= (byte << 24);
                *((uint32*)out) = temp; /* write 4 bytes */
                out += 4;
            }
            out += offset_out;
            in += offset_in;
        }
    }
    else  /* word-aligned source: copy one word at a time */
    {
        for (j = blkheight; j > 0; j--)
        {
            for (i = blkwidth; i > 0; i -= 4)
            {
                temp = *((uint32*)in);
                *((uint32*)out) = temp;
                in += 4;
                out += 4;
            }
            out += offset_out;
            in += offset_in;
        }
    }
    return ;
}

/* Chroma motion compensation entry point: splits the 1/8-pel motion vector
   (x_pos, y_pos) into a full-pel position plus fractional (dx, dy) in [0,7],
   pads the reference when the block reaches outside the picture, then
   dispatches to one of the specialized SIMD routines below. */
void ChromaMotionComp(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos,
uint8 *pred, int pred_pitch, int blkwidth, int blkheight)
{
    int dx, dy;
    int offset_dx, offset_dy;
    int index;
    uint8 temp[24][24];  /* scratch buffer for the padded reference block */

    dx = x_pos & 7;   /* fractional 1/8-pel offsets */
    dy = y_pos & 7;
    offset_dx = (dx + 7) >> 3;  /* 1 iff dx != 0: one extra column/row is read */
    offset_dy = (dy + 7) >> 3;
    x_pos = x_pos >> 3;  /* round it to full-pel resolution */
    y_pos = y_pos >> 3;

    if ((x_pos >= 0 && x_pos + blkwidth + offset_dx <= picwidth) &&
            (y_pos >= 0 && y_pos + blkheight + offset_dy <= picheight))
    {
        ref += y_pos * picwidth + x_pos;  /* fully inside the picture */
    }
    else
    {
        /* block touches the border: build an edge-padded copy in temp */
        CreatePad(ref, picwidth, picheight, x_pos, y_pos, &temp[0][0],
                  blkwidth + offset_dx, blkheight + offset_dy);
        ref = &temp[0][0];
        picwidth = 24;
    }

    /* index encodes (dx!=0, dy!=0, blkwidth==2) to pick the right kernel */
    index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7);

    (*(ChromaMC_SIMD[index]))(ref, picwidth , dx, dy, pred, pred_pitch, blkwidth, blkheight);
    return ;
}

/* SIMD routines, unroll the loops in vertical direction, decreasing loops (things to be done) */

/* Bilinear chroma interpolation, dx != 0 and dy != 0: horizontal pass into a
   packed intermediate buffer (two 16-bit lanes per word), then vertical pass
   with combined rounding ((...+32)>>6). Weights are (8-dx,dx)x(8-dy,dy). */
void ChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                           uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    int32 r0, r1, r2, r3, result0, result1;
    uint8 temp[288];  /* intermediate: 32 bytes per row, blkheight+1 rows */
    uint8 *ref, *out;
    int i, j;
    int dx_8 = 8 - dx;
    int dy_8 = 8 - dy;

    /* horizontal first */
    out = temp;
    for (i = 0; i < blkheight + 1; i++)
    {
        ref = pRef;
        r0 = ref[0];
        for (j = 0; j < blkwidth; j += 4)
        {
            /* even pixels packed in one word, odd pixels in the word 16 bytes on */
            r0 |= (ref[2] << 16);
            result0 = dx_8 * r0;
            r1 = ref[1] | (ref[3] << 16);
            result0 += dx * r1;
            *(int32 *)out = result0;
            result0 = dx_8 * r1;
            r2 = ref[4];
            r0 = r0 >> 16;
            r1 = r0 | (r2 << 16);
            result0 += dx * r1;
            *(int32 *)(out + 16) = result0;
            ref += 4;
            out += 4;
            r0 = r2;
        }
        pRef += srcPitch;
        out += (32 - blkwidth);
    }
    //  pRef -= srcPitch*(blkheight+1);

    /* vertical pass: blend consecutive intermediate rows, two output rows
       per iteration (blkheight assumed even here) */
    ref = temp;
    for (j = 0; j < blkwidth; j += 4)
    {
        r0 = *(int32 *)ref;
        r1 = *(int32 *)(ref + 16);
        ref += 32;
        out = pOut;
        for (i = 0; i < (blkheight >> 1); i++)
        {
            result0 = dy_8 * r0 + 0x00200020;  /* +32 rounding in both lanes */
            r2 = *(int32 *)ref;
            result0 += dy * r2;
            result0 >>= 6;
            result0 &= 0x00FF00FF;
            r0 = r2;
            result1 = dy_8 * r1 + 0x00200020;
            r3 = *(int32 *)(ref + 16);
            result1 += dy * r3;
            result1 >>= 6;
            result1 &= 0x00FF00FF;
            r1 = r3;
            *(int32 *)out = result0 | (result1 << 8);  /* re-interleave even/odd */
            out += predPitch;
            ref += 32;

            result0 = dy_8 * r0 + 0x00200020;
            r2 = *(int32 *)ref;
            result0 += dy * r2;
            result0 >>= 6;
            result0 &= 0x00FF00FF;
            r0 = r2;
            result1 = dy_8 * r1 + 0x00200020;
            r3 = *(int32 *)(ref + 16);
            result1 += dy * r3;
            result1 >>= 6;
            result1 &= 0x00FF00FF;
            r1 = r3;
            *(int32 *)out = result0 | (result1 << 8);
            out += predPitch;
            ref += 32;
        }
        pOut += 4;
        ref = temp + 4; /* since it can only iterate twice max */
    }
    return;
}

/* Bilinear chroma interpolation, horizontal only (dy == 0): one pass with
   weights (8-dx, dx) and ((...+4)>>3) rounding, two packed lanes at a time. */
void ChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                             uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    OSCL_UNUSED_ARG(dy);
    int32 r0, r1, r2, result0, result1;
    uint8 *ref, *out;
    int i, j;
    int dx_8 = 8 - dx;

    /* horizontal first */
    for (i = 0; i < blkheight; i++)
    {
        ref = pRef;
        out = pOut;
        r0 = ref[0];
        for (j = 0; j < blkwidth; j += 4)
        {
            r0 |= (ref[2] << 16);
            result0 = dx_8 * r0 + 0x00040004;  /* +4 rounding in both lanes */
            r1 = ref[1] | (ref[3] << 16);
            result0 += dx * r1;
            result0 >>= 3;
            result0 &= 0x00FF00FF;
            result1 = dx_8 * r1 + 0x00040004;
            r2 = ref[4];
            r0 = r0 >> 16;
            r1 = r0 | (r2 << 16);
            result1 += dx * r1;
            result1 >>= 3;
            result1 &= 0x00FF00FF;
            *(int32 *)out = result0 | (result1 << 8);
            ref += 4;
            out += 4;
            r0 = r2;
        }
        pRef += srcPitch;
        pOut += predPitch;
    }
    return;
}

/* Bilinear chroma interpolation, vertical only (dx == 0): columns of 4,
   weights (8-dy, dy) with ((...+4)>>3) rounding. */
void ChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                           uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    OSCL_UNUSED_ARG(dx);
    int32 r0, r1, r2, r3, result0, result1;
    int i, j;
    uint8 *ref, *out;
    int dy_8 = 8 - dy;

    /* vertical first */
    for (i = 0; i < blkwidth; i += 4)
    {
        ref = pRef;
        out = pOut;
        r0 = ref[0] | (ref[2] << 16);  /* even pixels of the top row */
        r1 = ref[1] | (ref[3] << 16);  /* odd pixels of the top row */
        ref += srcPitch;
        for (j = 0; j < blkheight; j++)
        {
            result0 = dy_8 * r0 + 0x00040004;
            r2 = ref[0] | (ref[2] << 16);
            result0 += dy * r2;
            result0 >>= 3;
            result0 &= 0x00FF00FF;
            r0 = r2;
            result1 = dy_8 * r1 + 0x00040004;
            r3 = ref[1] | (ref[3] << 16);
            result1 += dy * r3;
            result1 >>= 3;
            result1 &= 0x00FF00FF;
            r1 = r3;
            *(int32 *)out = result0 | (result1 << 8);
            ref += srcPitch;
            out += predPitch;
        }
        pOut += 4;
        pRef += 4;
    }
    return;
}

/* 2-pixel-wide variant of the diagonal case (blkwidth fixed at 2):
   horizontal pass into a small intermediate, then vertical blend. */
void ChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                            uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    OSCL_UNUSED_ARG(blkwidth);
    int32 r0, r1, temp0, temp1, result;
    int32 temp[9];  /* one packed word per row, up to blkheight+1 rows */
    int32 *out;
    int i, r_temp;
    int dy_8 = 8 - dy;

    /* horizontal first */
    out = temp;
    for (i = 0; i < blkheight + 1; i++)
    {
        r_temp = pRef[1];
        temp0 = (pRef[0] << 3) + dx * (r_temp - pRef[0]);  /* 8*a + dx*(b-a) */
        temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);
        r0 = temp0 | (temp1 << 16);
        *out++ = r0;
        pRef += srcPitch;
    }
    pRef -= srcPitch * (blkheight + 1);

    /* vertical blend of consecutive intermediate rows */
    out = temp;
    r0 = *out++;
    for (i = 0; i < blkheight; i++)
    {
        result = dy_8 * r0 + 0x00200020;
        r1 = *out++;
        result += dy * r1;
        result >>= 6;
        result &= 0x00FF00FF;
        *(int16 *)pOut = (result >> 8) | (result & 0xFF);  /* pack 2 bytes */
        r0 = r1;
        pOut += predPitch;
    }
    return;
}

/* 2-pixel-wide horizontal-only variant (dy == 0, blkwidth fixed at 2). */
void ChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                              uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    OSCL_UNUSED_ARG(dy);
    OSCL_UNUSED_ARG(blkwidth);
    int i, temp, temp0, temp1;

    /* horizontal first */
    for (i = 0; i < blkheight; i++)
    {
        temp = pRef[1];
        temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;
        temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;
        *(int16 *)pOut = temp0 | (temp1 << 8);
        pRef += srcPitch;
        pOut += predPitch;
    }
    return;
}

/* 2-pixel-wide vertical-only variant (dx == 0, blkwidth fixed at 2). */
void ChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                            uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    OSCL_UNUSED_ARG(dx);
    OSCL_UNUSED_ARG(blkwidth);
    int32 r0, r1, result;
    int i;
    int dy_8 = 8 - dy;

    r0 = pRef[0] | (pRef[1] << 16);  /* both pixels of the top row, packed */
    pRef += srcPitch;
    for (i = 0; i < blkheight; i++)
    {
        result = dy_8 * r0 + 0x00040004;
        r1 = pRef[0] | (pRef[1] << 16);
        result += dy * r1;
        result >>= 3;
        result &= 0x00FF00FF;
        *(int16 *)pOut = (result >> 8) | (result & 0xFF);
        r0 = r1;
        pRef += srcPitch;
        pOut += predPitch;
    }
    return;
}

/* Full-pel chroma copy (dx == 0 and dy == 0): straight block copy, 2 bytes
   per store, with a byte-assembly path for unaligned sources. */
void ChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                       uint8 *pOut, int predPitch, int blkwidth, int
blkheight)
{
    OSCL_UNUSED_ARG(dx);
    OSCL_UNUSED_ARG(dy);
    int i, j;
    int offset_in = srcPitch - blkwidth;    /* row advance after one copied row */
    int offset_out = predPitch - blkwidth;
    uint16 temp;
    uint8 byte;

    if (((uint32)pRef)&1)  /* source not halfword-aligned: assemble 2 bytes */
    {
        for (j = blkheight; j > 0; j--)
        {
            for (i = blkwidth; i > 0; i -= 2)
            {
                temp = *pRef++;
                byte = *pRef++;
                temp |= (byte << 8);
                *((uint16*)pOut) = temp; /* write 2 bytes */
                pOut += 2;
            }
            pOut += offset_out;
            pRef += offset_in;
        }
    }
    else  /* halfword-aligned source: copy 2 bytes at a time */
    {
        for (j = blkheight; j > 0; j--)
        {
            for (i = blkwidth; i > 0; i -= 2)
            {
                temp = *((uint16*)pRef);
                *((uint16*)pOut) = temp;
                pRef += 2;
                pOut += 2;
            }
            pOut += offset_out;
            pRef += offset_in;
        }
    }
    return ;
}

================================================
FILE: RtspCamera/jni/avc_h264/dec/src/pred_intra.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avcdec_lib.h"
#include "oscl_mem.h"

/* clip to [0,255], write through comp, and post-increment the pointer
   (macro continues on the next chunk of the extraction) */
#define CLIP_COMP *comp++ = (uint8)(((uint)temp>0xFF)?
0xFF&(~(temp>>31)): temp) #define CLIP_RESULT(x) if((uint)x > 0xFF){ \ x = 0xFF & (~(x>>31));} /* We should combine the Intra4x4 functions with residual decoding and compensation */ AVCStatus IntraMBPrediction(AVCCommonObj *video) { int component, SubBlock_indx, temp; AVCStatus status; AVCMacroblock *currMB = video->currMB; AVCPictureData *currPic = video->currPic; uint8 *curL, *curCb, *curCr; uint8 *comp; int block_x, block_y, offset; int16 *dataBlock = video->block; uint8 *predCb, *predCr; #ifdef USE_PRED_BLOCK uint8 *pred; #endif int pitch = currPic->pitch; uint32 cbp4x4 = video->cbp4x4; offset = (video->mb_y << 4) * pitch + (video->mb_x << 4); curL = currPic->Sl + offset; #ifdef USE_PRED_BLOCK video->pred_block = video->pred + 84; /* point to separate prediction memory */ pred = video->pred_block; video->pred_pitch = 20; #else video->pred_block = curL; /* point directly to the frame buffer */ video->pred_pitch = pitch; #endif if (currMB->mbMode == AVC_I4) { /* luminance first */ block_x = block_y = 0; for (component = 0; component < 4; component++) { block_x = ((component & 1) << 1); block_y = ((component >> 1) << 1); comp = curL;// + (block_x<<2) + (block_y<<2)*currPic->pitch; for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) { status = Intra_4x4(video, block_x, block_y, comp); if (status != AVC_SUCCESS) { return status; } /* transform following the 4x4 prediction, can't be SIMD with other blocks. 
*/ #ifdef USE_PRED_BLOCK if (cbp4x4&(1 << ((block_y << 2) + block_x))) { itrans(dataBlock, pred, pred, 20); } #else if (cbp4x4&(1 << ((block_y << 2) + block_x))) { itrans(dataBlock, comp, comp, pitch); } #endif temp = SubBlock_indx & 1; if (temp) { block_y++; block_x--; dataBlock += 60; #ifdef USE_PRED_BLOCK pred += 76; #else comp += ((pitch << 2) - 4); #endif } else { block_x++; dataBlock += 4; #ifdef USE_PRED_BLOCK pred += 4; #else comp += 4; #endif } } if (component&1) { #ifdef USE_PRED_BLOCK pred -= 8; #else curL += (pitch << 3) - 8; #endif dataBlock -= 8; } else { #ifdef USE_PRED_BLOCK pred -= 152; #else curL += 8; #endif dataBlock -= 120; } } cbp4x4 >>= 16; } else /* AVC_I16 */ { #ifdef MB_BASED_DEBLOCK video->pintra_pred_top = video->intra_pred_top + (video->mb_x << 4); video->pintra_pred_left = video->intra_pred_left + 1; video->intra_pred_topleft = video->intra_pred_left[0]; pitch = 1; #else video->pintra_pred_top = curL - pitch; video->pintra_pred_left = curL - 1; if (video->mb_y) { video->intra_pred_topleft = *(curL - pitch - 1); } #endif switch (currMB->i16Mode) { case AVC_I16_Vertical: /* Intra_16x16_Vertical */ /* check availability of top */ if (video->intraAvailB) { Intra_16x16_Vertical(video); } else { return AVC_FAIL; } break; case AVC_I16_Horizontal: /* Intra_16x16_Horizontal */ /* check availability of left */ if (video->intraAvailA) { Intra_16x16_Horizontal(video, pitch); } else { return AVC_FAIL; } break; case AVC_I16_DC: /* Intra_16x16_DC */ Intra_16x16_DC(video, pitch); break; case AVC_I16_Plane: /* Intra_16x16_Plane */ if (video->intraAvailA && video->intraAvailB && video->intraAvailD) { Intra_16x16_Plane(video, pitch); } else { return AVC_FAIL; } break; default: break; } pitch = currPic->pitch; /* transform */ /* can go in raster scan order now */ /* can be done in SIMD, */ for (block_y = 4; block_y > 0; block_y--) { for (block_x = 4; block_x > 0; block_x--) { #ifdef USE_PRED_BLOCK if (cbp4x4&1) { itrans(dataBlock, pred, pred, 20); } #else 
if (cbp4x4&1) { itrans(dataBlock, curL, curL, pitch); } #endif cbp4x4 >>= 1; dataBlock += 4; #ifdef USE_PRED_BLOCK pred += 4; #else curL += 4; #endif } dataBlock += 48; #ifdef USE_PRED_BLOCK pred += 64; #else curL += ((pitch << 2) - 16); #endif } } offset = (offset >> 2) + (video->mb_x << 2); //((video->mb_y << 3)* pitch + (video->mb_x << 3)); curCb = currPic->Scb + offset; curCr = currPic->Scr + offset; #ifdef MB_BASED_DEBLOCK video->pintra_pred_top_cb = video->intra_pred_top_cb + (video->mb_x << 3); video->pintra_pred_left_cb = video->intra_pred_left_cb + 1; video->intra_pred_topleft_cb = video->intra_pred_left_cb[0]; video->pintra_pred_top_cr = video->intra_pred_top_cr + (video->mb_x << 3); video->pintra_pred_left_cr = video->intra_pred_left_cr + 1; video->intra_pred_topleft_cr = video->intra_pred_left_cr[0]; pitch = 1; #else pitch >>= 1; video->pintra_pred_top_cb = curCb - pitch; video->pintra_pred_left_cb = curCb - 1; video->pintra_pred_top_cr = curCr - pitch; video->pintra_pred_left_cr = curCr - 1; if (video->mb_y) { video->intra_pred_topleft_cb = *(curCb - pitch - 1); video->intra_pred_topleft_cr = *(curCr - pitch - 1); } #endif #ifdef USE_PRED_BLOCK predCb = video->pred + 452; predCr = predCb + 144; video->pred_pitch = 12; #else predCb = curCb; predCr = curCr; video->pred_pitch = currPic->pitch >> 1; #endif /* chrominance */ switch (currMB->intra_chroma_pred_mode) { case AVC_IC_DC: /* Intra_Chroma_DC */ Intra_Chroma_DC(video, pitch, predCb, predCr); break; case AVC_IC_Horizontal: /* Intra_Chroma_Horizontal */ if (video->intraAvailA) { /* check availability of left */ Intra_Chroma_Horizontal(video, pitch, predCb, predCr); } else { return AVC_FAIL; } break; case AVC_IC_Vertical: /* Intra_Chroma_Vertical */ if (video->intraAvailB) { /* check availability of top */ Intra_Chroma_Vertical(video, predCb, predCr); } else { return AVC_FAIL; } break; case AVC_IC_Plane: /* Intra_Chroma_Plane */ if (video->intraAvailA && video->intraAvailB && video->intraAvailD) { /* 
check availability of top and left */ Intra_Chroma_Plane(video, pitch, predCb, predCr); } else { return AVC_FAIL; } break; default: break; } /* transform, done in raster scan manner */ pitch = currPic->pitch >> 1; for (block_y = 2; block_y > 0; block_y--) { for (block_x = 2; block_x > 0; block_x--) { #ifdef USE_PRED_BLOCK if (cbp4x4&1) { ictrans(dataBlock, predCb, predCb, 12); } #else if (cbp4x4&1) { ictrans(dataBlock, curCb, curCb, pitch); } #endif cbp4x4 >>= 1; dataBlock += 4; #ifdef USE_PRED_BLOCK predCb += 4; #else curCb += 4; #endif } for (block_x = 2; block_x > 0; block_x--) { #ifdef USE_PRED_BLOCK if (cbp4x4&1) { ictrans(dataBlock, predCr, predCr, 12); } #else if (cbp4x4&1) { ictrans(dataBlock, curCr, curCr, pitch); } #endif cbp4x4 >>= 1; dataBlock += 4; #ifdef USE_PRED_BLOCK predCr += 4; #else curCr += 4; #endif } dataBlock += 48; #ifdef USE_PRED_BLOCK predCb += 40; predCr += 40; #else curCb += ((pitch << 2) - 8); curCr += ((pitch << 2) - 8); #endif } #ifdef MB_BASED_DEBLOCK SaveNeighborForIntraPred(video, offset); #endif return AVC_SUCCESS; } #ifdef MB_BASED_DEBLOCK void SaveNeighborForIntraPred(AVCCommonObj *video, int offset) { AVCPictureData *currPic = video->currPic; int pitch; uint8 *pred, *predCb, *predCr; uint8 *tmp_ptr, tmp_byte; uint32 tmp_word; int mb_x = video->mb_x; /* save the value for intra prediction */ #ifdef USE_PRED_BLOCK pitch = 20; pred = video->pred + 384; /* bottom line for Y */ predCb = pred + 152; /* bottom line for Cb */ predCr = predCb + 144; /* bottom line for Cr */ #else pitch = currPic->pitch; tmp_word = offset + (pitch << 2) - (pitch >> 1); predCb = currPic->Scb + tmp_word;/* bottom line for Cb */ predCr = currPic->Scr + tmp_word;/* bottom line for Cr */ offset = (offset << 2) - (mb_x << 4); pred = currPic->Sl + offset + (pitch << 4) - pitch;/* bottom line for Y */ #endif video->intra_pred_topleft = video->intra_pred_top[(mb_x<<4)+15]; video->intra_pred_topleft_cb = video->intra_pred_top_cb[(mb_x<<3)+7]; 
video->intra_pred_topleft_cr = video->intra_pred_top_cr[(mb_x<<3)+7]; /* then copy to video->intra_pred_top, intra_pred_top_cb, intra_pred_top_cr */ /*oscl_memcpy(video->intra_pred_top + (mb_x<<4), pred, 16); oscl_memcpy(video->intra_pred_top_cb + (mb_x<<3), predCb, 8); oscl_memcpy(video->intra_pred_top_cr + (mb_x<<3), predCr, 8);*/ tmp_ptr = video->intra_pred_top + (mb_x << 4); *((uint32*)tmp_ptr) = *((uint32*)pred); *((uint32*)(tmp_ptr + 4)) = *((uint32*)(pred + 4)); *((uint32*)(tmp_ptr + 8)) = *((uint32*)(pred + 8)); *((uint32*)(tmp_ptr + 12)) = *((uint32*)(pred + 12)); tmp_ptr = video->intra_pred_top_cb + (mb_x << 3); *((uint32*)tmp_ptr) = *((uint32*)predCb); *((uint32*)(tmp_ptr + 4)) = *((uint32*)(predCb + 4)); tmp_ptr = video->intra_pred_top_cr + (mb_x << 3); *((uint32*)tmp_ptr) = *((uint32*)predCr); *((uint32*)(tmp_ptr + 4)) = *((uint32*)(predCr + 4)); /* now save last column */ #ifdef USE_PRED_BLOCK pred = video->pred + 99; /* last column*/ #else pred -= ((pitch << 4) - pitch - 15); /* last column */ #endif tmp_ptr = video->intra_pred_left; tmp_word = video->intra_pred_topleft; tmp_byte = *(pred); tmp_word |= (tmp_byte << 8); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 16); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 24); *((uint32*)tmp_ptr) = tmp_word; tmp_word = *(pred += pitch); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 8); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 16); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 24); *((uint32*)(tmp_ptr += 4)) = tmp_word; tmp_word = *(pred += pitch); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 8); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 16); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 24); *((uint32*)(tmp_ptr += 4)) = tmp_word; tmp_word = *(pred += pitch); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 8); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 16); tmp_byte = *(pred += pitch); tmp_word |= (tmp_byte << 24); 
/* Continuation from the previous chunk: this is the tail of the
 * MB_BASED_DEBLOCK neighbor-save routine (presumably SaveNeighborForIntraPred
 * -- its header is outside this chunk, so the name is unconfirmed).  It packs
 * the left-neighbor pixel columns of the just-decoded macroblock into
 * word-aligned scratch arrays (intra_pred_left_*) so the next macroblock can
 * do intra prediction after deblocking has overwritten the frame pixels.
 * Finishing the luma column here, then Cb and Cr below. */
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    *(tmp_ptr += 4) = *(pred += pitch);

    /* now for Cb */
#ifdef USE_PRED_BLOCK
    predCb = video->pred + 459;  /* fixed offset of Cb inside the pred scratch block */
    pitch = 12;
#else
    pitch >>= 1;                 /* chroma pitch is half the luma pitch */
    predCb -= (7 * pitch - 7);   /* back up to the right edge, top chroma row */
#endif
    /* Pack topleft + 8 left-column Cb samples into two words + one byte. */
    tmp_ptr = video->intra_pred_left_cb;
    tmp_word = video->intra_pred_topleft_cb;
    tmp_byte = *(predCb);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)tmp_ptr) = tmp_word;
    tmp_word = *(predCb += pitch);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    *(tmp_ptr += 4) = *(predCb += pitch);

    /* now for Cr (same packing as Cb) */
#ifdef USE_PRED_BLOCK
    predCr = video->pred + 603;  /* fixed offset of Cr inside the pred scratch block */
#else
    predCr -= (7 * pitch - 7);
#endif
    tmp_ptr = video->intra_pred_left_cr;
    tmp_word = video->intra_pred_topleft_cr;
    tmp_byte = *(predCr);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)tmp_ptr) = tmp_word;
    tmp_word = *(predCr += pitch);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    *(tmp_ptr += 4) = *(predCr += pitch);

    return ;
}
#endif /* MB_BASED_DEBLOCK */

/**
 * Perform 4x4 luma intra prediction for one sub-block of the current MB.
 *
 * Sets up the top/left/topleft reference-pixel pointers for the sub-block,
 * then dispatches on the block's i4Mode to one of the nine H.264 4x4
 * prediction modes below.
 *
 * @param video    common decoder state (current MB, picture, scratch buffers)
 * @param block_x  sub-block column within the MB, 0..3
 * @param block_y  sub-block row within the MB, 0..3
 * @param comp     pointer to the sub-block's top-left pixel in the frame
 * @return AVC_SUCCESS, or AVC_FAIL when the mode needs unavailable neighbors
 */
AVCStatus Intra_4x4(AVCCommonObj *video, int block_x, int block_y, uint8 *comp)
{
    AVCMacroblock *currMB = video->currMB;
    int block_offset;
    AVCNeighborAvailability availability;
    int pitch = video->currPic->pitch;

#ifdef USE_PRED_BLOCK
    block_offset = (block_y * 80) + (block_x << 2);  /* pred scratch has pitch 20 -> 4*20 per block row */
#else
    block_offset = (block_y << 2) * pitch + (block_x << 2);
#endif

#ifdef MB_BASED_DEBLOCK
    /* boundary blocks use video->pred_intra_top, pred_intra_left,
       pred_intra_topleft (saved before deblocking clobbered the pixels) */
    if (!block_x)
    {
        video->pintra_pred_left = video->intra_pred_left + 1 + (block_y << 2);
        pitch = 1;  /* saved left column is stored contiguously */
    }
    else
    {
        video->pintra_pred_left = video->pred_block + block_offset - 1;
        pitch = video->pred_pitch;
    }
    if (!block_y)
    {
        video->pintra_pred_top = video->intra_pred_top + (block_x << 2) + (video->mb_x << 4);
    }
    else
    {
        video->pintra_pred_top = video->pred_block + block_offset - video->pred_pitch;
    }
    if (!block_x)
    {
        video->intra_pred_topleft = video->intra_pred_left[block_y<<2];
    }
    else if (!block_y)
    {
        video->intra_pred_topleft = video->intra_pred_top[(video->mb_x<<4)+(block_x<<2)-1];
    }
    else
    {
        video->intra_pred_topleft = video->pred_block[block_offset - video->pred_pitch - 1];
    }
#else
    /* normal case: neighbors are read straight from the frame buffer */
    video->pintra_pred_top = comp - pitch;
    video->pintra_pred_left = comp - 1;
    if (video->mb_y || block_y)
    {
        video->intra_pred_topleft = *(comp - pitch - 1);
    }
#endif

    switch (currMB->i4Mode[(block_y << 2) + block_x])
    {
        case AVC_I4_Vertical:       /* Intra_4x4_Vertical */
            if (block_y > 0 || video->intraAvailB)/* to prevent out-of-bound access*/
            {
                Intra_4x4_Vertical(video, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Horizontal:     /* Intra_4x4_Horizontal */
            if (block_x || video->intraAvailA) /* to prevent out-of-bound access */
            {
                Intra_4x4_Horizontal(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_DC:             /* Intra_4x4_DC -- never fails; falls back to 128 */
            availability.left = TRUE;
            availability.top = TRUE;
            if (!block_y)
            {   /* check availability up */
                availability.top = video->intraAvailB ;
            }
            if (!block_x)
            {   /* check availability left */
                availability.left = video->intraAvailA ;
            }
            Intra_4x4_DC(video, pitch, block_offset, &availability);
            break;

        case AVC_I4_Diagonal_Down_Left: /* Intra_4x4_Diagonal_Down_Left */
            /* lookup table will be more appropriate for this case */
            if (block_y == 0 && !video->intraAvailB)
            {
                return AVC_FAIL;
            }
            /* BlkTopRight: 0/1 = known, 2 -> depends on MB above (B),
               3 -> depends on MB above-right (C) */
            availability.top_right = BlkTopRight[(block_y<<2) + block_x];
            if (availability.top_right == 2)
            {
                availability.top_right = video->intraAvailB;
            }
            else if (availability.top_right == 3)
            {
                availability.top_right = video->intraAvailC;
            }
            Intra_4x4_Down_Left(video, block_offset, &availability);
            break;

        case AVC_I4_Diagonal_Down_Right: /* Intra_4x4_Diagonal_Down_Right */
            if ((block_y && block_x)  /* to prevent out-of-bound access */
                    || (block_y && video->intraAvailA)
                    || (block_x && video->intraAvailB)
                    || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
            {
                Intra_4x4_Diagonal_Down_Right(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Vertical_Right: /* Intra_4x4_Vertical_Right */
            if ((block_y && block_x)  /* to prevent out-of-bound access */
                    || (block_y && video->intraAvailA)
                    || (block_x && video->intraAvailB)
                    || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
            {
                Intra_4x4_Diagonal_Vertical_Right(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Horizontal_Down: /* Intra_4x4_Horizontal_Down */
            if ((block_y && block_x)  /* to prevent out-of-bound access */
                    || (block_y && video->intraAvailA)
                    || (block_x && video->intraAvailB)
                    || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
            {
                Intra_4x4_Diagonal_Horizontal_Down(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Vertical_Left:  /* Intra_4x4_Vertical_Left */
            /* lookup table may be more appropriate for this case */
            if (block_y == 0 && !video->intraAvailB)
            {
                return AVC_FAIL;
            }
            availability.top_right = BlkTopRight[(block_y<<2) + block_x];
            if (availability.top_right == 2)
            {
                availability.top_right = video->intraAvailB;
            }
            else if (availability.top_right == 3)
            {
                availability.top_right = video->intraAvailC;
            }
            Intra_4x4_Vertical_Left(video, block_offset, &availability);
            break;

        case AVC_I4_Horizontal_Up:  /* Intra_4x4_Horizontal_Up */
            if (block_x || video->intraAvailA)
            {
                Intra_4x4_Horizontal_Up(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        default:
            break;
    }

    return AVC_SUCCESS;
}

/* =============================== BEGIN 4x4 MODES======================================*/

/* Mode 0: replicate the 4 top-neighbor pixels down all 4 rows.
   Writes each 4-pixel row as one little-endian 32-bit store. */
void Intra_4x4_Vertical(AVCCommonObj *video, int block_offset)
{
    uint8 *comp_ref = video->pintra_pred_top;
    uint32 temp;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    /*P = (int) *comp_ref++;
    Q = (int) *comp_ref++;
    R = (int) *comp_ref++;
    S = (int) *comp_ref++;
    temp = S|(R<<8)|(Q<<16)|(P<<24);*/

    temp = *((uint32*)comp_ref); /* read all 4 top neighbors in one load */

    *((uint32*)pred) = temp; /* write 4 at a time */
    pred += pred_pitch;
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 1: replicate each left-neighbor pixel across its row. */
void Intra_4x4_Horizontal(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8 *comp_ref = video->pintra_pred_left;
    uint32 temp;
    int P;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    P = *comp_ref;
    temp = P | (P << 8);        /* smear one byte into all 4 lanes */
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    comp_ref += pitch;

    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    comp_ref += pitch;

    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    comp_ref += pitch;

    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 2: fill the block with the rounded mean of the available
   top/left neighbors; 128 when neither side is available. */
void Intra_4x4_DC(AVCCommonObj *video, int pitch, int block_offset,
                  AVCNeighborAvailability *availability)
{
    uint8 *comp_ref = video->pintra_pred_left;
    uint32 temp;
    int DC;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    if (availability->left)
    {
        DC = *comp_ref;
        comp_ref += pitch;
        DC += *comp_ref;
        comp_ref += pitch;
        DC += *comp_ref;
        comp_ref += pitch;
        DC += *comp_ref;
        comp_ref = video->pintra_pred_top;
        if (availability->top)
        {
            /* both sides: average of 8 neighbors */
            DC = (comp_ref[0] + comp_ref[1] + comp_ref[2] + comp_ref[3] + DC + 4) >> 3;
        }
        else
        {
            DC = (DC + 2) >> 2; /* left only */
        }
    }
    else if (availability->top)
    {
        comp_ref = video->pintra_pred_top;
        DC = (comp_ref[0] + comp_ref[1] + comp_ref[2] + comp_ref[3] + 2) >> 2; /* top only */
    }
    else
    {
        DC = 128; /* no neighbors: mid-gray per the H.264 spec */
    }

    temp = DC | (DC << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 3: diagonal down-left; 3-tap (1,2,1) filtered diagonals from the
   top and top-right neighbors.  When top-right is unavailable the last
   top pixel is replicated (r4..r7 = r3). */
void Intra_4x4_Down_Left(AVCCommonObj *video, int block_offset,
                         AVCNeighborAvailability *availability)
{
    uint8 *comp_refx = video->pintra_pred_top;
    uint32 temp;
    int r0, r1, r2, r3, r4, r5, r6, r7;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    r0 = *comp_refx++;
    r1 = *comp_refx++;
    r2 = *comp_refx++;
    r3 = *comp_refx++;
    if (availability->top_right)
    {
        r4 = *comp_refx++;
        r5 = *comp_refx++;
        r6 = *comp_refx++;
        r7 = *comp_refx++;
    }
    else
    {
        r4 = r3;
        r5 = r3;
        r6 = r3;
        r7 = r3;
    }

    /* in-place (1,2,1)/4 filter along the diagonal */
    r0 += (r1 << 1);
    r0 += r2;
    r0 += 2;
    r0 >>= 2;
    r1 += (r2 << 1);
    r1 += r3;
    r1 += 2;
    r1 >>= 2;
    r2 += (r3 << 1);
    r2 += r4;
    r2 += 2;
    r2 >>= 2;
    r3 += (r4 << 1);
    r3 += r5;
    r3 += 2;
    r3 >>= 2;
    r4 += (r5 << 1);
    r4 += r6;
    r4 += 2;
    r4 >>= 2;
    r5 += (r6 << 1);
    r5 += r7;
    r5 += 2;
    r5 >>= 2;
    r6 += (3 * r7);     /* last sample: (r6 + 3*r7 + 2) >> 2 */
    r6 += 2;
    r6 >>= 2;

    temp = r0 | (r1 << 8);
    temp |= (r2 << 16);
    temp |= (r3 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    /* each next row is the previous row shifted left one diagonal step */
    temp = (temp >> 8) | (r4 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = (temp >> 8) | (r5 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = (temp >> 8) | (r6 << 24);
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 4: diagonal down-right; (1,2,1) filtered samples from top, left and
   topleft neighbors, replicated along the down-right diagonal.  The x0..x2
   temporaries are aggressively re-used (see inline comments). */
void Intra_4x4_Diagonal_Down_Right(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8 *comp_refx = video->pintra_pred_top;
    uint8 *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int P_x, Q_x, R_x, P_y, Q_y, R_y, D;
    int x0, x1, x2;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    temp = *((uint32*)comp_refx); /* read 4 bytes */
    x0 = temp & 0xFF;
    x1 = (temp >> 8) & 0xFF;
    x2 = (temp >> 16) & 0xFF;

    Q_x = (x0 + 2 * x1 + x2 + 2) >> 2;
    R_x = (x1 + 2 * x2 + (temp >> 24) + 2) >> 2;

    x2 = video->intra_pred_topleft; /* re-use x2 instead of y0 */
    P_x = (x2 + 2 * x0 + x1 + 2) >> 2;

    x1 = *comp_refy;
    comp_refy += pitch;             /* re-use x1 instead of y1 */
    D = (x0 + 2 * x2 + x1 + 2) >> 2;

    x0 = *comp_refy;
    comp_refy += pitch;             /* re-use x0 instead of y2 */
    P_y = (x2 + 2 * x1 + x0 + 2) >> 2;

    x2 = *comp_refy;
    comp_refy += pitch;             /* re-use x2 instead of y3 */
    Q_y = (x1 + 2 * x0 + x2 + 2) >> 2;

    x1 = *comp_refy;                /* re-use x1 instead of y4 */
    R_y = (x0 + 2 * x2 + x1 + 2) >> 2;

    /* we can pack these */
    temp = D | (P_x << 8);   //[D   P_x Q_x R_x]
                             //[P_y D   P_x Q_x]
    temp |= (Q_x << 16);     //[Q_y P_y D   P_x]
    temp |= (R_x << 24);     //[R_y Q_y P_y D  ]
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = P_y | (D << 8);
    temp |= (P_x << 16);
    temp |= (Q_x << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = Q_y | (P_y << 8);
    temp |= (D << 16);
    temp |= (P_x << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = R_y | (Q_y << 8);
    temp |= (P_y << 16);
    temp |= (D << 24);
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 5: vertical-right; mixes 2-tap (>>1) and 3-tap (>>2) filtered
   neighbors.  Note P0..S0 carry "sum+1" until the final >>1 / the
   (P0+Q0)>>2 forms, which folds the rounding constants together. */
void Intra_4x4_Diagonal_Vertical_Right(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8 *comp_refx = video->pintra_pred_top;
    uint8 *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2, D;
    int x0, x1, x2;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    x0 = *comp_refx++;
    x1 = *comp_refx++;
    Q0 = x0 + x1 + 1;
    x2 = *comp_refx++;
    R0 = x1 + x2 + 1;
    x1 = *comp_refx++;              /* reuse x1 instead of x3 */
    S0 = x2 + x1 + 1;
    x1 = video->intra_pred_topleft; /* reuse x1 instead of y0 */
    P0 = x1 + x0 + 1;
    x2 = *comp_refy;
    comp_refy += pitch;             /* reuse x2 instead of y1 */
    D = (x2 + 2 * x1 + x0 + 2) >> 2;

    P1 = (P0 + Q0) >> 2;
    Q1 = (Q0 + R0) >> 2;
    R1 = (R0 + S0) >> 2;
    P0 >>= 1;
    Q0 >>= 1;
    R0 >>= 1;
    S0 >>= 1;

    x0 = *comp_refy;
    comp_refy += pitch;             /* reuse x0 instead of y2 */
    P2 = (x1 + 2 * x2 + x0 + 2) >> 2;
    x1 = *comp_refy;
    comp_refy += pitch;             /* reuse x1 instead of y3 */
    Q2 = (x2 + 2 * x0 + x1 + 2) >> 2;

    temp = P0 | (Q0 << 8);  //[P0 Q0 R0 S0]
                            //[D  P1 Q1 R1]
    temp |= (R0 << 16);     //[P2 P0 Q0 R0]
    temp |= (S0 << 24);     //[Q2 D  P1 Q1]
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = D | (P1 << 8);
    temp |= (Q1 << 16);
    temp |= (R1 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = P2 | (P0 << 8);
    temp |= (Q0 << 16);
    temp |= (R0 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = Q2 | (D << 8);
    temp |= (P1 << 16);
    temp |= (Q1 << 24);
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 6: horizontal-down; mirror image of vertical-right, driven by the
   left-neighbor column instead of the top row. */
void Intra_4x4_Diagonal_Horizontal_Down(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8 *comp_refx = video->pintra_pred_top;
    uint8 *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2, D;
    int x0, x1, x2;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    x0 = *comp_refx++;
    x1 = *comp_refx++;
    x2 = *comp_refx++;
    Q2 = (x0 + 2 * x1 + x2 + 2) >> 2;
    x2 = video->intra_pred_topleft; /* reuse x2 instead of y0 */
    P2 = (x2 + 2 * x0 + x1 + 2) >> 2;
    x1 = *comp_refy;
    comp_refy += pitch;             /* reuse x1 instead of y1 */
    D = (x1 + 2 * x2 + x0 + 2) >> 2;
    P0 = x2 + x1 + 1;
    x0 = *comp_refy;
    comp_refy += pitch;             /* reuse x0 instead of y2 */
    Q0 = x1 + x0 + 1;
    x1 = *comp_refy;
    comp_refy += pitch;             /* reuse x1 instead of y3 */
    R0 = x0 + x1 + 1;
    x2 = *comp_refy;                /* reuse x2 instead of y4 */
    S0 = x1 + x2 + 1;

    P1 = (P0 + Q0) >> 2;
    Q1 = (Q0 + R0) >> 2;
    R1 = (R0 + S0) >> 2;
    P0 >>= 1;
    Q0 >>= 1;
    R0 >>= 1;
    S0 >>= 1;

    /* we can pack these */
    temp = P0 | (D << 8);   //[P0 D  P2 Q2]
                            //[Q0 P1 P0 D ]
    temp |= (P2 << 16);     //[R0 Q1 Q0 P1]
    temp |= (Q2 << 24);     //[S0 R1 R0 Q1]
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = Q0 | (P1 << 8);
    temp |= (P0 << 16);
    temp |= (D << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = R0 | (Q1 << 8);
    temp |= (Q0 << 16);
    temp |= (P1 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    temp = S0 | (R1 << 8);
    temp |= (R0 << 16);
    temp |= (Q1 << 24);
    *((uint32*)pred) = temp;

    return ;
}

/* Mode 7: vertical-left; alternating rows of 2-tap and 3-tap filtered top
   neighbors; top-right falls back to replicating x3 when unavailable. */
void Intra_4x4_Vertical_Left(AVCCommonObj *video, int block_offset,
                             AVCNeighborAvailability *availability)
{
    uint8 *comp_refx = video->pintra_pred_top;
    uint32 temp1, temp2;
    int x0, x1, x2, x3, x4, x5, x6;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    x0 = *comp_refx++;
    x1 = *comp_refx++;
    x2 = *comp_refx++;
    x3 = *comp_refx++;
    if (availability->top_right)
    {
        x4 = *comp_refx++;
        x5 = *comp_refx++;
        x6 = *comp_refx++;
    }
    else
    {
        x4 = x3;
        x5 = x3;
        x6 = x3;
    }

    /* pairwise sums + rounding; (xN>>1) gives the 2-tap rows,
       ((xN+xN+1)>>2) the 3-tap rows */
    x0 += x1 + 1;
    x1 += x2 + 1;
    x2 += x3 + 1;
    x3 += x4 + 1;
    x4 += x5 + 1;
    x5 += x6 + 1;

    temp1 = (x0 >> 1);
    temp1 |= ((x1 >> 1) << 8);
    temp1 |= ((x2 >> 1) << 16);
    temp1 |= ((x3 >> 1) << 24);
    *((uint32*)pred) = temp1;
    pred += pred_pitch;
    temp2 = ((x0 + x1) >> 2);
    temp2 |= (((x1 + x2) >> 2) << 8);
    temp2 |= (((x2 + x3) >> 2) << 16);
    temp2 |= (((x3 + x4) >> 2) << 24);
    *((uint32*)pred) = temp2;
    pred += pred_pitch;
    temp1 = (temp1 >> 8) | ((x4 >> 1) << 24); /* rotate out old value */
    *((uint32*)pred) = temp1;
    pred += pred_pitch;
    temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24); /* rotate out old value */
    *((uint32*)pred) = temp2;
    pred += pred_pitch;

    return ;
}

/* Mode 8: horizontal-up; interpolates up the left-neighbor column and pads
   the bottom rows with the last left sample (D1 = y3). */
void Intra_4x4_Horizontal_Up(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8 *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int Q0, R0, Q1, D0, D1, P0, P1;
    int y0, y1, y2, y3;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    y0 = *comp_refy;
    comp_refy += pitch;
    y1 = *comp_refy;
    comp_refy += pitch;
    y2 = *comp_refy;
    comp_refy += pitch;
    y3 = *comp_refy;

    Q0 = (y1 + y2 + 1) >> 1;
    Q1 = (y1 + (y2 << 1) + y3 + 2) >> 2;
    P0 = ((y0 + y1 + 1) >> 1);
    P1 = ((y0 + (y1 << 1) + y2 + 2) >> 2);

    temp = P0 | (P1 << 8);      // [P0 P1 Q0 Q1]
    temp |= (Q0 << 16);         // [Q0 Q1 R0 DO]
    temp |= (Q1 << 24);         // [R0 D0 D1 D1]
    *((uint32*)pred) = temp;    // [D1 D1 D1 D1]
    pred += pred_pitch;

    D0 = (y2 + 3 * y3 + 2) >> 2;
    R0 = (y2 + y3 + 1) >> 1;
    temp = Q0 | (Q1 << 8);
    temp |= (R0 << 16);
    temp |= (D0 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    D1 = y3;
    temp = R0 | (D0 << 8);
    temp |= (D1 << 16);
    temp |= (D1 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = D1 | (D1 << 8);
    temp |= (temp << 16);
    *((uint32*)pred) = temp;

    return ;
}
/* =============================== END 4x4 MODES======================================*/

/* 16x16 mode 0: replicate the 16 top-neighbor pixels down all 16 rows
   using four 32-bit stores per row. */
void Intra_16x16_Vertical(AVCCommonObj *video)
{
    int i;
    uint32 temp1, temp2, temp3, temp4;
    uint8 *comp_ref = video->pintra_pred_top;
    uint8 *pred = video->pred_block;
    int pred_pitch = video->pred_pitch;

    temp1 = *((uint32*)comp_ref);
    comp_ref += 4;
    temp2 = *((uint32*)comp_ref);
    comp_ref += 4;
    temp3 = *((uint32*)comp_ref);
    comp_ref += 4;
    temp4 = *((uint32*)comp_ref);
    comp_ref += 4;

    i = 16;
    while (i > 0)
    {
        *((uint32*)pred) = temp1;
        *((uint32*)(pred + 4)) = temp2;
        *((uint32*)(pred + 8)) = temp3;
        *((uint32*)(pred + 12)) = temp4;
        pred += pred_pitch;
        i--;
    }

    return ;
}

/* 16x16 mode 1: replicate each left-neighbor pixel across its 16-pixel row. */
void Intra_16x16_Horizontal(AVCCommonObj *video, int pitch)
{
    int i;
    uint32 temp;
    uint8 *comp_ref = video->pintra_pred_left;
    uint8 *pred = video->pred_block;
    int pred_pitch = video->pred_pitch;

    for (i = 0; i < 16; i++)
    {
        temp = *comp_ref;
        temp |= (temp << 8);
        temp |= (temp << 16);
        *((uint32*)pred) = temp;
        *((uint32*)(pred + 4)) = temp;
        *((uint32*)(pred + 8)) = temp;
        *((uint32*)(pred + 12)) = temp;
        pred += pred_pitch;
        comp_ref += pitch;
    }
}

/* 16x16 mode 2: DC fill from whichever of the 16 top / 16 left neighbors
   are available (128 when neither).  The top sum is done SIMD-style: each
   32-bit load holds 4 pixels, masked into two 16-bit lanes and folded. */
void Intra_16x16_DC(AVCCommonObj *video, int pitch)
{
    int i;
    uint32 temp, temp2;
    uint8 *comp_ref_x = video->pintra_pred_top;
    uint8 *comp_ref_y = video->pintra_pred_left;
    int sum = 0;
    uint8 *pred = video->pred_block;
    int pred_pitch = video->pred_pitch;

    if (video->intraAvailB)
    {
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF; /* odd bytes */
        temp &= 0xFF00FF;               /* even bytes */
        temp += temp2;
        sum = temp + (temp >> 16);
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum += temp + (temp >> 16);
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum += temp + (temp >> 16);
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum += temp + (temp >> 16);
        sum &= 0xFFFF; /* keep only the folded low lane */

        if (video->intraAvailA)
        {
            for (i = 0; i < 16; i++)
            {
                sum += (*comp_ref_y);
                comp_ref_y += pitch;
            }
            sum = (sum + 16) >> 5; /* 32 neighbors */
        }
        else
        {
            sum = (sum + 8) >> 4;  /* top 16 only */
        }
    }
    else if (video->intraAvailA)
    {
        for (i = 0; i < 16; i++)
        {
            sum += *comp_ref_y;
            comp_ref_y += pitch;
        }
        sum = (sum + 8) >> 4;      /* left 16 only */
    }
    else
    {
        sum = 128;
    }

    temp = sum | (sum << 8);
    temp |= (temp << 16);

    for (i = 0; i < 16; i++)
    {
        *((uint32*)pred) = temp;
        *((uint32*)(pred + 4)) = temp;
        *((uint32*)(pred + 8)) = temp;
        *((uint32*)(pred + 12)) = temp;
        pred += pred_pitch;
    }
}

/* 16x16 mode 3: plane prediction.  Fits a plane a + b*(x-7) + c*(y-7) to
   the top/left neighbors (H,V are the spec's gradient sums), then fills
   the MB row by row; factor_c accumulates by b across a row and each
   clipped sample is packed into 32-bit stores. */
void Intra_16x16_Plane(AVCCommonObj *video, int pitch)
{
    int i, a_16, b, c, factor_c;
    uint8 *comp_ref_x = video->pintra_pred_top;
    uint8 *comp_ref_y = video->pintra_pred_left;
    uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
    int H = 0, V = 0 , tmp;
    uint8 *pred = video->pred_block;
    uint32 temp;
    uint8 byte1, byte2, byte3;
    int value;
    int pred_pitch = video->pred_pitch;

    comp_ref_x0 = comp_ref_x + 8;
    comp_ref_x1 = comp_ref_x + 6;
    comp_ref_y0 = comp_ref_y + (pitch << 3);
    comp_ref_y1 = comp_ref_y + 6 * pitch;

    for (i = 1; i < 8; i++)
    {
        H += i * (*comp_ref_x0++ - *comp_ref_x1--);
        V += i * (*comp_ref_y0 - *comp_ref_y1);
        comp_ref_y0 += pitch;
        comp_ref_y1 -= pitch;
    }
    /* i == 8 here: the last pair uses the topleft corner */
    H += i * (*comp_ref_x0++ - video->intra_pred_topleft);
    V += i * (*comp_ref_y0 - *comp_ref_y1);

    a_16 = ((*(comp_ref_x + 15) + *(comp_ref_y + 15 * pitch)) << 4) + 16;;
    b = (5 * H + 32) >> 6;
    c = (5 * V + 32) >> 6;

    tmp = 0;
    for (i = 0; i < 16; i++)
    {
        factor_c = a_16 + c * (tmp++ - 7);
        factor_c -= 7 * b; /* start of row: column offset -7 */

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)pred) = temp;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)(pred + 4)) = temp;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)(pred + 8)) = temp;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5; /* last column: no further += b needed */
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)(pred + 12)) = temp;

        pred += pred_pitch;
    }
}

/************** Chroma intra prediction *********************/

/* Chroma DC: each 8x8 chroma plane is predicted as four 4x4 DC quadrants
   (pred_0..pred_3 = top-left, top-right, bottom-left, bottom-right),
   computed once for Cb (i/component == 0) and once for Cr (== 1). */
void Intra_Chroma_DC(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
{
    int i;
    uint32 temp, temp2, pred_a, pred_b;
    uint8 *comp_ref_x, *comp_ref_y;
    uint8 *comp_ref_cb_x = video->pintra_pred_top_cb;
    uint8 *comp_ref_cb_y = video->pintra_pred_left_cb;
    uint8 *comp_ref_cr_x = video->pintra_pred_top_cr;
    uint8 *comp_ref_cr_y = video->pintra_pred_left_cr;
    int component, j;
    int sum_x0, sum_x1, sum_y0, sum_y1;
    int pred_0[2], pred_1[2], pred_2[2], pred_3[2];
    int pred_pitch = video->pred_pitch;
    uint8 *pred;

    /* bitwise & is intentional-looking but works only because the avail
       flags are 0/1 -- NOTE(review): confirm flags stay 0/1 */
    if (video->intraAvailB & video->intraAvailA)
    {
        comp_ref_x = comp_ref_cb_x;
        comp_ref_y = comp_ref_cb_y;
        for (i = 0; i < 2; i++)
        {
            temp = *((uint32*)comp_ref_x);
            comp_ref_x += 4;
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x0 = temp & 0xFFFF;   /* sum of 4 left-top neighbors */
            temp = *((uint32*)comp_ref_x);
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x1 = temp & 0xFFFF;   /* sum of 4 right-top neighbors */
            pred_1[i] = (sum_x1 + 2) >> 2;

            sum_y0 = *comp_ref_y;
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y1 = *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            pred_2[i] = (sum_y1 + 2) >> 2;
            pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;
            pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;

            comp_ref_x = comp_ref_cr_x; /* second pass: Cr */
            comp_ref_y = comp_ref_cr_y;
        }
    }
    else if (video->intraAvailA)
    {
        comp_ref_y = comp_ref_cb_y;
        for (i = 0; i < 2; i++)
        {
            sum_y0 = *comp_ref_y;
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y1 = *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;
            pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;
            comp_ref_y = comp_ref_cr_y;
        }
    }
    else if (video->intraAvailB)
    {
        comp_ref_x = comp_ref_cb_x;
        for (i = 0; i < 2; i++)
        {
            temp = *((uint32*)comp_ref_x);
            comp_ref_x += 4;
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x0 = temp & 0xFFFF;
            temp = *((uint32*)comp_ref_x);
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x1 = temp & 0xFFFF;
            pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;
            pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;
            comp_ref_x = comp_ref_cr_x;
        }
    }
    else
    {
        pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =
        pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 128;
    }

    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        pred_a = pred_0[component];
        pred_b = pred_1[component];
        pred_a |= (pred_a << 8);
        pred_a |= (pred_a << 16);
        pred_b |= (pred_b << 8);
        pred_b |= (pred_b << 16);

        for (i = 4; i < 6; i++)
        {
            for (j = 0; j < 4; j++) /* 4 lines */
            {
                *((uint32*)pred) = pred_a;
                *((uint32*)(pred + 4)) = pred_b;
                pred += pred_pitch; /* move to the next line */
            }
            /* switch to the bottom quadrants for the last 4 lines */
            pred_a = pred_2[component];
            pred_b = pred_3[component];
            pred_a |= (pred_a << 8);
            pred_a |= (pred_a << 16);
            pred_b |= (pred_b << 8);
            pred_b |= (pred_b << 16);
        }
        pred = predCr; /* point to cr */
    }
}

/* Chroma horizontal: replicate each left neighbor across its 8-pixel row,
   for Cb then Cr. */
void Intra_Chroma_Horizontal(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
{
    int i;
    uint32 temp;
    uint8 *comp_ref_cb_y = video->pintra_pred_left_cb;
    uint8 *comp_ref_cr_y = video->pintra_pred_left_cr;
    uint8 *comp;
    int component, j;
    int pred_pitch = video->pred_pitch;
    uint8 *pred;

    comp = comp_ref_cb_y;
    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        for (i = 4; i < 6; i++)
        {
            for (j = 0; j < 4; j++)
            {
                temp = *comp;
                comp += pitch;
                temp |= (temp << 8);
                temp |= (temp << 16);
                *((uint32*)pred) = temp;
                *((uint32*)(pred + 4)) = temp;
                pred += pred_pitch;
            }
        }
        comp = comp_ref_cr_y;
        pred = predCr; /* point to cr */
    }
}

/* Chroma vertical: replicate the 8 top neighbors down all 8 rows,
   for Cb then Cr. */
void Intra_Chroma_Vertical(AVCCommonObj *video, uint8 *predCb, uint8 *predCr)
{
    uint32 temp1, temp2;
    uint8 *comp_ref_cb_x = video->pintra_pred_top_cb;
    uint8 *comp_ref_cr_x = video->pintra_pred_top_cr;
    uint8 *comp_ref;
    int component, j;
    int pred_pitch = video->pred_pitch;
    uint8 *pred;

    comp_ref = comp_ref_cb_x;
    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        temp1 = *((uint32*)comp_ref);
        temp2 = *((uint32*)(comp_ref + 4));
        for (j = 0; j < 8; j++)
        {
            *((uint32*)pred) = temp1;
            *((uint32*)(pred + 4)) = temp2;
            pred += pred_pitch;
        }
        comp_ref = comp_ref_cr_x;
        pred = predCr; /* point to cr */
    }
}

/* Chroma plane: 8x8 analogue of Intra_16x16_Plane.  First pass computes
   per-component plane parameters (a, b, c) for Cb and Cr; second pass
   fills each 8x8 plane with the clipped plane samples. */
void Intra_Chroma_Plane(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
{
    int i;
    int a_16_C[2], b_C[2], c_C[2], a_16, b, c, factor_c;
    uint8 *comp_ref_x, *comp_ref_y, *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
    int component, j;
    int H, V, tmp;
    uint32 temp;
    uint8 byte1, byte2, byte3;
    int value;
    uint8 topleft;
    int pred_pitch = video->pred_pitch;
    uint8 *pred;

    comp_ref_x = video->pintra_pred_top_cb;
    comp_ref_y = video->pintra_pred_left_cb;
    topleft = video->intra_pred_topleft_cb;

    for (component = 0; component < 2; component++)
    {
        H = V = 0;
        comp_ref_x0 = comp_ref_x + 4;
        comp_ref_x1 = comp_ref_x + 2;
        comp_ref_y0 = comp_ref_y + (pitch << 2);
        comp_ref_y1 = comp_ref_y + (pitch << 1);
        for (i = 1; i < 4; i++)
        {
            H += i * (*comp_ref_x0++ - *comp_ref_x1--);
            V += i * (*comp_ref_y0 - *comp_ref_y1);
            comp_ref_y0 += pitch;
            comp_ref_y1 -= pitch;
        }
        /* i == 4: final pair uses the topleft corner */
        H += i * (*comp_ref_x0++ - topleft);
        V += i * (*comp_ref_y0 - *comp_ref_y1);

        a_16_C[component] = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;
        b_C[component] = (17 * H + 16) >> 5;
        c_C[component] = (17 * V + 16) >> 5;

        comp_ref_x = video->pintra_pred_top_cr; /* second pass: Cr */
        comp_ref_y = video->pintra_pred_left_cr;
        topleft = video->intra_pred_topleft_cr;
    }

    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        a_16 = a_16_C[component];
        b = b_C[component];
        c = c_C[component];
        tmp = 0;
        for (i = 4; i < 6; i++)
        {
            for (j = 0; j < 4; j++)
            {
                factor_c = a_16 + c * (tmp++ - 3);
                factor_c -= 3 * b; /* start of row: column offset -3 */

                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                byte1 = value;
                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                byte2 = value;
                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                byte3 = value;
                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                temp = byte1 | (byte2 << 8);
                temp |= (byte3 << 16);
                temp |= (value << 24);
                *((uint32*)pred) = temp;

                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                byte1 = value;
                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                byte2 = value;
                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                byte3 = value;
                value = factor_c >> 5;
                factor_c += b;
                CLIP_RESULT(value)
                temp = byte1 | (byte2 << 8);
                temp |= (byte3 << 16);
                temp |= (value << 24);
                *((uint32*)(pred + 4)) = temp;

                pred += pred_pitch;
            }
        }
        pred = predCr; /* point to cr */
    }
}


================================================
FILE: RtspCamera/jni/avc_h264/dec/src/pvavcdecoder.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed
under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "pvavcdecoder.h" #include "oscl_mem.h" // xxx pa #define LOG_TAG "pvavcdecoder" #include "android/log.h" /* global static functions */ void CbAvcDecDebugLog(uint32 *userData, AVCLogType type, char *string1, int val1, int val2) { OSCL_UNUSED_ARG(userData); OSCL_UNUSED_ARG(type); OSCL_UNUSED_ARG(string1); OSCL_UNUSED_ARG(val1); OSCL_UNUSED_ARG(val2); return ; } int CbAvcDecMalloc(void *userData, int32 size, int attribute) { OSCL_UNUSED_ARG(userData); OSCL_UNUSED_ARG(attribute); uint8 *mem; mem = (uint8*) oscl_malloc(size); return (int)mem; } void CbAvcDecFree(void *userData, int mem) { OSCL_UNUSED_ARG(userData); oscl_free((void*)mem); return ; } int CbAvcDecDPBAlloc(void *userData, uint frame_size_in_mbs, uint num_buffers) { PVAVCDecoder *pAvcDec = (PVAVCDecoder*) userData; return pAvcDec->AVC_DPBAlloc(frame_size_in_mbs, num_buffers); } void CbAvcDecFrameUnbind(void *userData, int indx) { PVAVCDecoder *pAvcDec = (PVAVCDecoder*) userData; pAvcDec->AVC_FrameUnbind(indx); return ; } int CbAvcDecFrameBind(void *userData, int indx, uint8 **yuv) { PVAVCDecoder *pAvcDec = (PVAVCDecoder*) userData; return pAvcDec->AVC_FrameBind(indx, yuv); } /* ///////////////////////////////////////////////////////////////////////// */ PVAVCDecoder::PVAVCDecoder() { //iDecoderControl } /* ///////////////////////////////////////////////////////////////////////// */ 
PVAVCDecoder::~PVAVCDecoder() { CleanUpAVCDecoder(); } /* ///////////////////////////////////////////////////////////////////////// */ PVAVCDecoder* PVAVCDecoder::New(void) { PVAVCDecoder* self = new PVAVCDecoder; if (self && self->Construct()) return self; if (self) delete self; return NULL; } /* ///////////////////////////////////////////////////////////////////////// */ bool PVAVCDecoder::Construct() { oscl_memset((void*)&iAvcHandle, 0, sizeof(AVCHandle)); // xxx pa callback setter iAvcHandle.CBAVC_DPBAlloc = &CbAvcDecDPBAlloc; iAvcHandle.CBAVC_FrameBind = &CbAvcDecFrameBind; iAvcHandle.CBAVC_FrameUnbind = &CbAvcDecFrameUnbind; iAvcHandle.CBAVC_Free = &CbAvcDecFree; iAvcHandle.CBAVC_Malloc = &CbAvcDecMalloc; iAvcHandle.CBAVC_DebugLog = &CbAvcDecDebugLog; iAvcHandle.userData = this; iFramePtr = NULL; iDPB = NULL; iFrameUsed = NULL; iNumFrames = NULL; return true; } ///////////////////////////////////////////////////////////////////////////// void PVAVCDecoder::CleanUpAVCDecoder(void) { PVAVCCleanUpDecoder((AVCHandle *)&iAvcHandle); } void PVAVCDecoder::ResetAVCDecoder(void) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "ResetAVCDecoder START"); PVAVCDecReset((AVCHandle *)&iAvcHandle); __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "ResetAVCDecoder END"); } ///////////////////////////////////////////////////////////////////////////// int32 PVAVCDecoder::DecodeSPS(uint8 *bitstream, int32 buffer_size) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DecodeSPS"); return PVAVCDecSeqParamSet((AVCHandle *)&iAvcHandle, bitstream, buffer_size); } int32 PVAVCDecoder::DecodePPS(uint8 *bitstream, int32 buffer_size) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DecodePPS"); return PVAVCDecPicParamSet((AVCHandle *)&iAvcHandle, bitstream, buffer_size); } int32 PVAVCDecoder::DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "DecodeAVCSlice"); return (PVAVCDecodeSlice((AVCHandle *)&iAvcHandle, bitstream, *buffer_size)); 
} bool PVAVCDecoder::GetDecOutput(int *indx, int *release, AVCFrameIO* output) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetDecOutput"); return (PVAVCDecGetOutput((AVCHandle *)&iAvcHandle, indx, release, output) != AVCDEC_SUCCESS) ? false : true; } void PVAVCDecoder::GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right) { AVCDecSPSInfo seqInfo; PVAVCDecGetSeqInfo((AVCHandle *)&iAvcHandle, &seqInfo); *width = seqInfo.FrameWidth; *height = seqInfo.FrameHeight; /* assuming top left corner aligned */ *top = seqInfo.frame_crop_top; *left = seqInfo.frame_crop_left; *bottom = seqInfo.frame_crop_bottom; *right = seqInfo.frame_crop_right; } /* ///////////////////////////////////////////////////////////////////////// */ int PVAVCDecoder::AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers) { int ii; uint frame_size = (frame_size_in_mbs << 8) + (frame_size_in_mbs << 7); if (iDPB) oscl_free(iDPB); // free previous one first iDPB = (uint8*) oscl_malloc(sizeof(uint8) * frame_size * num_buffers); if (iDPB == NULL) { return 0; } iNumFrames = num_buffers; if (iFrameUsed) oscl_free(iFrameUsed); // free previous one iFrameUsed = (bool*) oscl_malloc(sizeof(bool) * num_buffers); if (iFrameUsed == NULL) { return 0; } if (iFramePtr) oscl_free(iFramePtr); // free previous one iFramePtr = (uint8**) oscl_malloc(sizeof(uint8*) * num_buffers); if (iFramePtr == NULL) { return 0; } iFramePtr[0] = iDPB; iFrameUsed[0] = false; for (ii = 1; ii < (int)num_buffers; ii++) { iFrameUsed[ii] = false; iFramePtr[ii] = iFramePtr[ii-1] + frame_size; } return 1; } /* ///////////////////////////////////////////////////////////////////////// */ void PVAVCDecoder::AVC_FrameUnbind(int indx) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVC_FrameUnbind(%d)", indx); if (indx < iNumFrames) { iFrameUsed[indx] = false; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVC_FrameUnbind iFrameUsed[indx(%d)] = false;", indx); } return ; } /* 
///////////////////////////////////////////////////////////////////////// */ int PVAVCDecoder::AVC_FrameBind(int indx, uint8** yuv) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVC_FrameBind(%d)", indx); if ((iFrameUsed[indx] == true) || (indx >= iNumFrames)) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVC_FrameBind return 0: (iFrameUsed[indx] == true) --> %d // (indx (%d) >= iNumFrames (%d)) --> %d", (iFrameUsed[indx] == true), indx, iNumFrames, (indx >= iNumFrames)); return 0; // already in used } iFrameUsed[indx] = true; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVC_FrameBind iFrameUsed[indx(%d)] = true;", indx); *yuv = iFramePtr[indx]; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "AVC_FrameBind final return 1"); return 1; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/pvavcdecoder_factory.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ /** * @file pvavcdecoder_factory.cpp * @brief Singleton factory for PVAVCDecoder */ #include "oscl_base.h" #include "pvavcdecoder.h" #include "pvavcdecoder_factory.h" #include "oscl_error_codes.h" #include "oscl_exception.h" // Use default DLL entry point #include "oscl_dll.h" OSCL_DLL_ENTRY_POINT_DEFAULT() //////////////////////////////////////////////////////////////////////////// OSCL_EXPORT_REF PVAVCDecoderInterface* PVAVCDecoderFactory::CreatePVAVCDecoder() { PVAVCDecoderInterface* videodec = NULL; videodec = PVAVCDecoder::New(); if (videodec == NULL) { OSCL_LEAVE(OsclErrNoMemory); } return videodec; } //////////////////////////////////////////////////////////////////////////// OSCL_EXPORT_REF bool PVAVCDecoderFactory::DeletePVAVCDecoder(PVAVCDecoderInterface* aVideoDec) { if (aVideoDec) { OSCL_DELETE(aVideoDec); return true; } return false; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/residual.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #include "avcdec_lib.h" #include "avcdec_bitstream.h" #include "oscl_mem.h" AVCDec_Status DecodeIntraPCM(AVCCommonObj *video, AVCDecBitstream *stream) { AVCDec_Status status; int j; int mb_x, mb_y, offset1; uint8 *pDst; uint32 byte0, byte1; int pitch; mb_x = video->mb_x; mb_y = video->mb_y; #ifdef USE_PRED_BLOCK pDst = video->pred_block + 84; pitch = 20; #else offset1 = (mb_x << 4) + (mb_y << 4) * video->PicWidthInSamplesL; pDst = video->currPic->Sl + offset1; pitch = video->currPic->pitch; #endif /* at this point bitstream is byte-aligned */ j = 16; while (j > 0) { status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)pDst) = byte0; status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)(pDst + 4)) = byte0; status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)(pDst + 8)) = byte0; status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)(pDst + 12)) = byte0; j--; pDst += pitch; if (status != AVCDEC_SUCCESS) 
/* check only once per line */ return status; } #ifdef USE_PRED_BLOCK pDst = video->pred_block + 452; pitch = 12; #else offset1 = (offset1 >> 2) + (mb_x << 2); pDst = video->currPic->Scb + offset1; pitch >>= 1; #endif j = 8; while (j > 0) { status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)pDst) = byte0; status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)(pDst + 4)) = byte0; j--; pDst += pitch; if (status != AVCDEC_SUCCESS) /* check only once per line */ return status; } #ifdef USE_PRED_BLOCK pDst = video->pred_block + 596; pitch = 12; #else pDst = video->currPic->Scr + offset1; #endif j = 8; while (j > 0) { status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)pDst) = byte0; status = BitstreamReadBits(stream, 8, (uint*) & byte0); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 8); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 16); status = BitstreamReadBits(stream, 8, (uint*) & byte1); byte0 |= (byte1 << 24); *((uint32*)(pDst + 4)) = byte0; j--; pDst += pitch; if (status != AVCDEC_SUCCESS) /* check only once per line */ return status; } #ifdef MB_BASED_DEBLOCK SaveNeighborForIntraPred(video, offset1); #endif return AVCDEC_SUCCESS; } /* see subclause 7.3.5.3 and 
readCBPandCoeffsFromNAL() in JM*/
/* Parses and dequantizes all residual data of one macroblock:
   Intra16x16 DC (when AVC_I16), luma AC 4x4 blocks per CBP bit, chroma DC,
   and chroma AC. Fills video->block (raster order, dequantized) and
   video->cbp4x4 (one bit per 4x4 block that has nonzero coefficients).
   Returns AVCDEC_FAIL on an out-of-range coefficient position. */
AVCDec_Status residual(AVCDecObject *decvid, AVCMacroblock *currMB)
{
    AVCCommonObj *video = decvid->common;
    int16 *block;
    int level[16], run[16], numcoeff; /* output from residual_block_cavlc */
    int block_x, i, j, k, idx, iCbCr;
    int mbPartIdx, subMbPartIdx, mbPartIdx_X, mbPartIdx_Y;
    int nC, maxNumCoeff = 16;
    int coeffNum, start_scan = 0;
    uint8 *zz_scan;
    int Rq, Qq;
    uint32 cbp4x4 = 0;

    /* in 8.5.4, it only says if it's field macroblock. */
    zz_scan = (uint8*) ZZ_SCAN_BLOCK;

    /* see 8.5.8 for the initialization of these values */
    Qq = video->QPy_div_6;
    Rq = video->QPy_mod_6;

    /* clear all coefficients of this MB before parsing */
    oscl_memset(video->block, 0, sizeof(int16)*NUM_PIXELS_IN_MB);

    if (currMB->mbMode == AVC_I16)
    {
        /* Intra16x16 DC level: one 16-coefficient block, DC of each 4x4 */
        nC = predict_nnz(video, 0, 0);
        decvid->residual_block(decvid, nC, 16, level, run, &numcoeff);
        /* then performs zigzag and transform */
        block = video->block;
        coeffNum = -1;
        for (i = numcoeff - 1; i >= 0; i--)
        {
            coeffNum += run[i] + 1;
            if (coeffNum > 15)
            {
                return AVCDEC_FAIL;
            }
            /* DC of each 4x4 block sits every 4th entry in raster order */
            idx = zz_scan[coeffNum] << 2;
            /* idx = ((idx>>2)<<6) + ((idx&3)<<2); */
            block[idx] = level[i];
        }
        /* inverse transform on Intra16x16DCLevel */
        if (numcoeff)
        {
            Intra16DCTrans(block, Qq, Rq);
            cbp4x4 = 0xFFFF;
        }
        /* AC blocks of an I16 MB skip the DC position (start_scan = 1) */
        maxNumCoeff = 15;
        start_scan = 1;
    }

    oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*24);

    /* luma AC residual: four 8x8 partitions, each gated by one CBP bit */
    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        mbPartIdx_X = (mbPartIdx & 1) << 1;
        mbPartIdx_Y = mbPartIdx & -2;

        if (currMB->CBP&(1 << mbPartIdx))
        {
            for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++)
            {
                i = mbPartIdx_X + (subMbPartIdx & 1); // check this
                j = mbPartIdx_Y + (subMbPartIdx >> 1);
                block = video->block + (j << 6) + (i << 2); //
                nC = predict_nnz(video, i, j);
                decvid->residual_block(decvid, nC, maxNumCoeff, level, run, &numcoeff);
                /* convert to raster scan and quantize*/
                /* Note: for P mb in SP slice and SI mb in SI slice,
                   the quantization cannot be done here.
                   block[idx] should be assigned with level[k].
                   itrans will be done after the prediction.
                   There will be transformation on the predicted value,
                   then addition with block[idx], then this
                   quantization and transform.*/
                coeffNum = -1 + start_scan;
                for (k = numcoeff - 1; k >= 0; k--)
                {
                    coeffNum += run[k] + 1;
                    if (coeffNum > 15)
                    {
                        return AVCDEC_FAIL;
                    }
                    idx = zz_scan[coeffNum];
                    block[idx] = (level[k] * dequant_coefres[Rq][coeffNum]) << Qq ;
                }
                currMB->nz_coeff[(j<<2)+i] = numcoeff;
                if (numcoeff)
                {
                    cbp4x4 |= (1 << ((j << 2) + i));
                }
            }
        }
    }

    /* switch to the chroma quantizer for the remaining blocks */
    Qq = video->QPc_div_6;
    Rq = video->QPc_mod_6;

    if (currMB->CBP & (3 << 4)) /* chroma DC residual present */
    {
        for (iCbCr = 0; iCbCr < 2; iCbCr++)
        {
            /* nC == -1 selects the chroma-DC VLC tables */
            decvid->residual_block(decvid, -1, 4, level, run, &numcoeff);
            block = video->block + 256 + (iCbCr << 3);
            coeffNum = -1;
            for (i = numcoeff - 1; i >= 0; i--)
            {
                coeffNum += run[i] + 1;
                if (coeffNum > 3)
                {
                    return AVCDEC_FAIL;
                }
                block[(coeffNum>>1)*64 + (coeffNum&1)*4] = level[i];
            }
            /* inverse transform on chroma DC */
            /* for P in SP and SI in SI, this function can't be done here,
               must do prediction transform/quant first. */
            if (numcoeff)
            {
                ChromaDCTrans(block, Qq, Rq);
                cbp4x4 |= (iCbCr ? 0xcc0000 : 0x330000);
            }
        }
    }

    if (currMB->CBP & (2 << 4))
    {
        /* chroma AC residual: 4x4 blocks of Cb then Cr */
        for (block_x = 0; block_x < 4; block_x += 2) /* for iCbCr */
        {
            for (j = 4; j < 6; j++) /* for each block inside Cb or Cr */
            {
                for (i = block_x; i < block_x + 2; i++)
                {
                    block = video->block + (j << 6) + (i << 2);
                    nC = predict_nnz_chroma(video, i, j);
                    decvid->residual_block(decvid, nC, 15, level, run, &numcoeff);
                    /* convert to raster scan and quantize */
                    /* for P MB in SP slice and SI MB in SI slice,
                       the dequant and transform cannot be done here.
                       It needs the prediction values. */
                    /* coeffNum starts at 0: chroma AC scan skips the DC slot */
                    coeffNum = 0;
                    for (k = numcoeff - 1; k >= 0; k--)
                    {
                        coeffNum += run[k] + 1;
                        if (coeffNum > 15)
                        {
                            return AVCDEC_FAIL;
                        }
                        idx = zz_scan[coeffNum];
                        block[idx] = (level[k] * dequant_coefres[Rq][coeffNum]) << Qq;
                    }
                    /* then transform */
                    //      itrans(block); /* transform */
                    currMB->nz_coeff[(j<<2)+i] = numcoeff; //
                    if (numcoeff)
                    {
                        cbp4x4 |= (1 << ((j << 2) + i));
                    }
                }
            }
        }
    }
    video->cbp4x4 = cbp4x4;

    return AVCDEC_SUCCESS;
}

/* see subclause 7.3.5.3.1 and 9.2 and readCoeff4x4_CAVLC() in JM */
/* CAVLC parse of one coefficient block: produces up to maxNumCoeff
   (level, run) pairs in reverse scan order plus the coefficient count.
   nC is the context (predicted nonzero count); nC < 0 selects the
   chroma-DC code tables. The inverse zigzag scan is left to the caller. */
AVCDec_Status residual_block_cavlc(AVCDecObject *decvid, int nC, int maxNumCoeff, int *level, int *run, int *numcoeff)
{
    int i, j;
    int TrailingOnes, TotalCoeff;
    AVCDecBitstream *stream = decvid->bitstream;
    int suffixLength;
    uint trailing_ones_sign_flag, level_prefix, level_suffix;
    int levelCode, levelSuffixSize, zerosLeft;
    int run_before;

    /* coeff_token: joint code for (TotalCoeff, TrailingOnes) */
    if (nC >= 0)
    {
        ce_TotalCoeffTrailingOnes(stream, &TrailingOnes, &TotalCoeff, nC);
    }
    else
    {
        ce_TotalCoeffTrailingOnesChromaDC(stream, &TrailingOnes, &TotalCoeff);
    }

    *numcoeff = TotalCoeff;

    /* This part is done quite differently in ReadCoef4x4_CAVLC() */
    if (TotalCoeff == 0)
    {
        return AVCDEC_SUCCESS;
    }

    if (TrailingOnes) /* keep reading the sign of those trailing ones */
    {
        /* instead of reading one bit at a time, read the whole thing at once */
        BitstreamReadBits(stream, TrailingOnes, &trailing_ones_sign_flag);
        trailing_ones_sign_flag <<= 1;
        for (i = 0; i < TrailingOnes; i++)
        {
            /* maps sign bit to +1 / -1 */
            level[i] = 1 - ((trailing_ones_sign_flag >> (TrailingOnes - i - 1)) & 2);
        }
    }

    i = TrailingOnes;
    suffixLength = 1;
    if (TotalCoeff > TrailingOnes)
    {
        /* first non-trailing-one level uses special suffix rules (9.2.2.1) */
        ce_LevelPrefix(stream, &level_prefix);
        if (TotalCoeff < 11 || TrailingOnes == 3)
        {
            if (level_prefix < 14)
            {
//                levelSuffixSize = 0;
                levelCode = level_prefix;
            }
            else if (level_prefix == 14)
            {
//                levelSuffixSize = 4;
                BitstreamReadBits(stream, 4, &level_suffix);
                levelCode = 14 + level_suffix;
            }
            else /* if (level_prefix == 15) */
            {
//                levelSuffixSize = 12;
                BitstreamReadBits(stream, 12, &level_suffix);
                levelCode = 30 + level_suffix;
            }
        }
        else
        {
            /*         suffixLength = 1; */
            if (level_prefix < 15)
            {
                levelSuffixSize = suffixLength;
            }
            else
            {
                levelSuffixSize = 12;
            }
            BitstreamReadBits(stream, levelSuffixSize, &level_suffix);
            levelCode = (level_prefix << 1) + level_suffix;
        }

        if (TrailingOnes < 3)
        {
            /* fewer than 3 trailing ones implies |level| >= 2 */
            levelCode += 2;
        }

        level[i] = (levelCode + 2) >> 1;
        if (level[i] > 3)
        {
            suffixLength = 2;
        }

        if (levelCode & 1)
        {
            level[i] = -level[i];
        }
        i++;
    }

    /* remaining levels: adaptive suffix length, growing with magnitude */
    for (j = TotalCoeff - i; j > 0 ; j--)
    {
        ce_LevelPrefix(stream, &level_prefix);
        if (level_prefix < 15)
        {
            levelSuffixSize = suffixLength;
        }
        else
        {
            levelSuffixSize = 12;
        }
        BitstreamReadBits(stream, levelSuffixSize, &level_suffix);
        levelCode = (level_prefix << suffixLength) + level_suffix;
        level[i] = (levelCode >> 1) + 1;
        if (level[i] > (3 << (suffixLength - 1)) && suffixLength < 6)
        {
            suffixLength++;
        }
        if (levelCode & 1)
        {
            level[i] = -level[i];
        }
        i++;
    }

    /* total_zeros: only coded when the block is not already full */
    if (TotalCoeff < maxNumCoeff)
    {
        if (nC >= 0)
        {
            ce_TotalZeros(stream, &zerosLeft, TotalCoeff);
        }
        else
        {
            ce_TotalZerosChromaDC(stream, &zerosLeft, TotalCoeff);
        }
    }
    else
    {
        zerosLeft = 0;
    }

    /* run_before for each coefficient except the last */
    for (i = 0; i < TotalCoeff - 1; i++)
    {
        if (zerosLeft > 0)
        {
            ce_RunBefore(stream, &run_before, zerosLeft);
            run[i] = run_before;
        }
        else
        {
            run[i] = 0;
            zerosLeft = 0; // could be negative under error conditions
        }
        zerosLeft = zerosLeft - run[i];
    }
    if (zerosLeft < 0)
    {
        /* corrupt stream: clamp instead of failing (original policy) */
        zerosLeft = 0;
//        return AVCDEC_FAIL;
    }
    run[TotalCoeff-1] = zerosLeft;

    /* leave the inverse zigzag scan part for the caller */
    return AVCDEC_SUCCESS;
}
================================================ FILE: RtspCamera/jni/avc_h264/dec/src/slice.cpp ================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
/* Note for optimization: syntax decoding or operations related to B_SLICE should be
commented out by macro definition or function pointers. */

#include "oscl_mem.h"
#include "avcdec_lib.h"
#include "avcdec_bitstream.h"

/* maps MB partition mode (P16/P16x8/P8x16, relative to AVC_P16) to the
   raster layout of its partitions; used to spread ref_idx_L0 in mb_pred() */
const static int mbPart2raster[3][4] = {{0, 0, 0, 0}, {1, 1, 0, 0}, {1, 0, 1, 0}};

/* decode_frame_slice() */
/* decode_one_slice() */
/* Decodes all macroblocks of one slice (subclause 7.3.4): walks the MBs of
   this slice group from first_mb_in_slice, concealing any gap before the
   slice when constrained_set1_flag permits. Returns AVCDEC_PICTURE_READY
   when the whole picture is decoded, AVCDEC_FAIL on error. */
AVCDec_Status DecodeSlice(AVCDecObject *decvid)
{
    AVCDec_Status status;
    AVCCommonObj *video = decvid->common;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCMacroblock *currMB ;
    AVCDecBitstream *stream = decvid->bitstream;
    uint slice_group_id;
    uint CurrMbAddr, moreDataFlag;

    /* set the first mb in slice */
    CurrMbAddr = sliceHdr->first_mb_in_slice;
    /* NOTE(review): CurrMbAddr indexes MbToSliceGroupMap here BEFORE the
       PicSizeInMbs range check inside the loop — presumably first_mb_in_slice
       was validated during header decoding; confirm upstream. */
    slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];

    /* conceal skipped-over MBs when the stream jumped ahead */
    if ((CurrMbAddr && (CurrMbAddr != (uint)(video->mbNum + 1))) && video->currSeqParams->constrained_set1_flag == 1)
    {
        ConcealSlice(decvid, video->mbNum, CurrMbAddr);
    }

    moreDataFlag = 1;
    video->mb_skip_run = -1;

    /* while loop , see subclause 7.3.4 */
    do
    {
        if (CurrMbAddr >= video->PicSizeInMbs)
        {
            return AVCDEC_FAIL;
        }

        currMB = video->currMB = &(video->mblock[CurrMbAddr]);
        video->mbNum = CurrMbAddr;
        currMB->slice_id = video->slice_id;  // slice

        /* we can remove this check if we don't support Mbaff. */
        /* we can wrap below into an initMB() function which will also do
        necessary reset of macroblock related parameters. */
        video->mb_x = CurrMbAddr % video->PicWidthInMbs;
        video->mb_y = CurrMbAddr / video->PicWidthInMbs;

        /* check the availability of neighboring macroblocks */
        InitNeighborAvailability(video, CurrMbAddr);

        /* read_macroblock and decode_one_macroblock() */
        status = DecodeMB(decvid);
        if (status != AVCDEC_SUCCESS)
        {
            return status;
        }
#ifdef MB_BASED_DEBLOCK
        if (video->currPicParams->num_slice_groups_minus1 == 0)
        {
            MBInLoopDeblock(video); /* MB-based deblocking */
        }
        else    /* this mode cannot be used if the number of slice group is not one. */
        {
            return AVCDEC_FAIL;
        }
#endif
        video->numMBs--;

        moreDataFlag = more_rbsp_data(stream);

        /* go to next MB: skip addresses belonging to other slice groups */
        while (++CurrMbAddr < video->PicSizeInMbs && video->MbToSliceGroupMap[CurrMbAddr] != (int)slice_group_id)
        {
        }
    }
    while ((moreDataFlag && video->numMBs > 0) || video->mb_skip_run > 0); /* even if no more data, but last few MBs are skipped */

    if (video->numMBs == 0)
    {
        video->newPic = TRUE;
        video->mbNum = 0;  // _Conceal
        return AVCDEC_PICTURE_READY;
    }

    return AVCDEC_SUCCESS;
}

/* read MB mode and motion vectors */
/* perform Intra/Inter prediction and residue */
/* update video->mb_skip_run */
/* Decodes one macroblock: skip handling for P slices, mode parsing,
   intra/inter prediction data, CBP, QP delta, residual, then the actual
   prediction + reconstruction. I_PCM macroblocks are copied raw. */
AVCDec_Status DecodeMB(AVCDecObject *decvid)
{
    AVCDec_Status status;
    AVCCommonObj *video = decvid->common;
    AVCDecBitstream *stream = decvid->bitstream;
    AVCMacroblock *currMB = video->currMB;
    uint mb_type;
    int slice_type = video->slice_type;
    int temp;

    currMB->QPy = video->QPy;
    currMB->QPc = video->QPc;

    if (slice_type == AVC_P_SLICE)
    {
        /* mb_skip_run < 0 means "not yet read for this run" */
        if (video->mb_skip_run < 0)
        {
            ue_v(stream, (uint *)&(video->mb_skip_run));
        }

        if (video->mb_skip_run == 0)
        {
            /* this will not handle the case where the slice ends with a mb_skip_run == 0 and no following MB data */
            ue_v(stream, &mb_type);
            if (mb_type > 30)
            {
                return AVCDEC_FAIL;
            }
            InterpretMBModeP(currMB, mb_type);
            video->mb_skip_run = -1;
        }
        else
        {
            /* see subclause 7.4.4 for more details on how
            mb_field_decoding_flag is derived in case of skipped MB */

            /* synthesize a P_Skip macroblock: one 16x16 partition,
               reference index 0, no residual */
            currMB->mb_intra = FALSE;

            currMB->mbMode = AVC_SKIP;
            currMB->MbPartWidth = currMB->MbPartHeight = 16;
            currMB->NumMbPart = 1;
            currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
                currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1; //
            currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
                currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
            currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
                currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;
            oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*NUM_BLKS_IN_MB);

            currMB->CBP = 0;
            video->cbp4x4 = 0;
            /* for skipped MB, always look at the first entry in RefPicList */
            currMB->RefIdx[0] = currMB->RefIdx[1] =
                currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;
            InterMBPrediction(video);

            video->mb_skip_run--;

            return AVCDEC_SUCCESS;
        }
    }
    else
    {
        /* Then decode mode and MV */
        ue_v(stream, &mb_type);
        if (mb_type > 25)
        {
            return AVCDEC_FAIL;
        }
        InterpretMBModeI(currMB, mb_type);
    }

    if (currMB->mbMode != AVC_I_PCM)
    {
        /* prediction syntax: sub-MB partitions use sub_mb_pred() */
        if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
        {
            status = sub_mb_pred(video, currMB, stream);
        }
        else
        {
            status = mb_pred(video, currMB, stream) ;
        }

        if (status != AVCDEC_SUCCESS)
        {
            return status;
        }

        if (currMB->mbMode != AVC_I16)
        {
            /* decode coded_block_pattern */
            status = DecodeCBP(currMB, stream);
            if (status != AVCDEC_SUCCESS)
            {
                return status;
            }
        }

        if (currMB->CBP > 0 || currMB->mbMode == AVC_I16)
        {
            /* mb_qp_delta */
            se_v(stream, &temp);
            if (temp)
            {
                /* wrap QPy + delta into [0,51]; "temp*79>>12" is a
                   multiply-shift division by 52 */
                temp += (video->QPy + 52);
                currMB->QPy = video->QPy = temp - 52 * (temp * 79 >> 12);
                if (currMB->QPy > 51 || currMB->QPy < 0)
                {
                    video->QPy = AVC_CLIP3(0, 51, video->QPy);
//                    return AVCDEC_FAIL;
                }
                video->QPy_div_6 = (video->QPy * 43) >> 8;
                video->QPy_mod_6 = video->QPy - 6 * video->QPy_div_6;
                currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->QPy + video->currPicParams->chroma_qp_index_offset)];
                video->QPc_div_6 = (video->QPc * 43) >> 8;
                video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;
            }
        }
        /* decode residue and inverse transform */
        status = residual(decvid, currMB);
        if (status != AVCDEC_SUCCESS)
        {
            return status;
        }
    }
    else
    {
        /* I_PCM: align to a byte boundary, then copy raw samples */
        if (stream->bitcnt & 7)
        {
            BitstreamByteAlign(stream);
        }
        /* decode pcm_byte[i] */
        DecodeIntraPCM(video, stream);

        currMB->QPy = 0;  /* necessary for deblocking */ // _OPTIMIZE
        currMB->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->currPicParams->chroma_qp_index_offset)];

        /* default values, don't know if really needed */
        currMB->CBP = 0x3F;
        video->cbp4x4 = 0xFFFF;
        currMB->mb_intra = TRUE;
        oscl_memset(currMB->nz_coeff, 16, sizeof(uint8)*NUM_BLKS_IN_MB);
        return AVCDEC_SUCCESS;
    }

    /* do Intra/Inter prediction, together with the residue compensation */
    /* This part should be common between the skip and no-skip */
    if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
    {
        IntraMBPrediction(video);
    }
    else
    {
        InterMBPrediction(video);
    }

    return AVCDEC_SUCCESS;
}

/* see subclause 7.3.5.1 */
/* Parses macroblock prediction syntax for non-P8x8 modes: intra prediction
   modes for I4/I16 MBs, or reference indices and motion vector differences
   for inter MBs. */
AVCDec_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)
{
    int mbPartIdx;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    uint max_ref_idx;
    const int *temp_0;
    int16 *temp_1;
    uint code;

    if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
    {
        /* derive intra-availability of the four neighbors; with constrained
           intra prediction only intra-coded neighbors may be used */
        video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
        if (!video->currPicParams->constrained_intra_pred_flag)
        {
            video->intraAvailA = video->mbAvailA;
            video->intraAvailB = video->mbAvailB;
            video->intraAvailC = video->mbAvailC;
            video->intraAvailD = video->mbAvailD;
        }
        else
        {
            if (video->mbAvailA)
            {
                video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
            }
            if (video->mbAvailB)
            {
                video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
            }
            if (video->mbAvailC)
            {
                video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
            }
            if (video->mbAvailD)
            {
                video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
            }
        }

        if (currMB->mbMode == AVC_I4)
        {
            /* perform prediction to get the actual intra 4x4 pred mode */
            DecodeIntra4x4Mode(video, currMB, stream);
            /* output will be in currMB->i4Mode[4][4] */
        }

        /* intra_chroma_pred_mode */
        ue_v(stream,
&code);
        if (code > 3)
        {
            return AVCDEC_FAIL; /* out of range */
        }
        currMB->intra_chroma_pred_mode = (AVCIntraChromaPredMode)code;
    }
    else
    {
        oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);

        /* see subclause 7.4.5.1 for the range of ref_idx_lX */
//        max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
        max_ref_idx = video->refList0Size - 1;

        /* decode ref index for L0 */
        if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)
        {
            for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
            {
                te_v(stream, &code, max_ref_idx);
                if (code > (uint)max_ref_idx)
                {
                    return AVCDEC_FAIL;
                }
                currMB->ref_idx_L0[mbPartIdx] = code;
            }
        }

        /* populate ref_idx_L0: spread the per-partition indices over all four
           8x8 quadrants according to the partition layout table */
        temp_0 = &mbPart2raster[currMB->mbMode-AVC_P16][0];
        temp_1 = &currMB->ref_idx_L0[3];

        *temp_1-- = currMB->ref_idx_L0[*temp_0++];
        *temp_1-- = currMB->ref_idx_L0[*temp_0++];
        *temp_1-- = currMB->ref_idx_L0[*temp_0++];
        *temp_1-- = currMB->ref_idx_L0[*temp_0++];

        /* Global reference index, these values are used in deblock */
        currMB->RefIdx[0] = video->RefPicList0[currMB->ref_idx_L0[0]]->RefIdx;
        currMB->RefIdx[1] = video->RefPicList0[currMB->ref_idx_L0[1]]->RefIdx;
        currMB->RefIdx[2] = video->RefPicList0[currMB->ref_idx_L0[2]]->RefIdx;
        currMB->RefIdx[3] = video->RefPicList0[currMB->ref_idx_L0[3]]->RefIdx;

        /* see subclause 7.4.5.1 for the range of ref_idx_lX */
        /* NOTE(review): assigned but never used below — L1 (B-slice) data is
           not parsed in this function; dead store kept as-is. */
        max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;

        /* decode mvd_l0 */
        for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
        {
            se_v(stream, &(video->mvd_l0[mbPartIdx][0][0]));
            se_v(stream, &(video->mvd_l0[mbPartIdx][0][1]));
        }
    }

    return AVCDEC_SUCCESS;
}

/* see subclause 7.3.5.2 */
/* Parses sub-macroblock prediction syntax for P8x8/P8x8ref0 MBs: four
   sub_mb_type codes, optional per-partition L0 reference indices, and one
   motion vector difference per sub-partition. */
AVCDec_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)
{
    int mbPartIdx, subMbPartIdx;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    uint max_ref_idx;
    uint sub_mb_type[4];
    uint code;

    oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);

    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        ue_v(stream, &(sub_mb_type[mbPartIdx]));
        if (sub_mb_type[mbPartIdx] > 3)
        {
            return AVCDEC_FAIL;
        }
    }

    /* we have to check the values to make sure they are valid */
    /* assign values to currMB->sub_mb_type[], currMB->MBPartPredMode[][x] */
    InterpretSubMBModeP(currMB, sub_mb_type);

    /* see subclause 7.4.5.1 for the range of ref_idx_lX */
//    max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
    max_ref_idx = video->refList0Size - 1;
    /* P8ref0 forces all reference indices to 0, so none are coded */
    if (sliceHdr->num_ref_idx_l0_active_minus1 > 0 && currMB->mbMode != AVC_P8ref0)
    {
        for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
        {
            te_v(stream, (uint*)&code, max_ref_idx);
            if (code > max_ref_idx)
            {
                return AVCDEC_FAIL;
            }
            currMB->ref_idx_L0[mbPartIdx] = code;
        }
    }
    /* see subclause 7.4.5.1 for the range of ref_idx_lX */
    /* NOTE(review): dead store — L1 data is not parsed here (kept as-is). */
    max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
    /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
            max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/

    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
        {
            se_v(stream, &(video->mvd_l0[mbPartIdx][subMbPartIdx][0]));
            se_v(stream, &(video->mvd_l0[mbPartIdx][subMbPartIdx][1]));
        }
        /* used in deblocking */
        currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;
    }
    return AVCDEC_SUCCESS;
}

/* Interprets mb_type for I slices (table 7-11): I_4x4, the 24 I_16x16
   variants (pred mode + CBP packed into mb_type), or I_PCM. */
void InterpretMBModeI(AVCMacroblock *mblock, uint mb_type)
{
    mblock->NumMbPart = 1;

    mblock->mb_intra = TRUE;

    if (mb_type == 0) /* I_4x4 */
    {
        mblock->mbMode = AVC_I4;
    }
    else if (mb_type < 25) /* I_PCM */
    {
        mblock->mbMode = AVC_I16;
        /* I16 mb_type encodes pred mode (low 2 bits) and CBP */
        mblock->i16Mode = (AVCIntra16x16PredMode)((mb_type - 1) & 0x3);
        if (mb_type > 12)
        {
            mblock->CBP = (((mb_type - 13) >> 2) << 4) + 0x0F;
        }
        else
        {
            mblock->CBP = ((mb_type - 1) >> 2) << 4;
        }
    }
    else
    {
        mblock->mbMode = AVC_I_PCM;
    }

    return ;
}

/* Interprets mb_type for P slices (table 7-13): mb_type 0..4 are the inter
   partition shapes; 5 and above are intra types (delegated to
   InterpretMBModeI with the offset removed). */
void InterpretMBModeP(AVCMacroblock *mblock, uint mb_type)
{
    const static int map2PartWidth[5] = {16, 16, 8, 8, 8};
    const static int map2PartHeight[5] = {16, 8, 16, 8, 8};
    const static int map2NumPart[5] = {1, 2, 2, 4, 4};
    const static AVCMBMode map2mbMode[5] = {AVC_P16, AVC_P16x8, AVC_P8x16, AVC_P8, AVC_P8ref0};

    mblock->mb_intra = FALSE;
    if (mb_type < 5)
    {
        mblock->mbMode = map2mbMode[mb_type];
        mblock->MbPartWidth = map2PartWidth[mb_type];
        mblock->MbPartHeight = map2PartHeight[mb_type];
        mblock->NumMbPart = map2NumPart[mb_type];
        mblock->NumSubMbPart[0] = mblock->NumSubMbPart[1] =
            mblock->NumSubMbPart[2] = mblock->NumSubMbPart[3] = 1;
        mblock->SubMbPartWidth[0] = mblock->SubMbPartWidth[1] =
            mblock->SubMbPartWidth[2] = mblock->SubMbPartWidth[3] = mblock->MbPartWidth;
        mblock->SubMbPartHeight[0] = mblock->SubMbPartHeight[1] =
            mblock->SubMbPartHeight[2] = mblock->SubMbPartHeight[3] = mblock->MbPartHeight;
    }
    else
    {
        InterpretMBModeI(mblock, mb_type - 5);
        /* set MV and Ref_Idx codes of Intra blocks in P-slices */
        oscl_memset(mblock->mvL0, 0, sizeof(int32)*16);
        mblock->ref_idx_L0[0] = mblock->ref_idx_L0[1] =
            mblock->ref_idx_L0[2] = mblock->ref_idx_L0[3] = -1;
    }
    return ;
}

/* Interprets mb_type for B slices (table 7-14). NOTE(review): not reachable
   from the visible P/I decode path (DecodeMB only handles P and intra). */
void InterpretMBModeB(AVCMacroblock *mblock, uint mb_type)
{
    const static int map2PartWidth[23] = {8, 16, 16, 16, 16, 8, 16, 8, 16, 8,
                                          16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 8
                                         };
    const static int map2PartHeight[23] = {8, 16, 16, 16, 8, 16, 8, 16, 8, 16,
                                           8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8
                                          };
    /* see enum AVCMBType declaration */
    const static AVCMBMode map2mbMode[23] = {AVC_BDirect16, AVC_P16, AVC_P16, AVC_P16,
                                            AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16,
                                            AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16,
                                            AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P8
                                            };
    const static int map2PredMode1[23] = {3, 0, 1, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, -1};
    const static int map2PredMode2[23] = { -1, -1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 0, 0, 1, 1, 2, 2, -1};
    const static int map2NumPart[23] = { -1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4};

    mblock->mb_intra = FALSE;
    if (mb_type < 23)
    {
        mblock->mbMode = map2mbMode[mb_type];
        mblock->NumMbPart = map2NumPart[mb_type];
        mblock->MBPartPredMode[0][0] = (AVCPredMode)map2PredMode1[mb_type];
        if (mblock->NumMbPart > 1)
        {
            mblock->MBPartPredMode[1][0] = (AVCPredMode)map2PredMode2[mb_type];
        }
        mblock->MbPartWidth = map2PartWidth[mb_type];
        mblock->MbPartHeight = map2PartHeight[mb_type];
    }
    else
    {
        InterpretMBModeI(mblock, mb_type - 23);
    }

    return ;
}

/* Interprets mb_type for SI slices: 0 is SI_4x4, the rest map to I types. */
void InterpretMBModeSI(AVCMacroblock *mblock, uint mb_type)
{
    mblock->mb_intra = TRUE;

    if (mb_type == 0)
    {
        mblock->mbMode = AVC_SI4;
        /* other values are N/A */
    }
    else
    {
        InterpretMBModeI(mblock, mb_type - 1);
    }
    return ;
}

/* input is mblock->sub_mb_type[] */
/* Interprets the four sub_mb_type codes of a P8x8 MB (table 7-17): sets
   sub-partition counts and dimensions for each 8x8 quadrant. */
void InterpretSubMBModeP(AVCMacroblock *mblock, uint *sub_mb_type)
{
    int i, sub_type;
    /* see enum AVCMBType declaration */
//    const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};
    const static int map2subPartWidth[4] = {8, 8, 4, 4};
    const static int map2subPartHeight[4] = {8, 4, 8, 4};
    const static int map2numSubPart[4] = {1, 2, 2, 4};

    for (i = 0; i < 4 ; i++)
    {
        sub_type = (int) sub_mb_type[i];
//        mblock->subMbMode[i] = map2subMbMode[sub_type];
        mblock->NumSubMbPart[i] = map2numSubPart[sub_type];
        mblock->SubMbPartWidth[i] = map2subPartWidth[sub_type];
        mblock->SubMbPartHeight[i] = map2subPartHeight[sub_type];
    }

    return ;
}

/* Interprets sub_mb_type codes for B slices (table 7-18).
   NOTE(review): map2numSubPart is declared [13] but has only 12
   initializers, so entry 12 (B_Bi_4x4) is zero-initialized — looks wrong
   (expected 4 sub-parts); confirm against the upstream PacketVideo source
   before relying on B-slice support. */
void InterpretSubMBModeB(AVCMacroblock *mblock, uint *sub_mb_type)
{
    int i, j, sub_type;
    /* see enum AVCMBType declaration */
    const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8, AVC_8x8, AVC_8x8,
                                                  AVC_8x8, AVC_8x4, AVC_4x8, AVC_8x4, AVC_4x8, AVC_8x4, AVC_4x8,
                                                  AVC_4x4, AVC_4x4, AVC_4x4
                                                  };
    const static int map2subPartWidth[13] = {4, 8, 8, 8, 8, 4, 8, 4, 8, 4, 4, 4, 4};
    const static int map2subPartHeight[13] = {4, 8, 8, 8, 4, 8, 4, 8, 4, 8, 4, 4, 4};
    const static int map2numSubPart[13] = {1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4};
    const static int map2predMode[13] = {3, 0, 1, 2, 0, 0, 1, 1, 2, 2, 0, 1, 2};

    for (i = 0; i < 4 ; i++)
    {
        sub_type = (int) sub_mb_type[i];
        mblock->subMbMode[i] = map2subMbMode[sub_type];
        mblock->NumSubMbPart[i] = map2numSubPart[sub_type];
        mblock->SubMbPartWidth[i] = map2subPartWidth[sub_type];
        mblock->SubMbPartHeight[i] = map2subPartHeight[sub_type];
        for (j = 0; j < 4; j++)
        {
            mblock->MBPartPredMode[i][j] = (AVCPredMode)map2predMode[sub_type];
        }
    }

    return ;
}

/* see subclause 8.3.1 */
/* Decodes the 16 intra 4x4 prediction modes of an I4 macroblock: for each
   4x4 block, predicts the mode as min(left, above) and either keeps the
   prediction (prev_intra4x4_pred_mode_flag) or applies the 3-bit remainder
   code. Results go into currMB->i4Mode[]. */
AVCDec_Status DecodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)
{
    int intra4x4PredModeA = 0, intra4x4PredModeB = 0, predIntra4x4PredMode = 0;
    int component, SubBlock_indx, block_x, block_y;
    int dcOnlyPredictionFlag;
    uint    prev_intra4x4_pred_mode_flag[16];
    int     rem_intra4x4_pred_mode[16];
    int bindx = 0;

    for (component = 0; component < 4; component++) /* partition index */
    {
        block_x = ((component & 1) << 1);
        block_y = ((component >> 1) << 1);

        for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */
        {
            BitstreamRead1Bit(stream, &(prev_intra4x4_pred_mode_flag[bindx]));

            if (!prev_intra4x4_pred_mode_flag[bindx])
            {
                BitstreamReadBits(stream, 3, (uint*)&(rem_intra4x4_pred_mode[bindx]));
            }

            dcOnlyPredictionFlag = 0;
            /* left neighbor mode (A): from this MB or from mbAddrA */
            if (block_x > 0)
            {
                intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ];
            }
            else
            {
                if (video->intraAvailA)
                {
                    if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
                    {
                        intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3];
                    }
                    else
                    {
                        intra4x4PredModeA = AVC_I4_DC;
                    }
                }
                else
                {
                    dcOnlyPredictionFlag = 1;
                }
            }

            /* above neighbor mode (B): from this MB or from mbAddrB */
            if (block_y > 0)
            {
                intra4x4PredModeB = currMB->i4Mode[((block_y-1) << 2) + block_x];
            }
            else
            {
                if (video->intraAvailB)
                {
                    if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
                    {
                        intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x];
                    }
                    else
                    {
                        intra4x4PredModeB = AVC_I4_DC;
                    }
                }
                else
                {
                    dcOnlyPredictionFlag = 1;
                }
            }

            if (dcOnlyPredictionFlag)
            {
                intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
            }

            predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);

            if (prev_intra4x4_pred_mode_flag[bindx])
            {
                currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)predIntra4x4PredMode;
            }
            else
            {
                /* remainder < predicted: use as-is; otherwise skip over the
                   predicted value (subclause 8.3.1.1) */
                if (rem_intra4x4_pred_mode[bindx] < predIntra4x4PredMode)
                {
                    currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)rem_intra4x4_pred_mode[bindx];
                }
                else
                {
                    currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)(rem_intra4x4_pred_mode[bindx] + 1);
                }
            }
            bindx++;
            /* zig through the 2x2 sub-blocks of this partition */
            block_y += (SubBlock_indx & 1) ;
            block_x += (1 - 2 * (SubBlock_indx & 1)) ;
        }
    }
    return AVCDEC_SUCCESS;
}

/* Conceals macroblocks [mbnum_start, mbnum_end) by treating each as a
   skipped P MB predicted from the first entry of RefPicList0.
   Fails when no reference picture is available. */
AVCDec_Status ConcealSlice(AVCDecObject *decvid, int mbnum_start, int mbnum_end)
{
    AVCCommonObj *video = decvid->common;
    AVCMacroblock *currMB ;

    int CurrMbAddr;

    if (video->RefPicList0[0] == NULL)
    {
        return AVCDEC_FAIL;
    }

    for (CurrMbAddr = mbnum_start; CurrMbAddr < mbnum_end; CurrMbAddr++)
    {
        currMB = video->currMB = &(video->mblock[CurrMbAddr]);
        video->mbNum = CurrMbAddr;
        currMB->slice_id = video->slice_id++;  // slice

        /* we can remove this check if we don't support Mbaff. */
        /* we can wrap below into an initMB() function which will also do
        necessary reset of macroblock related parameters. */
        video->mb_x = CurrMbAddr % video->PicWidthInMbs;
        video->mb_y = CurrMbAddr / video->PicWidthInMbs;

        /* check the availability of neighboring macroblocks */
        InitNeighborAvailability(video, CurrMbAddr);

        /* fabricate a skipped MB: 16x16 partition, ref index 0, no residual */
        currMB->mb_intra = FALSE;

        currMB->mbMode = AVC_SKIP;
        currMB->MbPartWidth = currMB->MbPartHeight = 16;
        currMB->NumMbPart = 1;
        currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
            currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;
        currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
            currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
        currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
            currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;
        currMB->QPy = 26;
        currMB->QPc = 26;
        oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*NUM_BLKS_IN_MB);

        currMB->CBP = 0;
        video->cbp4x4 = 0;
        /* for skipped MB, always look at the first entry in RefPicList */
        currMB->RefIdx[0] = currMB->RefIdx[1] =
            currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;
InterMBPrediction(video); video->numMBs--; } return AVCDEC_SUCCESS; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/vlc.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "avcdec_lib.h" #include "avcdec_bitstream.h" //#define PV_ARM_V5 #ifdef PV_ARM_V5 #define PV_CLZ(A,B) __asm{CLZ (A),(B)} \ A -= 16; #else #define PV_CLZ(A,B) while (((B) & 0x8000) == 0) {(B) <<=1; A++;} #endif #define PV_NO_CLZ #ifndef PV_NO_CLZ typedef struct tagVLCNumCoeffTrail { int trailing; int total_coeff; int length; } VLCNumCoeffTrail; typedef struct tagShiftOffset { int shift; int offset; } ShiftOffset; const VLCNumCoeffTrail NumCoeffTrailOnes[3][67] = { {{0, 0, 1}, {1, 1, 2}, {2, 2, 3}, {1, 2, 6}, {0, 1, 6}, {3, 3, 5}, {3, 3, 5}, {3, 5, 7}, {2, 3, 7}, {3, 4, 6}, {3, 4, 6}, {3, 6, 8}, {2, 4, 8}, {1, 3, 8}, {0, 2, 8}, {3, 7, 9}, {2, 5, 9}, {1, 4, 9}, {0, 3, 9}, {3, 8, 10}, {2, 6, 10}, {1, 5, 10}, {0, 4, 10}, {3, 9, 11}, {2, 7, 11}, {1, 6, 11}, {0, 5, 11}, {0, 8, 13}, {2, 9, 13}, {1, 8, 13}, {0, 7, 13}, {3, 10, 13}, {2, 8, 13}, {1, 7, 13}, {0, 6, 13}, {3, 12, 14}, {2, 11, 14}, {1, 10, 14}, {0, 10, 14}, {3, 11, 14}, {2, 10, 14}, {1, 9, 14}, {0, 9, 14}, {3, 14, 15}, {2, 13, 15}, {1, 12, 15}, {0, 12, 15}, {3, 13, 15}, {2, 12, 15}, 
{1, 11, 15}, {0, 11, 15}, {3, 16, 16}, {2, 15, 16}, {1, 15, 16}, {0, 14, 16}, {3, 15, 16}, {2, 14, 16}, {1, 14, 16}, {0, 13, 16}, {0, 16, 16}, {2, 16, 16}, {1, 16, 16}, {0, 15, 16}, {1, 13, 15}, { -1, -1, -1}, { -1, -1, -1}, { -1, -1, -1}}, {{1, 1, 2}, {0, 0, 2}, {3, 4, 4}, {3, 3, 4}, {2, 2, 3}, {2, 2, 3}, {3, 6, 6}, {2, 3, 6}, {1, 3, 6}, {0, 1, 6}, {3, 5, 5}, {3, 5, 5}, {1, 2, 5}, {1, 2, 5}, {3, 7, 6}, {2, 4, 6}, {1, 4, 6}, {0, 2, 6}, {3, 8, 7}, {2, 5, 7}, {1, 5, 7}, {0, 3, 7}, {0, 5, 8}, {2, 6, 8}, {1, 6, 8}, {0, 4, 8}, {3, 9, 9}, {2, 7, 9}, {1, 7, 9}, {0, 6, 9}, {3, 11, 11}, {2, 9, 11}, {1, 9, 11}, {0, 8, 11}, {3, 10, 11}, {2, 8, 11}, {1, 8, 11}, {0, 7, 11}, {0, 11, 12}, {2, 11, 12}, {1, 11, 12}, {0, 10, 12}, {3, 12, 12}, {2, 10, 12}, {1, 10, 12}, {0, 9, 12}, {3, 14, 13}, {2, 13, 13}, {1, 13, 13}, {0, 13, 13}, {3, 13, 13}, {2, 12, 13}, {1, 12, 13}, {0, 12, 13}, {1, 15, 14}, {0, 15, 14}, {2, 15, 14}, {1, 14, 14}, {2, 14, 13}, {2, 14, 13}, {0, 14, 13}, {0, 14, 13}, {3, 16, 14}, {2, 16, 14}, {1, 16, 14}, {0, 16, 14}, {3, 15, 13}}, {{3, 7, 4}, {3, 6, 4}, {3, 5, 4}, {3, 4, 4}, {3, 3, 4}, {2, 2, 4}, {1, 1, 4}, {0, 0, 4}, {1, 5, 5}, {2, 5, 5}, {1, 4, 5}, {2, 4, 5}, {1, 3, 5}, {3, 8, 5}, {2, 3, 5}, {1, 2, 5}, {0, 3, 6}, {2, 7, 6}, {1, 7, 6}, {0, 2, 6}, {3, 9, 6}, {2, 6, 6}, {1, 6, 6}, {0, 1, 6}, {0, 7, 7}, {0, 6, 7}, {2, 9, 7}, {0, 5, 7}, {3, 10, 7}, {2, 8, 7}, {1, 8, 7}, {0, 4, 7}, {3, 12, 8}, {2, 11, 8}, {1, 10, 8}, {0, 9, 8}, {3, 11, 8}, {2, 10, 8}, {1, 9, 8}, {0, 8, 8}, {0, 12, 9}, {2, 13, 9}, {1, 12, 9}, {0, 11, 9}, {3, 13, 9}, {2, 12, 9}, {1, 11, 9}, {0, 10, 9}, {1, 15, 10}, {0, 14, 10}, {3, 14, 10}, {2, 14, 10}, {1, 14, 10}, {0, 13, 10}, {1, 13, 9}, {1, 13, 9}, {1, 16, 10}, {0, 15, 10}, {3, 15, 10}, {2, 15, 10}, {3, 16, 10}, {2, 16, 10}, {0, 16, 10}, { -1, -1, -1}, { -1, -1, -1}, { -1, -1, -1}, { -1, -1, -1}} }; const ShiftOffset NumCoeffTrailOnes_indx[3][15] = { {{15, -1}, {14, 0}, {13, 1}, {10, -1}, {9, 3}, {8, 7}, {7, 11}, {6, 15}, {5, 19}, {3, 19}, {2, 27}, 
{1, 35}, {0, 43}, {0, 55}, {1, 62}}, {{14, -2}, {12, -2}, {10, -2}, {10, 10}, {9, 14}, {8, 18}, {7, 22}, {5, 22}, {4, 30}, {3, 38}, {2, 46}, {2, 58}, {3, 65}, {16, 0}, {16, 0}}, {{12, -8}, {11, 0}, {10, 8}, {9, 16}, {8, 24}, {7, 32}, {6, 40}, {6, 52}, {6, 58}, {6, 61}, {16, 0}, {16, 0}, {16, 0}, {16, 0}, {16, 0}} }; const static int nC_table[8] = {0, 0, 1, 1, 2, 2, 2, 2}; #endif /** See algorithm in subclause 9.1, Table 9-1, Table 9-2. */ AVCDec_Status ue_v(AVCDecBitstream *bitstream, uint *codeNum) { uint temp, tmp_cnt; int leading_zeros = 0; BitstreamShowBits(bitstream, 16, &temp); tmp_cnt = temp | 0x1; PV_CLZ(leading_zeros, tmp_cnt) if (leading_zeros < 8) { *codeNum = (temp >> (15 - (leading_zeros << 1))) - 1; BitstreamFlushBits(bitstream, (leading_zeros << 1) + 1); } else { BitstreamReadBits(bitstream, (leading_zeros << 1) + 1, &temp); *codeNum = temp - 1; } return AVCDEC_SUCCESS; } /** See subclause 9.1.1, Table 9-3 */ AVCDec_Status se_v(AVCDecBitstream *bitstream, int *value) { uint temp, tmp_cnt; int leading_zeros = 0; BitstreamShowBits(bitstream, 16, &temp); tmp_cnt = temp | 0x1; PV_CLZ(leading_zeros, tmp_cnt) if (leading_zeros < 8) { temp >>= (15 - (leading_zeros << 1)); BitstreamFlushBits(bitstream, (leading_zeros << 1) + 1); } else { BitstreamReadBits(bitstream, (leading_zeros << 1) + 1, &temp); } *value = temp >> 1; if (temp & 0x01) // lsb is signed bit *value = -(*value); // leading_zeros = temp >> 1; // *value = leading_zeros - (leading_zeros*2*(temp&1)); return AVCDEC_SUCCESS; } AVCDec_Status se_v32bit(AVCDecBitstream *bitstream, int32 *value) { int leadingZeros; uint32 infobits; uint32 codeNum; if (AVCDEC_SUCCESS != GetEGBitstring32bit(bitstream, &leadingZeros, &infobits)) return AVCDEC_FAIL; codeNum = (1 << leadingZeros) - 1 + infobits; *value = (codeNum + 1) / 2; if ((codeNum & 0x01) == 0) // lsb is signed bit *value = -(*value); return AVCDEC_SUCCESS; } AVCDec_Status te_v(AVCDecBitstream *bitstream, uint *value, uint range) { if (range > 1) { 
ue_v(bitstream, value); } else { BitstreamRead1Bit(bitstream, value); *value = 1 - (*value); } return AVCDEC_SUCCESS; } /* This function is only used for syntax with range from -2^31 to 2^31-1 */ /* only a few of them in the SPS and PPS */ AVCDec_Status GetEGBitstring32bit(AVCDecBitstream *bitstream, int *leadingZeros, uint32 *infobits) { int bit_value; uint info_temp; *leadingZeros = 0; BitstreamRead1Bit(bitstream, (uint*)&bit_value); while (!bit_value) { (*leadingZeros)++; BitstreamRead1Bit(bitstream, (uint*)&bit_value); } if (*leadingZeros > 0) { if (sizeof(uint) == 4) /* 32 bit machine */ { BitstreamReadBits(bitstream, *leadingZeros, (uint*)&info_temp); *infobits = (uint32)info_temp; } else if (sizeof(uint) == 2) /* 16 bit machine */ { *infobits = 0; if (*leadingZeros > 16) { BitstreamReadBits(bitstream, 16, (uint*)&info_temp); (*leadingZeros) -= 16; *infobits = ((uint32)info_temp) << (*leadingZeros); } BitstreamReadBits(bitstream, *leadingZeros, (uint*)&info_temp); *infobits |= (uint32)info_temp ; } } else *infobits = 0; return AVCDEC_SUCCESS; } /* see Table 9-4 assignment of codeNum to values of coded_block_pattern. 
 */
/* Table 9-4: maps a decoded ue(v) codeNum to coded_block_pattern.
 * Column 0 is used for Intra_4x4 macroblocks, column 1 for all other
 * (inter) macroblock types.  Per the commented-out lines in DecodeCBP
 * below, the low 4 bits of a CBP value are the luma part and the bits
 * above them the chroma part. */
const static uint8 MapCBP[48][2] =
{
    {47, 0}, {31, 16}, {15, 1}, { 0, 2}, {23, 4}, {27, 8}, {29, 32}, {30, 3}, { 7, 5}, {11, 10},
    {13, 12}, {14, 15}, {39, 47}, {43, 7}, {45, 11}, {46, 13}, {16, 14}, { 3, 6}, { 5, 9}, {10, 31},
    {12, 35}, {19, 37}, {21, 42}, {26, 44}, {28, 33}, {35, 34}, {37, 36}, {42, 40}, {44, 39}, { 1, 43},
    { 2, 45}, { 4, 46}, { 8, 17}, {17, 18}, {18, 20}, {20, 24}, {24, 19}, { 6, 21}, { 9, 26}, {22, 28},
    {25, 23}, {32, 27}, {33, 29}, {34, 30}, {36, 22}, {40, 25}, {38, 38}, {41, 41},
};

/* Decode coded_block_pattern (see Table 9-4 above).
 * Reads one ue(v) codeword from the stream, maps it through MapCBP using
 * the current macroblock prediction mode to pick the column, and stores
 * the result in currMB->CBP.
 * Returns AVCDEC_FAIL when the decoded codeNum is outside the 0..47 range
 * covered by the table, AVCDEC_SUCCESS otherwise. */
AVCDec_Status DecodeCBP(AVCMacroblock *currMB, AVCDecBitstream *stream)
{
    uint codeNum;
    uint coded_block_pattern;

    ue_v(stream, &codeNum);

    if (codeNum > 47)
    {
        return AVCDEC_FAIL;
    }

    /* can get rid of the if _OPTIMIZE */
    if (currMB->mbMode == AVC_I4)
    {
        /* intra 4x4 column */
        coded_block_pattern = MapCBP[codeNum][0];
    }
    else
    {
        /* inter column */
        coded_block_pattern = MapCBP[codeNum][1];
    }

//  currMB->cbpL = coded_block_pattern&0xF;  /* modulo 16 */
//  currMB->cbpC = coded_block_pattern>>4;   /* divide 16 */
    currMB->CBP = coded_block_pattern;

    return AVCDEC_SUCCESS;
}

/* TO BE OPTIMIZED !!!!!
*/ AVCDec_Status ce_TotalCoeffTrailingOnes(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff, int nC) { #ifdef PV_NO_CLZ const static uint8 TotCofNTrail1[75][3] = {{0, 0, 16}/*error */, {0, 0, 16}/*error */, {1, 13, 15}, {1, 13, 15}, {0, 16, 16}, {2, 16, 16}, {1, 16, 16}, {0, 15, 16}, {3, 16, 16}, {2, 15, 16}, {1, 15, 16}, {0, 14, 16}, {3, 15, 16}, {2, 14, 16}, {1, 14, 16}, {0, 13, 16}, {3, 14, 15}, {2, 13, 15}, {1, 12, 15}, {0, 12, 15}, {3, 13, 15}, {2, 12, 15}, {1, 11, 15}, {0, 11, 15}, {3, 12, 14}, {2, 11, 14}, {1, 10, 14}, {0, 10, 14}, {3, 11, 14}, {2, 10, 14}, {1, 9, 14}, {0, 9, 14}, {0, 8, 13}, {2, 9, 13}, {1, 8, 13}, {0, 7, 13}, {3, 10, 13}, {2, 8, 13}, {1, 7, 13}, {0, 6, 13}, {3, 9, 11}, {2, 7, 11}, {1, 6, 11}, {0, 5, 11}, {3, 8, 10}, {2, 6, 10}, {1, 5, 10}, {0, 4, 10}, {3, 7, 9}, {2, 5, 9}, {1, 4, 9}, {0, 3, 9}, {3, 6, 8}, {2, 4, 8}, {1, 3, 8}, {0, 2, 8}, {3, 5, 7}, {2, 3, 7}, {3, 4, 6}, {3, 4, 6}, {1, 2, 6}, {1, 2, 6}, {0, 1, 6}, {0, 1, 6}, {3, 3, 5}, {3, 3, 5}, {3, 3, 5}, {3, 3, 5}, {2, 2, 3}, {1, 1, 2}, {1, 1, 2}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1} }; const static uint8 TotCofNTrail2[84][3] = {{0, 0, 14 /* error */}, {0, 0, 14/*error */}, {3, 15, 13}, {3, 15, 13}, {3, 16, 14}, {2, 16, 14}, {1, 16, 14}, {0, 16, 14}, {1, 15, 14}, {0, 15, 14}, {2, 15, 14}, {1, 14, 14}, {2, 14, 13}, {2, 14, 13}, {0, 14, 13}, {0, 14, 13}, {3, 14, 13}, {2, 13, 13}, {1, 13, 13}, {0, 13, 13}, {3, 13, 13}, {2, 12, 13}, {1, 12, 13}, {0, 12, 13}, {0, 11, 12}, {2, 11, 12}, {1, 11, 12}, {0, 10, 12}, {3, 12, 12}, {2, 10, 12}, {1, 10, 12}, {0, 9, 12}, {3, 11, 11}, {2, 9, 11}, {1, 9, 11}, {0, 8, 11}, {3, 10, 11}, {2, 8, 11}, {1, 8, 11}, {0, 7, 11}, {3, 9, 9}, {2, 7, 9}, {1, 7, 9}, {0, 6, 9}, {0, 5, 8}, {0, 5, 8}, {2, 6, 8}, {2, 6, 8}, {1, 6, 8}, {1, 6, 8}, {0, 4, 8}, {0, 4, 8}, {3, 8, 7}, {2, 5, 7}, {1, 5, 7}, {0, 3, 7}, {3, 7, 6}, {3, 7, 6}, {2, 4, 6}, {2, 4, 6}, {1, 4, 6}, {1, 4, 6}, {0, 2, 6}, {0, 2, 6}, {3, 6, 6}, {2, 3, 6}, {1, 3, 6}, {0, 1, 6}, {3, 5, 5}, {3, 5, 
5}, {1, 2, 5}, {1, 2, 5}, {3, 4, 4}, {3, 3, 4}, {2, 2, 3}, {2, 2, 3}, {1, 1, 2}, {1, 1, 2}, {1, 1, 2}, {1, 1, 2}, {0, 0, 2}, {0, 0, 2}, {0, 0, 2}, {0, 0, 2} }; const static uint8 TotCofNTrail3[64][3] = {{0, 0, 10/*error*/}, {0, 16, 10}, {3, 16, 10}, {2, 16, 10}, {1, 16, 10}, {0, 15, 10}, {3, 15, 10}, {2, 15, 10}, {1, 15, 10}, {0, 14, 10}, {3, 14, 10}, {2, 14, 10}, {1, 14, 10}, {0, 13, 10}, {1, 13, 9}, {1, 13, 9}, {0, 12, 9}, {2, 13, 9}, {1, 12, 9}, {0, 11, 9}, {3, 13, 9}, {2, 12, 9}, {1, 11, 9}, {0, 10, 9}, {3, 12, 8}, {2, 11, 8}, {1, 10, 8}, {0, 9, 8}, {3, 11, 8}, {2, 10, 8}, {1, 9, 8}, {0, 8, 8}, {0, 7, 7}, {0, 6, 7}, {2, 9, 7}, {0, 5, 7}, {3, 10, 7}, {2, 8, 7}, {1, 8, 7}, {0, 4, 7}, {0, 3, 6}, {2, 7, 6}, {1, 7, 6}, {0, 2, 6}, {3, 9, 6}, {2, 6, 6}, {1, 6, 6}, {0, 1, 6}, {1, 5, 5}, {2, 5, 5}, {1, 4, 5}, {2, 4, 5}, {1, 3, 5}, {3, 8, 5}, {2, 3, 5}, {1, 2, 5}, {3, 7, 4}, {3, 6, 4}, {3, 5, 4}, {3, 4, 4}, {3, 3, 4}, {2, 2, 4}, {1, 1, 4}, {0, 0, 4} }; #endif uint code; #ifdef PV_NO_CLZ uint8 *pcode; if (nC < 2) { BitstreamShowBits(stream, 16, &code); if (code >= 8192) { pcode = (uint8*) & (TotCofNTrail1[(code>>13)+65+2][0]); } else if (code >= 2048) { pcode = (uint8*) & (TotCofNTrail1[(code>>9)+50+2][0]); } else if (code >= 1024) { pcode = (uint8*) & (TotCofNTrail1[(code>>8)+46+2][0]); } else if (code >= 512) { pcode = (uint8*) & (TotCofNTrail1[(code>>7)+42+2][0]); } else if (code >= 256) { pcode = (uint8*) & (TotCofNTrail1[(code>>6)+38+2][0]); } else if (code >= 128) { pcode = (uint8*) & (TotCofNTrail1[(code>>5)+34+2][0]); } else if (code >= 64) { pcode = (uint8*) & (TotCofNTrail1[(code>>3)+22+2][0]); } else if (code >= 32) { pcode = (uint8*) & (TotCofNTrail1[(code>>2)+14+2][0]); } else if (code >= 16) { pcode = (uint8*) & (TotCofNTrail1[(code>>1)+6+2][0]); } else { pcode = (uint8*) & (TotCofNTrail1[(code-2)+2][0]); } *TrailingOnes = pcode[0]; *TotalCoeff = pcode[1]; BitstreamFlushBits(stream, pcode[2]); } else if (nC < 4) { BitstreamShowBits(stream, 14, &code); if 
(code >= 4096) { pcode = (uint8*) & (TotCofNTrail2[(code>>10)+66+2][0]); } else if (code >= 2048) { pcode = (uint8*) & (TotCofNTrail2[(code>>8)+54+2][0]); } else if (code >= 512) { pcode = (uint8*) & (TotCofNTrail2[(code>>7)+46+2][0]); } else if (code >= 128) { pcode = (uint8*) & (TotCofNTrail2[(code>>5)+34+2][0]); } else if (code >= 64) { pcode = (uint8*) & (TotCofNTrail2[(code>>3)+22+2][0]); } else if (code >= 32) { pcode = (uint8*) & (TotCofNTrail2[(code>>2)+14+2][0]); } else if (code >= 16) { pcode = (uint8*) & (TotCofNTrail2[(code>>1)+6+2][0]); } else { pcode = (uint8*) & (TotCofNTrail2[code-2+2][0]); } *TrailingOnes = pcode[0]; *TotalCoeff = pcode[1]; BitstreamFlushBits(stream, pcode[2]); } else if (nC < 8) { BitstreamShowBits(stream, 10, &code); if (code >= 512) { pcode = (uint8*) & (TotCofNTrail3[(code>>6)+47+1][0]); } else if (code >= 256) { pcode = (uint8*) & (TotCofNTrail3[(code>>5)+39+1][0]); } else if (code >= 128) { pcode = (uint8*) & (TotCofNTrail3[(code>>4)+31+1][0]); } else if (code >= 64) { pcode = (uint8*) & (TotCofNTrail3[(code>>3)+23+1][0]); } else if (code >= 32) { pcode = (uint8*) & (TotCofNTrail3[(code>>2)+15+1][0]); } else if (code >= 16) { pcode = (uint8*) & (TotCofNTrail3[(code>>1)+7+1][0]); } else { pcode = (uint8*) & (TotCofNTrail3[code-1+1][0]); } *TrailingOnes = pcode[0]; *TotalCoeff = pcode[1]; BitstreamFlushBits(stream, pcode[2]); } else { /* read 6 bit FLC */ BitstreamReadBits(stream, 6, &code); *TrailingOnes = code & 3; *TotalCoeff = (code >> 2) + 1; if (*TotalCoeff > 16) { *TotalCoeff = 16; // _ERROR } if (code == 3) { *TrailingOnes = 0; (*TotalCoeff)--; } } #else const VLCNumCoeffTrail *ptr; const ShiftOffset *ptr_indx; uint temp, leading_zeros = 0; if (nC < 8) { BitstreamShowBits(stream, 16, &code); temp = code | 1; PV_CLZ(leading_zeros, temp) temp = nC_table[nC]; ptr_indx = &NumCoeffTrailOnes_indx[temp][leading_zeros]; ptr = &NumCoeffTrailOnes[temp][(code >> ptr_indx->shift) + ptr_indx->offset]; *TrailingOnes = ptr->trailing; 
*TotalCoeff = ptr->total_coeff; BitstreamFlushBits(stream, ptr->length); } else { /* read 6 bit FLC */ BitstreamReadBits(stream, 6, &code); *TrailingOnes = code & 3; *TotalCoeff = (code >> 2) + 1; if (*TotalCoeff > 16) { *TotalCoeff = 16; // _ERROR } if (code == 3) { *TrailingOnes = 0; (*TotalCoeff)--; } } #endif return AVCDEC_SUCCESS; } /* TO BE OPTIMIZED !!!!! */ AVCDec_Status ce_TotalCoeffTrailingOnesChromaDC(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff) { AVCDec_Status status; const static uint8 TotCofNTrail5[21][3] = { {3, 4, 7}, {3, 4, 7}, {2, 4, 8}, {1, 4, 8}, {2, 3, 7}, {2, 3, 7}, {1, 3, 7}, {1, 3, 7}, {0, 4, 6}, {0, 3, 6}, {0, 2, 6}, {3, 3, 6}, {1, 2, 6}, {0, 1, 6}, {2, 2, 3}, {0, 0, 2}, {0, 0, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1} }; uint code; uint8 *pcode; status = BitstreamShowBits(stream, 8, &code); if (code >= 32) { pcode = (uint8*) & (TotCofNTrail5[(code>>5)+13][0]); } else if (code >= 8) { pcode = (uint8*) & (TotCofNTrail5[(code>>2)+6][0]); } else { pcode = (uint8*) & (TotCofNTrail5[code][0]); } *TrailingOnes = pcode[0]; *TotalCoeff = pcode[1]; BitstreamFlushBits(stream, pcode[2]); return status; } /* see Table 9-6 */ AVCDec_Status ce_LevelPrefix(AVCDecBitstream *stream, uint *code) { uint temp; uint leading_zeros = 0; BitstreamShowBits(stream, 16, &temp); temp |= 1 ; PV_CLZ(leading_zeros, temp) BitstreamFlushBits(stream, leading_zeros + 1); *code = leading_zeros; return AVCDEC_SUCCESS; } /* see Table 9-7 and 9-8 */ AVCDec_Status ce_TotalZeros(AVCDecBitstream *stream, int *code, int TotalCoeff) { const static uint8 TotZero1[28][2] = {{15, 9}, {14, 9}, {13, 9}, {12, 8}, {12, 8}, {11, 8}, {11, 8}, {10, 7}, {9, 7}, {8, 6}, {8, 6}, {7, 6}, {7, 6}, {6, 5}, {6, 5}, {6, 5}, {6, 5}, {5, 5}, {5, 5}, {5, 5}, {5, 5}, {4, 4}, {3, 4}, {2, 3}, {2, 3}, {1, 3}, {1, 3}, {0, 1} }; const static uint8 TotZero2n3[2][18][2] = {{{14, 6}, {13, 6}, {12, 6}, {11, 6}, {10, 5}, {10, 5}, {9, 5}, {9, 5}, {8, 4}, {7, 4}, {6, 4}, {5, 4}, {4, 3}, {4, 3}, 
{3, 3}, {2, 3}, {1, 3}, {0, 3}}, /*const static uint8 TotZero3[18][2]=*/{{13, 6}, {11, 6}, {12, 5}, {12, 5}, {10, 5}, {10, 5}, {9, 5}, {9, 5}, {8, 4}, {5, 4}, {4, 4}, {0, 4}, {7, 3}, {7, 3}, {6, 3}, {3, 3}, {2, 3}, {1, 3}} }; const static uint8 TotZero4[17][2] = {{12, 5}, {11, 5}, {10, 5}, {0, 5}, {9, 4}, {9, 4}, {7, 4}, {7, 4}, {3, 4}, {3, 4}, {2, 4}, {2, 4}, {8, 3}, {6, 3}, {5, 3}, {4, 3}, {1, 3} }; const static uint8 TotZero5[13][2] = {{11, 5}, {9, 5}, {10, 4}, {8, 4}, {2, 4}, {1, 4}, {0, 4}, {7, 3}, {7, 3}, {6, 3}, {5, 3}, {4, 3}, {3, 3} }; const static uint8 TotZero6to10[5][15][2] = {{{10, 6}, {0, 6}, {1, 5}, {1, 5}, {8, 4}, {8, 4}, {8, 4}, {8, 4}, {9, 3}, {7, 3}, {6, 3}, {5, 3}, {4, 3}, {3, 3}, {2, 3}}, /*const static uint8 TotZero7[15][2]=*/{{9, 6}, {0, 6}, {1, 5}, {1, 5}, {7, 4}, {7, 4}, {7, 4}, {7, 4}, {8, 3}, {6, 3}, {4, 3}, {3, 3}, {2, 3}, {5, 2}, {5, 2}}, /*const static uint8 TotZero8[15][2]=*/{{8, 6}, {0, 6}, {2, 5}, {2, 5}, {1, 4}, {1, 4}, {1, 4}, {1, 4}, {7, 3}, {6, 3}, {3, 3}, {5, 2}, {5, 2}, {4, 2}, {4, 2}}, /*const static uint8 TotZero9[15][2]=*/{{1, 6}, {0, 6}, {7, 5}, {7, 5}, {2, 4}, {2, 4}, {2, 4}, {2, 4}, {5, 3}, {6, 2}, {6, 2}, {4, 2}, {4, 2}, {3, 2}, {3, 2}}, /*const static uint8 TotZero10[11][2]=*/{{1, 5}, {0, 5}, {6, 4}, {6, 4}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {5, 2}, {4, 2}, {3, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}} }; const static uint8 TotZero11[7][2] = {{0, 4}, {1, 4}, {2, 3}, {2, 3}, {3, 3}, {5, 3}, {4, 1}}; const static uint8 TotZero12to15[4][5][2] = { {{3, 1}, {2, 2}, {4, 3}, {1, 4}, {0, 4}}, {{2, 1}, {3, 2}, {1, 3}, {0, 3}, {0, 0}}, {{2, 1}, {1, 2}, {0, 2}, {0, 0}, {0, 0}}, {{1, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}} }; uint temp, mask; int indx; uint8 *pcode; if (TotalCoeff == 1) { BitstreamShowBits(stream, 9, &temp); if (temp >= 256) { pcode = (uint8*) & (TotZero1[27][0]); } else if (temp >= 64) { pcode = (uint8*) & (TotZero1[(temp>>5)+19][0]); } else if (temp >= 8) { pcode = (uint8*) & (TotZero1[(temp>>2)+5][0]); } else { pcode = 
(uint8*) & (TotZero1[temp-1][0]); } } else if (TotalCoeff == 2 || TotalCoeff == 3) { BitstreamShowBits(stream, 6, &temp); if (temp >= 32) { pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][(temp>>3)+10][0]); } else if (temp >= 8) { pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][(temp>>2)+6][0]); } else { pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][temp][0]); } } else if (TotalCoeff == 4) { BitstreamShowBits(stream, 5, &temp); if (temp >= 12) { pcode = (uint8*) & (TotZero4[(temp>>2)+9][0]); } else { pcode = (uint8*) & (TotZero4[temp][0]); } } else if (TotalCoeff == 5) { BitstreamShowBits(stream, 5, &temp); if (temp >= 16) { pcode = (uint8*) & (TotZero5[(temp>>2)+5][0]); } else if (temp >= 2) { pcode = (uint8*) & (TotZero5[(temp>>1)+1][0]); } else { pcode = (uint8*) & (TotZero5[temp][0]); } } else if (TotalCoeff >= 6 && TotalCoeff <= 10) { if (TotalCoeff == 10) { BitstreamShowBits(stream, 5, &temp); } else { BitstreamShowBits(stream, 6, &temp); } if (temp >= 8) { pcode = (uint8*) & (TotZero6to10[TotalCoeff-6][(temp>>3)+7][0]); } else { pcode = (uint8*) & (TotZero6to10[TotalCoeff-6][temp][0]); } } else if (TotalCoeff == 11) { BitstreamShowBits(stream, 4, &temp); if (temp >= 8) { pcode = (uint8*) & (TotZero11[6][0]); } else if (temp >= 4) { pcode = (uint8*) & (TotZero11[(temp>>1)+2][0]); } else { pcode = (uint8*) & (TotZero11[temp][0]); } } else { BitstreamShowBits(stream, (16 - TotalCoeff), &temp); mask = 1 << (15 - TotalCoeff); indx = 0; while ((temp&mask) == 0 && indx < (16 - TotalCoeff)) /* search location of 1 bit */ { mask >>= 1; indx++; } pcode = (uint8*) & (TotZero12to15[TotalCoeff-12][indx]); } *code = pcode[0]; BitstreamFlushBits(stream, pcode[1]); return AVCDEC_SUCCESS; } /* see Table 9-9 */ AVCDec_Status ce_TotalZerosChromaDC(AVCDecBitstream *stream, int *code, int TotalCoeff) { const static uint8 TotZeroChrom1to3[3][8][2] = { {{3, 3}, {2, 3}, {1, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}, {{2, 2}, {2, 2}, {1, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}, {{1, 
1}, {1, 1}, {1, 1}, {1, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}, }; uint temp; uint8 *pcode; BitstreamShowBits(stream, 3, &temp); pcode = (uint8*) & (TotZeroChrom1to3[TotalCoeff-1][temp]); *code = pcode[0]; BitstreamFlushBits(stream, pcode[1]); return AVCDEC_SUCCESS; } /* see Table 9-10 */ AVCDec_Status ce_RunBefore(AVCDecBitstream *stream, int *code, int zerosLeft) { const static int codlen[6] = {1, 2, 2, 3, 3, 3}; /* num bits to read */ const static uint8 RunBeforeTab[6][8][2] = {{{1, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}, /*const static int RunBefore2[4][2]=*/{{2, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}, /*const static int RunBefore3[4][2]=*/{{3, 2}, {2, 2}, {1, 2}, {0, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}, /*const static int RunBefore4[7][2]=*/{{4, 3}, {3, 3}, {2, 2}, {2, 2}, {1, 2}, {1, 2}, {0, 2}, {0, 2}}, /*const static int RunBefore5[7][2]=*/{{5, 3}, {4, 3}, {3, 3}, {2, 3}, {1, 2}, {1, 2}, {0, 2}, {0, 2}}, /*const static int RunBefore6[7][2]=*/{{1, 3}, {2, 3}, {4, 3}, {3, 3}, {6, 3}, {5, 3}, {0, 2}, {0, 2}} }; uint temp; uint8 *pcode; int indx; if (zerosLeft <= 6) { BitstreamShowBits(stream, codlen[zerosLeft-1], &temp); pcode = (uint8*) & (RunBeforeTab[zerosLeft-1][temp][0]); *code = pcode[0]; BitstreamFlushBits(stream, pcode[1]); } else { BitstreamReadBits(stream, 3, &temp); if (temp) { *code = 7 - temp; } else { BitstreamShowBits(stream, 9, &temp); temp <<= 7; temp |= 1; indx = 0; PV_CLZ(indx, temp) *code = 7 + indx; BitstreamFlushBits(stream, indx + 1); } } return AVCDEC_SUCCESS; } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/yuv2rgb.cpp ================================================ /* * yuv2rgb.cpp * * Created on: 29 juil. 
2009 * Author: rglt1266 */ #include #include "yuv2rgb.h" void convert (int width,int height, uint8 *in,uint32 *out){ uint8 *pY; uint8 *pU; uint8 *pV; int Y,U,V; int i,j; int R,G,B,Cr,Cb; /* Init */ pY = in; pU = in + (width*height); pV = pU + (width*height/4); for(i=0;i>8); G = Y - ((88*Cb+183*Cr)>>8); B = Y + ((454*Cb)>>8); if (R>255)R=255; else if (R<0)R=0; if (G>255)G=255; else if (G<0)G=0; if (B>255)B=255; else if (B<0)B=0; /* Write data */ out[((i*width) + j)]=((((R & 0xFF) << 16) | ((G & 0xFF) << 8) | (B & 0xFF))& 0xFFFFFFFF); } } } ================================================ FILE: RtspCamera/jni/avc_h264/dec/src/yuv2rgb.h ================================================ /* * yuv2rgb.h * * Created on: 29 juil. 2009 * Author: rglt1266 */ #include "oscl_types.h" #ifndef YUV2RGB_H_ #define YUV2RGB_H_ void convert (int width,int height, uint8 *in,uint32 *out); #endif /* YUV2RGB_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/enc/Android.mk ================================================ # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This makefile supplies the rules for building a library of JNI code for # use by our example platform shared library. LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_MODULE_TAGS := optional # This is the target being built. LOCAL_MODULE:= libH264Encoder # All of the source files that we will compile. 
LOCAL_SRC_FILES:= \
	src/avcenc_api.cpp \
	src/bitstream_io.cpp \
	src/block.cpp \
	src/findhalfpel.cpp \
	src/header.cpp \
	src/init.cpp \
	src/intra_est.cpp \
	src/motion_comp.cpp \
	src/motion_est.cpp \
	src/rate_control.cpp \
	src/residual.cpp \
	src/sad.cpp \
	src/sad_halfpel.cpp \
	src/slice.cpp \
	src/vlc_encode.cpp \
	src/NativeH264Encoder.cpp \
	src/pvavcencoder.cpp \
	../common/src/mb_access.cpp \
	../common/src/reflist.cpp \
	../common/src/fmo.cpp \
	../common/src/deblock.cpp \
	../common/src/dpb.cpp

# All of the shared libraries we link against.
LOCAL_SHARED_LIBRARIES :=

# No static libraries.
LOCAL_STATIC_LIBRARIES :=

# Also need the JNI headers.
LOCAL_C_INCLUDES += \
	$(JNI_H_INCLUDE)\
	$(LOCAL_PATH)/src \
	$(LOCAL_PATH)/include \
	$(AVC_ROOT)/oscl \
	$(AVC_ROOT)/common/include

# No special compiler flags.
LOCAL_CFLAGS +=

# Link libs (ex logs)
LOCAL_LDLIBS := -llog

# Don't prelink this library.  For more efficient code, you may want
# to add this library to the prelink map and set this to true.
LOCAL_PRELINK_MODULE := false

include $(BUILD_SHARED_LIBRARY)



================================================
FILE: RtspCamera/jni/avc_h264/enc/include/pvavcencoder.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef PVAVCENCODER_H_INCLUDED
#define PVAVCENCODER_H_INCLUDED

#ifndef PVAVCENCODERINTERFACE_H_INCLUDED
#include "pvavcencoderinterface.h"
#endif
#ifndef AVCENC_API_H_INCLUDED
#include "avcenc_api.h"
#endif

/** AVC encoder class interface. See PVAVCEncoderInterface APIs for
virtual functions definitions. Concrete implementation of the
PVAVCEncoderInterface contract on top of the PV AVC encoder library. */
class PVAVCEncoder : public PVAVCEncoderInterface
{
    public:
        /* Static factory; use instead of the (private) constructor. */
        OSCL_IMPORT_REF static PVAVCEncoder* New(void);
        OSCL_IMPORT_REF virtual ~PVAVCEncoder();

        /* Configure the encoder with input format and encoding parameters. */
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL Initialize(TAVCEIInputFormat* aVidInFormat, TAVCEIEncodeParam* aEncParam);
        OSCL_IMPORT_REF virtual int32 GetMaxOutputBufferSize();
        /* Submit one input frame for encoding. */
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL Encode(TAVCEIInputData* aVidIn);
        /* Retrieve SPS/PPS parameter-set NAL units. */
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL GetParameterSet(uint8* paramSet, int32* size, int* nalType);
        /* Retrieve encoded output (possibly one of several NALs/fragments). */
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL GetOutput(TAVCEIOutputData* aVidOut, int *aRemainingBytes);
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL FlushInput();
        virtual TAVCEI_RETVAL CleanupEncoder();

        /* Runtime re-configuration. */
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL UpdateBitRate(int32* aBitRate);
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL UpdateFrameRate(OsclFloat* aFrameRate);
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL UpdateIDRFrameInterval(int32 aIDRFrameInterval);
        /* Request the next encoded frame be an IDR frame. */
        OSCL_IMPORT_REF virtual TAVCEI_RETVAL IDRRequest();

        OSCL_IMPORT_REF virtual int32 GetEncodeWidth(int32 aLayer);
        OSCL_IMPORT_REF virtual int32 GetEncodeHeight(int32 aLayer);
        OSCL_IMPORT_REF virtual OsclFloat GetEncodeFrameRate(int32 aLayer);

        /* for avc encoder lib callback functions */
        int AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers);
        int AVC_FrameBind(int indx, uint8** yuv);
        void AVC_FrameUnbind(int indx);

    private:
        PVAVCEncoder();
        bool Construct(void);
        TAVCEI_RETVAL Init(TAVCEIInputFormat *aVidInFormat, TAVCEIEncodeParam *aEncParam, AVCEncParams& aEncOption);
        void CopyToYUVIn(uint8* YUV, int width, int height);
        /* Map public profile/level enums to the AVC library equivalents. */
        AVCProfile mapProfile(TAVCEIProfile in);
        AVCLevel mapLevel(TAVCEILevel out);

        /* internal enum */
        enum TAVCEncState
        {
            ECreated,
            EInitialized,
            EEncoding
        };
        TAVCEncState iState;   /* current encoder lifecycle state */
        uint32 iId;

        /* Pure virtuals from OsclActiveObject implemented in this derived class */
        /* Source and encoded frame geometry / rate. */
        int iSrcWidth;
        int iSrcHeight;
        int iFrameOrientation;
        OsclFloat iSrcFrameRate;
        int iEncWidth;
        int iEncHeight;
        OsclFloat iEncFrameRate;
        TAVCEIVideoFormat iVideoFormat;

        /* variables needed in operation */
        AVCHandle iAvcHandle;   /* handle into the AVC encoder library */
        AVCFrameIO iVidIn;

        uint8* iYUVIn;
        uint8* iVideoIn;
        uint8* iVideoOut;
        uint32 iTimeStamp;
        uint32 iPacketSize;

        uint8* iOverrunBuffer;
        int iOBSize;

        AVCEnc_Status iEncStatus;
        bool iIDR;
        int iDispOrd;

        /* Decoded-picture-buffer bookkeeping for the library callbacks above. */
        uint8* iDPB;
        bool* iFrameUsed;
        uint8** iFramePtr;
        int iNumFrames;

        /* Tables in color conversion */
        uint8 * iY_Table;
        uint16* iCb_Table;
        uint16* iCr_Table;
        uint16* ipCb_Table;
        uint16* ipCr_Table;

        int iNumLayer;
};

#endif



================================================
FILE: RtspCamera/jni/avc_h264/enc/include/pvavcencoder_factory.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef PVAVCENCODER_FACTORY_H_INCLUDED
#define PVAVCENCODER_FACTORY_H_INCLUDED

#ifndef OSCL_BASE_H_INCLUDED
#include "oscl_base.h"
#endif
#ifndef OSCL_MEM_H_INCLUDED
#include "oscl_mem.h"
#endif

class PVAVCEncoderInterface;

/* Factory for creating/destroying PVAVCEncoder instances.
 * (The original comments below said "PVAVCDecoder" — a copy-paste from the
 * decoder factory header; corrected to match the encoder types used here.) */
class PVAVCEncoderFactory
{
    public:
        /**
         * Creates an instance of a PVAVCEncoder. If the creation fails, this function will leave.
         *
         * @returns A pointer to an instance of PVAVCEncoder as PVAVCEncoderInterface reference or leaves if instantiation fails
         **/
        OSCL_IMPORT_REF static PVAVCEncoderInterface* CreatePVAVCEncoder();

        /**
         * Deletes an instance of PVAVCEncoder and reclaims all allocated resources.
         *
         * @param aVideoEnc The PVAVCEncoder instance to be deleted
         * @returns A status code indicating success or failure of deletion
         **/
        OSCL_IMPORT_REF static bool DeletePVAVCEncoder(PVAVCEncoderInterface* aVideoEnc);
};

#endif



================================================
FILE: RtspCamera/jni/avc_h264/enc/include/pvavcencoderinterface.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef PVAVCENCODERINTERFACE_H_INCLUDED
#define PVAVCENCODERINTERFACE_H_INCLUDED

#ifndef OSCL_BASE_H_INCLUDED
#include "oscl_base.h"
#endif

/* Only the base layer is supported by this build. */
#define AVC_MAX_LAYER 1

/** General returned values. */
enum TAVCEI_RETVAL
{
    EAVCEI_SUCCESS,
    EAVCEI_FAIL,        // upon receiving fail, encoder should be reset
    /** Encode return values */
    EAVCEI_FRAME_DROP,  // current frame is dropped, send in a new frame with new timestamp
    EAVCEI_NOT_READY,   // the previous frame is still being processed
    EAVCEI_INPUT_ERROR, // error with input buffers
    /** GetOutput return values */
    EAVCEI_MORE_DATA,   // there are more data to be retrieve (multiple fragments of a NAL)
    EAVCEI_MORE_NAL     // there is more NAL to be retrieved
} ;

/** Contains supported input format */
enum TAVCEIVideoFormat
{
    EAVCEI_VDOFMT_RGB24,
    EAVCEI_VDOFMT_RGB12,
    EAVCEI_VDOFMT_YUV420,
    EAVCEI_VDOFMT_UYVY,
    EAVCEI_VDOFMT_YUV420SEMIPLANAR
};

/** Type of contents for optimal encoding mode. */
enum TAVCEIEncodingMode
{
    /** Content is encoded as fast as possible with error protection */
    EAVCEI_ENCMODE_TWOWAY,

    /** Content is encoded as fast as possible without error protection */
    EAVCEI_ENCMODE_RECORDER,

    /** Content is encoded with better quality (slow) with error protection */
    EAVCEI_ENCMODE_STREAMING,

    /** Content is encoded with better quality (slow) without error protection */
    EAVCEI_ENCMODE_DOWNLOAD
};

/** Rate control type. */
enum TAVCEIRateControlType
{
    /** Constant quality, variable bit rate, fixed quantization level. */
    EAVCEI_RC_CONSTANT_Q,

    /** Short-term constant bit rate control. */
    EAVCEI_RC_CBR_1,

    /** Long-term constant bit rate control. */
    EAVCEI_RC_VBR_1
};

/** Targeted profile to encode. */
enum TAVCEIProfile
{
    /* Non-scalable profile */
    EAVCEI_PROFILE_DEFAULT,
    EAVCEI_PROFILE_BASELINE,
    EAVCEI_PROFILE_MAIN,
    EAVCEI_PROFILE_EXTENDED,
    EAVCEI_PROFILE_HIGH,
    EAVCEI_PROFILE_HIGH10,
    EAVCEI_PROFILE_HIGH422,
    EAVCEI_PROFILE_HIGH444
};

/** Targeted level to encode.
*/ enum TAVCEILevel { EAVCEI_LEVEL_AUTODETECT, EAVCEI_LEVEL_1, EAVCEI_LEVEL_1B, EAVCEI_LEVEL_11, EAVCEI_LEVEL_12, EAVCEI_LEVEL_13, EAVCEI_LEVEL_2, EAVCEI_LEVEL_21, EAVCEI_LEVEL_22, EAVCEI_LEVEL_3, EAVCEI_LEVEL_31, EAVCEI_LEVEL_32, EAVCEI_LEVEL_4, EAVCEI_LEVEL_41, EAVCEI_LEVEL_42, EAVCEI_LEVEL_5, EAVCEI_LEVEL_51, }; /** Output format */ enum TAVCEIOutputFormat { /** output in byte stream format according to Annex B */ EAVCEI_OUTPUT_ANNEXB, /** output for MP4 file format */ EAVCEI_OUTPUT_MP4, /** output in RTP format according to RFC 3984 */ EAVCEI_OUTPUT_RTP }; /** This structure contains encoder settings. */ struct TAVCEIEncodeParam { /** Specifies an ID that will be used to specify this encoder while returning the bitstream in asynchronous mode. */ uint32 iEncodeID; /** Specifies the targeted profile, and will also specifies available tools for iEncMode. If default is used, encoder will choose its own preferred profile. If autodetect is used, encoder will check other settings and choose the right profile that doesn't have any conflicts. */ TAVCEIProfile iProfile; /** Specifies the target level When present, other settings will be checked against the range allowable by this target level. Fail will returned upon Initialize call. If not known, users must set it to autodetect. Encoder will calculate the right level that doesn't conflict with other settings. */ TAVCEILevel iLevel; /** Specifies whether base only (iNumLayer = 1) or base + enhancement layer (iNumLayer =2 ) is to be used. */ int32 iNumLayer; /** Specifies the width in pixels of the encoded frames. IFrameWidth[0] is for base layer and iFrameWidth[1] is for enhanced layer. */ int iFrameWidth[AVC_MAX_LAYER]; /** Specifies the height in pixels of the encoded frames. IFrameHeight[0] is for base layer and iFrameHeight[1] is for enhanced layer. */ int iFrameHeight[AVC_MAX_LAYER]; /** Specifies the cumulative bit rate in bit per second. 
IBitRate[0] is for base layer and iBitRate[1] is for base+enhanced layer.*/ int iBitRate[AVC_MAX_LAYER]; /** Specifies the cumulative frame rate in frame per second. IFrameRate[0] is for base layer and iFrameRate[1] is for base+enhanced layer. */ OsclFloat iFrameRate[AVC_MAX_LAYER]; /** Specifies the encoding mode. This translates to the complexity of encoding modes and error resilient tools. */ TAVCEIEncodingMode iEncMode; /** Specifies that SPS and PPS are retrieved first and sent out-of-band */ bool iOutOfBandParamSet; /** Specifies the desired output format. */ TAVCEIOutputFormat iOutputFormat; /** Specifies the packet size in bytes which represents the desired number of bytes per NAL. If this number is set to 0, the encoder will encode the entire slice group as one NAL. */ uint32 iPacketSize; /** Specifies the rate control algorithm among one of the following constant Q, CBR and VBR. .*/ TAVCEIRateControlType iRateControlType; /** Specifies the VBV buffer size which determines the end-to-end delay between the encoder and the decoder. The size is in unit of seconds. For download application, the buffer size can be larger than the streaming application. For 2-way application, this buffer shall be kept minimal. For a special case, in VBR mode, iBufferDelay will be set to -1 to allow buffer underflow. */ float iBufferDelay; /** Specifies the initial quantization parameter for the first I-frame. If constant Q rate control is used, this QP will be used for all the I-frames. This number must be set between 1 and 31, otherwise, Initialize() will fail. */ int iIquant[AVC_MAX_LAYER]; /** Specifies the initial quantization parameter for the first P-frame. If constant Q rate control is used, this QP will be used for all the P-frames. This number must be set between 1 and 31, otherwise, Initialize() will fail. */ int iPquant[AVC_MAX_LAYER]; /** Specifies the initial quantization parameter for the first B-frame. 
If constant Q rate control is used, this QP will be used for all the B-frames. This number must be set between 1 and 31, otherwise, Initialize() will fail. */ int iBquant[AVC_MAX_LAYER]; /** Specifies automatic scene detection where I-frame will be used the the first frame in a new scene. */ bool iSceneDetection; /** Specifies the maximum period in seconds between 2 INTRA frames. An INTRA mode is forced to a frame once this interval is reached. When there is only one I-frame is present at the beginning of the clip, iIFrameInterval should be set to -1. For all I-frames coding this number should be set to 0. */ int32 iIFrameInterval; /** According to iIFrameInterval setting, the minimum number of intra MB per frame is optimally calculated for error resiliency. However, when iIFrameInterval is set to -1, iNumIntraMBRefresh must be specified to guarantee the minimum number of intra macroblocks per frame.*/ uint32 iNumIntraMBRefresh; /** Specifies the duration of the clip in millisecond, needed for VBR encode. Set to 0 if unknown.*/ int32 iClipDuration; /** Specify FSI Buffer input */ uint8* iFSIBuff; /** Specify FSI Buffer Length */ int iFSIBuffLength; }; /** Structure for input format information */ struct TAVCEIInputFormat { /** Contains the width in pixels of the input frame. */ int32 iFrameWidth; /** Contains the height in pixels of the input frame. */ int32 iFrameHeight; /** Contains the input frame rate in the unit of frame per second. */ OsclFloat iFrameRate; /** Contains Frame Orientation. Used for RGB input. 1 means Bottom_UP RGB, 0 means Top_Down RGB, -1 for video formats other than RGB*/ int iFrameOrientation; /** Contains the format of the input video, e.g., YUV 4:2:0, UYVY, RGB24, etc. */ TAVCEIVideoFormat iVideoFormat; }; /** Contains the input data information */ struct TAVCEIInputData { /** Pointer to an input frame buffer in input source format.*/ uint8* iSource; /** The corresponding time stamp of the input frame. 
*/ uint32 iTimeStamp; }; /** Contains the output data information */ struct TAVCEIOutputData { /** Pointer to the encoded bitstream buffer. */ uint8* iBitstream; /** The size in bytes of iBStream. */ int32 iBitstreamSize; /** The time stamp of the encoded frame according to the bitstream. */ uint32 iTimeStamp; /** Set to true if this is a fragment of a NAL */ bool iFragment; /** Set to true if this is the last fragment of a NAL*/ bool iLastFragment; /** Set to true if this is a key frame */ bool iKeyFrame; /** Set to true if this is the last NAL of a frame */ bool iLastNAL; /** Pointer to the reconstructed frame buffer in YUV 4:2:0 domain. */ uint8 *iFrame; }; /** \brief This class is the base class for codec specific interface class. The users must maintain an instance of the codec specific class throughout the encoding session. */ class PVAVCEncoderInterface { public: /** \brief Constructor for PVAVCEncoderInterface class. */ virtual ~PVAVCEncoderInterface() {}; /** \brief Initialization function to set the input video format and the encoding parameters. \parm aVidInFormat contains input related attributes. \parm aEncParam contains encoding parameters setting. \return fail if there is any errors. Otherwise, the function returns success.*/ virtual TAVCEI_RETVAL Initialize(TAVCEIInputFormat* aVidInFormat, TAVCEIEncodeParam* aEncParam) = 0; /** \brief Get suggested output buffer size to be allocated such that no frames are dropped. \return Size to be allocated. 0 means the encoder is not initialized. */ virtual int32 GetMaxOutputBufferSize() = 0; /** \brief This function sends in an input video data structure containing a source frame and the associated timestamp. It can start processing such as frame analysis, decision to drop or encode. \parm aVidIn contains one frame and other information of input. 
\return one of these, SUCCESS, FRAME_DROP, NOT_READY, INPUT_ERROR, FAIL */ virtual TAVCEI_RETVAL Encode(TAVCEIInputData* aVidIn) = 0; /** \brief This function returns an array of parameter sets (either SPS or PPS, as specified by NAL TYPE in the first byte. \parm paramSet contains buffer for parameters sets. \parm size is for size of the input/output. \parm nalType is the NAL type according to the standard. \return one of these, SUCCESS, INPUT_ERROR, FAIL */ virtual TAVCEI_RETVAL GetParameterSet(uint8* paramSet, int32* size, int *nalType) = 0; /** \brief This function returns a compressed bitstream. \parm aVidOut is the structure to contain the output information. \return one of these, SUCCESS, MORE_DATA, NOT_READY, INPUT_ERROR, FAIL */ virtual TAVCEI_RETVAL GetOutput(TAVCEIOutputData* aVidOut, int *aRemainingBytes) = 0; /** This function is used to flush all the unencoded frames store inside the encoder (if there exist). It is used for random re-positioning. Or free all the input. Note that if users want to flush output also, it has to retrieve all the output by calling GetOutput. \return SUCCESS or NOT_READY (if the current frame is being used). */ virtual TAVCEI_RETVAL FlushInput() = 0; /** This function cleanup the AVCEI allocated resources. \return SUCCESS or FAIL. If fail, exception should be thrown. */ virtual TAVCEI_RETVAL CleanupEncoder() = 0; /**This function dynamically changes the target bit rate of the encoder while encoding. aBitRate[n] is the new accumulate target bit rate of layer n. \parm aBitRate is an array of the new target bit rates, size of array is the number of layers. \return SUCCESS, INPUT_ERROR or FAIL (if values are invalid) */ virtual TAVCEI_RETVAL UpdateBitRate(int32* aBitRate) = 0; /** This function dynamically changes the target frame rate of the encoder while encoding. 
\parm aFrameRate is an array of new accumulate target frame rate \return SUCCESS, INPUT_ERROR or FAIL (if values are invalid) */ virtual TAVCEI_RETVAL UpdateFrameRate(OsclFloat* aFrameRate) = 0; /** This function dynamically changes the IDR frame update interval while encoding to a new value. \parm aIFrameInterval is a new value of the IDR-frame interval in millisecond. \return SUCCESS or FAIL (if the value is invalid). */ virtual TAVCEI_RETVAL UpdateIDRFrameInterval(int32 aIDRFrameInterval) = 0; /** This function forces an IDR mode to the next frame to be encoded. \return none. */ virtual TAVCEI_RETVAL IDRRequest() = 0; /** This function returns the input width of a specific layer (not necessarily multiple of 16). \param aLayer specifies the layer of interest \return width in pixels. */ virtual int32 GetEncodeWidth(int32 aLayer) = 0; /** This function returns the input height of a specific layer (not necessarily multiple of 16). \param aLayer specifies the layer of interest \return height in pixels. */ virtual int32 GetEncodeHeight(int32 aLayer) = 0; /** This function returns the target encoded frame rate of a specific layer. \param aLayer specifies the layer of interest \return frame rate in fps. 
*/ virtual OsclFloat GetEncodeFrameRate(int32 aLayer) = 0; }; #endif ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/NativeH264Encoder.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 2009 OrangeLabs * * Author: Alexis Gilabert Senar * Date: 2009-07-01 * ------------------------------------------------------------------- */ #define LOG_TAG "NativeEnc" #include "NativeH264Encoder.h" #include "pvavcencoder.h" #include "android/log.h" int iSrcWidth; int iSrcHeight; float iSrcFrameRate; int FrameSize; //int NalComplete = 0; // xxx pa int NalComplete = 1; int SkipNextEncoding = 0; /* variables needed in operation */ PVAVCEncoder *encoder; TAVCEIInputFormat *iInputFormat; TAVCEIEncodeParam *iEncodeParam; TAVCEIInputData *iInData; TAVCEIOutputData *iOutData; TAVCEI_RETVAL status; /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: InitEncoder * Signature: (IIF)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_InitEncoder (JNIEnv *env, jclass iclass, jint width, jint height, jint framerate) { /** * Init */ iSrcWidth = width; iSrcHeight = height; iSrcFrameRate = framerate; FrameSize = (iSrcWidth*iSrcHeight*3)>>1; encoder = PVAVCEncoder::New(); if (encoder==NULL) return 0; iInputFormat = (TAVCEIInputFormat*)malloc(sizeof(TAVCEIInputFormat)); if (iInputFormat==NULL) { delete(encoder); return 0; } iEncodeParam = (TAVCEIEncodeParam*)malloc(sizeof(TAVCEIEncodeParam)); if (iEncodeParam==NULL) { free(iInputFormat); delete(encoder); return 0; } iInData = (TAVCEIInputData*)malloc(sizeof(TAVCEIInputData)); if(iInData==NULL){ free(iEncodeParam); free(iInputFormat); delete(encoder); return 0; } iOutData = (TAVCEIOutputData*)malloc(sizeof(TAVCEIOutputData)); if(iOutData==NULL){ free(iInData); free(iEncodeParam); free(iInputFormat); 
delete(encoder); return 0; } iOutData->iBitstream = (uint8*)malloc(FrameSize); iOutData->iBitstreamSize = FrameSize; /** * Set Encoder params */ iInputFormat->iFrameWidth = width; iInputFormat->iFrameHeight = height; iInputFormat->iFrameRate = (OsclFloat)(framerate); iInputFormat->iFrameOrientation = -1; iInputFormat->iVideoFormat = EAVCEI_VDOFMT_YUV420SEMIPLANAR; iEncodeParam->iEncodeID = 0; iEncodeParam->iProfile = EAVCEI_PROFILE_BASELINE; // xxx pa switch level due to changed screen size and bandwidth // iEncodeParam->iLevel = EAVCEI_LEVEL_1B; iEncodeParam->iLevel = EAVCEI_LEVEL_12; iEncodeParam->iNumLayer = 1; iEncodeParam->iFrameWidth[0] = iInputFormat->iFrameWidth; iEncodeParam->iFrameHeight[0] = iInputFormat->iFrameHeight; iEncodeParam->iBitRate[0] = 64000; iEncodeParam->iFrameRate[0] = (OsclFloat)iInputFormat->iFrameRate; iEncodeParam->iEncMode = EAVCEI_ENCMODE_TWOWAY; // iEncodeParam->iOutOfBandParamSet = true; // xxx pa 120503 set to in-band parameter to trigger SPS and PPS with each IFrame iEncodeParam->iOutOfBandParamSet = false; iEncodeParam->iOutputFormat = EAVCEI_OUTPUT_RTP; iEncodeParam->iPacketSize = 8192; iEncodeParam->iRateControlType = EAVCEI_RC_CBR_1; iEncodeParam->iBufferDelay = (OsclFloat)2.0; iEncodeParam->iIquant[0]=15; iEncodeParam->iPquant[0]=12; iEncodeParam->iBquant[0]=0; iEncodeParam->iSceneDetection = false; // iEncodeParam->iIFrameInterval = 15; // xxx pa 120503 set shorten IFrame intervall for more often SPS/PPS NAL units iEncodeParam->iIFrameInterval = 2; iEncodeParam->iNumIntraMBRefresh = 50; iEncodeParam->iClipDuration = 0; iEncodeParam->iFSIBuff = NULL; iEncodeParam->iFSIBuffLength = 0; /** * Init encoder */ return encoder->Initialize(iInputFormat,iEncodeParam); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: EncodeFrame * Signature: ([BJ)[B */ JNIEXPORT jbyteArray JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_EncodeFrame (JNIEnv *env, jclass iclass, jbyteArray frame, jlong timestamp) { jbyteArray result ; /** * Check NAL */ if (NalComplete == 0){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Checking NAL"); int32 NalSize = 30; int NalType = 0; uint8* NalBuff = (uint8*)malloc(NalSize*sizeof(uint8)); if(encoder->GetParameterSet(NalBuff,&NalSize,&NalType)== EAVCEI_SUCCESS){ result=(env)->NewByteArray(NalSize); (env)->SetByteArrayRegion(result, 0, NalSize, (jbyte*)NalBuff); free(NalBuff); __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Checking NAL GetParameterSet->EAVCEI_SUCCESS status? %d", status); return result; } else { NalComplete = 1; // Now encode video } } // only encode if not in MORE_NAL state jint len = env->GetArrayLength(frame); uint8* data = (uint8*)malloc(len); if (SkipNextEncoding == 0){ /** * EncodeFrame */ env->GetByteArrayRegion (frame, (jint)0, (jint)len, (jbyte*)data); iInData->iSource=(uint8*)data; iInData->iTimeStamp = timestamp; // ==============> status = encoder->Encode(iInData); if(status != EAVCEI_SUCCESS){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode fail with code: %d",status); result=(env)->NewByteArray(0); free(data); return result; } } else { /** * xxx pa skipped encoding due to MORE NAL output signal */ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Skiped encoding"); } int remainingByte = 0; iOutData->iBitstreamSize = FrameSize; // ==============> status = encoder->GetOutput(iOutData,&remainingByte); if(!(status == EAVCEI_SUCCESS || status == EAVCEI_MORE_NAL)){ // if(status != EAVCEI_SUCCESS){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Get output fail with code: %d",status); result=(env)->NewByteArray(0); free(data); return result; } // try to get more NAL if(status == EAVCEI_MORE_NAL){ SkipNextEncoding = 1; } else { // reset flag SkipNextEncoding = 0; } // Copy aOutBuffer into result 
result=(env)->NewByteArray(iOutData->iBitstreamSize); (env)->SetByteArrayRegion(result, 0, iOutData->iBitstreamSize, (jbyte*)iOutData->iBitstream); free(data); return result; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: getLastEncodeStatus * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_getLastEncodeStatus (JNIEnv *env, jclass clazz){ return status; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: DeinitEncoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_DeinitEncoder (JNIEnv *env, jclass clazz){ delete(encoder); free(iInputFormat); free(iEncodeParam); free(iInData); free(iOutData); NalComplete = 0; return 1; } /* * This is called by the VM when the shared library is first loaded. */ jint JNI_OnLoad(JavaVM* vm, void* reserved) { JNIEnv* env = NULL; jint result = -1; if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) { goto bail; } /* success -- return valid version number */ result = JNI_VERSION_1_4; bail: return result; } ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/NativeH264Encoder.cpp__orig ================================================ /* ------------------------------------------------------------------ * Copyright (C) 2009 OrangeLabs * * Author: Alexis Gilabert Senar * Date: 2009-07-01 * ------------------------------------------------------------------- */ #define LOG_TAG "NativeEnc" #include "NativeH264Encoder.h" #include "pvavcencoder.h" #include "android/log.h" int iSrcWidth; int iSrcHeight; float iSrcFrameRate; int FrameSize; int NalComplete = 0; /* variables needed in operation */ PVAVCEncoder *encoder; TAVCEIInputFormat *iInputFormat; TAVCEIEncodeParam *iEncodeParam; TAVCEIInputData *iInData; TAVCEIOutputData 
*iOutData; TAVCEI_RETVAL status; /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: InitEncoder * Signature: (IIF)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_InitEncoder (JNIEnv *env, jclass iclass, jint width, jint height, jint framerate) { /** * Init */ iSrcWidth = width; iSrcHeight = height; iSrcFrameRate = framerate; FrameSize = (iSrcWidth*iSrcHeight*3)>>1; encoder = PVAVCEncoder::New(); if (encoder==NULL) return 0; iInputFormat = (TAVCEIInputFormat*)malloc(sizeof(TAVCEIInputFormat)); if (iInputFormat==NULL) { delete(encoder); return 0; } iEncodeParam = (TAVCEIEncodeParam*)malloc(sizeof(TAVCEIEncodeParam)); if (iEncodeParam==NULL) { free(iInputFormat); delete(encoder); return 0; } iInData = (TAVCEIInputData*)malloc(sizeof(TAVCEIInputData)); if(iInData==NULL){ free(iEncodeParam); free(iInputFormat); delete(encoder); return 0; } iOutData = (TAVCEIOutputData*)malloc(sizeof(TAVCEIOutputData)); if(iOutData==NULL){ free(iInData); free(iEncodeParam); free(iInputFormat); delete(encoder); return 0; } iOutData->iBitstream = (uint8*)malloc(FrameSize); iOutData->iBitstreamSize = FrameSize; /** * Set Encoder params */ iInputFormat->iFrameWidth = width; iInputFormat->iFrameHeight = height; iInputFormat->iFrameRate = (OsclFloat)(framerate); iInputFormat->iFrameOrientation = -1; iInputFormat->iVideoFormat = EAVCEI_VDOFMT_YUV420SEMIPLANAR; iEncodeParam->iEncodeID = 0; iEncodeParam->iProfile = EAVCEI_PROFILE_BASELINE; iEncodeParam->iLevel = EAVCEI_LEVEL_1B; iEncodeParam->iNumLayer = 1; iEncodeParam->iFrameWidth[0] = iInputFormat->iFrameWidth; iEncodeParam->iFrameHeight[0] = iInputFormat->iFrameHeight; iEncodeParam->iBitRate[0] = 64000; iEncodeParam->iFrameRate[0] = (OsclFloat)iInputFormat->iFrameRate; iEncodeParam->iEncMode = EAVCEI_ENCMODE_TWOWAY; iEncodeParam->iOutOfBandParamSet = true; iEncodeParam->iOutputFormat = EAVCEI_OUTPUT_RTP; 
iEncodeParam->iPacketSize = 8192; iEncodeParam->iRateControlType = EAVCEI_RC_CBR_1; iEncodeParam->iBufferDelay = (OsclFloat)2.0; iEncodeParam->iIquant[0]=15; iEncodeParam->iPquant[0]=12; iEncodeParam->iBquant[0]=0; iEncodeParam->iSceneDetection = false; iEncodeParam->iIFrameInterval = 15; iEncodeParam->iNumIntraMBRefresh = 50; iEncodeParam->iClipDuration = 0; iEncodeParam->iFSIBuff = NULL; iEncodeParam->iFSIBuffLength = 0; /** * Init encoder */ return encoder->Initialize(iInputFormat,iEncodeParam); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: EncodeFrame * Signature: ([BJ)[B */ JNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_EncodeFrame (JNIEnv *env, jclass iclass, jbyteArray frame, jlong timestamp) { jbyteArray result ; /** * Check NAL */ if (NalComplete == 0){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Checking NAL"); int32 NalSize = 30; int NalType = 0; uint8* NalBuff = (uint8*)malloc(NalSize*sizeof(uint8)); if(encoder->GetParameterSet(NalBuff,&NalSize,&NalType)== EAVCEI_SUCCESS){ result=(env)->NewByteArray(NalSize); (env)->SetByteArrayRegion(result, 0, NalSize, (jbyte*)NalBuff); free(NalBuff); return result; } else { NalComplete = 1; // Now encode video } } /** * EncodeFrame */ jint len = env->GetArrayLength(frame); uint8* data = (uint8*)malloc(len); env->GetByteArrayRegion (frame, (jint)0, (jint)len, (jbyte*)data); iInData->iSource=(uint8*)data; iInData->iTimeStamp = timestamp; status = encoder->Encode(iInData); if(status != EAVCEI_SUCCESS){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode fail with code: %d",status); result=(env)->NewByteArray(0); free(data); return result; } int remainingByte = 0; iOutData->iBitstreamSize = FrameSize; status = encoder->GetOutput(iOutData,&remainingByte); if(status != EAVCEI_SUCCESS){ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Get output fail with code: %d",status); 
result=(env)->NewByteArray(0); free(data); return result; } // Copy aOutBuffer into result result=(env)->NewByteArray(iOutData->iBitstreamSize); (env)->SetByteArrayRegion(result, 0, iOutData->iBitstreamSize, (jbyte*)iOutData->iBitstream); free(data); return result; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: getLastEncodeStatus * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_getLastEncodeStatus (JNIEnv *env, jclass clazz){ return status; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: DeinitEncoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_DeinitEncoder (JNIEnv *env, jclass clazz){ delete(encoder); free(iInputFormat); free(iEncodeParam); free(iInData); free(iOutData); NalComplete = 0; return 1; } /* * This is called by the VM when the shared library is first loaded. 
*/ jint JNI_OnLoad(JavaVM* vm, void* reserved) { JNIEnv* env = NULL; jint result = -1; if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) { goto bail; } /* success -- return valid version number */ result = JNI_VERSION_1_4; bail: return result; } ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/NativeH264Encoder.h ================================================ /* DO NOT EDIT THIS FILE - it is machine generated */ #include /* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder */ #ifndef _Included_NativeH264Encoder #define _Included_NativeH264Encoder #ifdef __cplusplus extern "C" { #endif /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: InitEncoder * Signature: (IIF)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_InitEncoder (JNIEnv *, jclass, jint, jint, jint); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: EncodeFrame * Signature: ([BJ)[B */ JNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_EncodeFrame (JNIEnv *, jclass, jbyteArray, jlong); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: DeinitEncoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_DeinitEncoder (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder * Method: getLastEncodeStatus * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_getLastEncodeStatus (JNIEnv *env, jclass clazz); #ifdef __cplusplus } #endif #endif ================================================ FILE: 
RtspCamera/jni/avc_h264/enc/src/avcenc_api.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "oscl_types.h" #include "oscl_mem.h" #include "avcenc_api.h" #include "avcenc_lib.h" // xxx pa #define LOG_TAG "avenc_api" #include "android/log.h" /* ======================================================================== */ /* Function : PVAVCGetNALType() */ /* Date : 11/4/2003 */ /* Purpose : Sniff NAL type from the bitstream */ /* In/out : */ /* Return : AVCENC_SUCCESS if succeed, AVCENC_FAIL if fail. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetNALType(unsigned char *bitstream, int size, int *nal_type, int *nal_ref_idc) { int forbidden_zero_bit; if (size > 0) { forbidden_zero_bit = bitstream[0] >> 7; if (forbidden_zero_bit != 0) return AVCENC_FAIL; *nal_ref_idc = (bitstream[0] & 0x60) >> 5; *nal_type = bitstream[0] & 0x1F; return AVCENC_SUCCESS; } return AVCENC_FAIL; } /* ======================================================================== */ /* Function : PVAVCEncGetProfileLevel() */ /* Date : 3/4/2010 */ /* Purpose : Get profile and level type from the bitstream */ /* In/out : */ /* Return : AVCENC_SUCCESS if succeed, AVCENC_FAIL if fail. 
*/
/* Modified : */
/* ======================================================================== */
OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetProfileLevel(AVCHandle* avcHandle,
        AVCProfile* profile, AVCLevel* level)
{
    /* BUGFIX: the original read encvid->common and video->currSeqParams
       BEFORE testing encvid for NULL, so the uninitialized-handle guard
       could never prevent the crash it was meant to avoid. Check first. */
    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
    if (encvid == NULL)
    {
        return AVCENC_UNINITIALIZED;
    }

    AVCCommonObj *video = encvid->common;
    AVCSeqParamSet *seqParam = video->currSeqParams;

    /* Report the profile/level the active sequence parameter set carries. */
    *profile = (AVCProfile)seqParam->profile_idc;
    *level = (AVCLevel)seqParam->level_idc;

    return AVCENC_SUCCESS;
}

/* ======================================================================== */
/* Function : PVAVCEncInitialize() */
/* Date : 3/18/2004 */
/* Purpose : Initialize the encoder library, allocate memory and verify */
/* the profile/level support/settings. */
/* In/out : Encoding parameters. */
/* Return : AVCENC_SUCCESS for success. */
/* Modified : */
/* ======================================================================== */
OSCL_EXPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam,
        void* extSPS, void* extPPS)
{
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize");

    AVCEnc_Status status;
    AVCEncObject *encvid;
    AVCCommonObj *video;
    uint32 *userData = (uint32*) avcHandle->userData;
    int framesize;

    if (avcHandle->AVCObject != NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (AVCObject != NULL) -> return: AVCENC_ALREADY_INITIALIZED");
        return AVCENC_ALREADY_INITIALIZED; /* It's already initialized, need to cleanup first */
    }

    /* not initialized */
    /* allocate videoObject */
    avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncObject), DEFAULT_ATTR);
    if (avcHandle->AVCObject == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (AVCObject == NULL) -> return: AVCENC_MEMORY_FAIL");
        return AVCENC_MEMORY_FAIL;
    }

    encvid = (AVCEncObject*) avcHandle->AVCObject;
    oscl_memset(encvid, 0, sizeof(AVCEncObject)); /* reset everything */
encvid->enc_state = AVCEnc_Initializing; encvid->avcHandle = avcHandle; encvid->common = (AVCCommonObj*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), DEFAULT_ATTR); if (encvid->common == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (encvid->common == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } video = encvid->common; oscl_memset(video, 0, sizeof(AVCCommonObj)); /* allocate bitstream structure */ encvid->bitstream = (AVCEncBitstream*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncBitstream), DEFAULT_ATTR); if (encvid->bitstream == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (encvid->bitstream == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } encvid->bitstream->encvid = encvid; /* to point back for reallocation */ /* allocate sequence parameter set structure */ video->currSeqParams = (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR); if (video->currSeqParams == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (video->currSeqParams == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } oscl_memset(video->currSeqParams, 0, sizeof(AVCSeqParamSet)); /* allocate picture parameter set structure */ video->currPicParams = (AVCPicParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR); if (video->currPicParams == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (video->currPicParams == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } oscl_memset(video->currPicParams, 0, sizeof(AVCPicParamSet)); /* allocate slice header structure */ video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), DEFAULT_ATTR); if (video->sliceHdr == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (video->sliceHdr == NULL) -> return: AVCENC_MEMORY_FAIL"); return 
AVCENC_MEMORY_FAIL; } oscl_memset(video->sliceHdr, 0, sizeof(AVCSliceHeader)); /* allocate encoded picture buffer structure*/ video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), DEFAULT_ATTR); if (video->decPicBuf == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (video->decPicBuf == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } oscl_memset(video->decPicBuf, 0, sizeof(AVCDecPicBuffer)); /* allocate rate control structure */ encvid->rateCtrl = (AVCRateControl*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCRateControl), DEFAULT_ATTR); if (encvid->rateCtrl == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (encvid->rateCtrl == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } oscl_memset(encvid->rateCtrl, 0, sizeof(AVCRateControl)); /* reset frame list, not really needed */ video->currPic = NULL; video->currFS = NULL; encvid->currInput = NULL; video->prevRefPic = NULL; /* now read encParams, and allocate dimension-dependent variables */ /* such as mblock */ status = SetEncodeParam(avcHandle, encParam, extSPS, extPPS); /* initialized variables to be used in SPS*/ if (status != AVCENC_SUCCESS) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (SetEncodeParam() != AVCENC_SUCCESS) -> return: status: %d", status); return status; } if (encParam->use_overrun_buffer == AVC_ON) { /* allocate overrun buffer */ encvid->oBSize = encvid->rateCtrl->cpbSize; if (encvid->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE) { encvid->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE; } encvid->overrunBuffer = (uint8*) avcHandle->CBAVC_Malloc(userData, encvid->oBSize, DEFAULT_ATTR); if (encvid->overrunBuffer == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (encvid->overrunBuffer == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } } else { encvid->oBSize = 0; encvid->overrunBuffer = NULL; } /* allocate frame 
size dependent structures */ framesize = video->FrameHeightInMbs * video->PicWidthInMbs; video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR); if (video->mblock == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncInitialize (video->mblock == NULL) -> return: AVCENC_MEMORY_FAIL"); return AVCENC_MEMORY_FAIL; } video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits * 2, DEFAULT_ATTR); if (video->MbToSliceGroupMap == NULL) { return AVCENC_MEMORY_FAIL; } encvid->mot16x16 = (AVCMV*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMV) * framesize, DEFAULT_ATTR); if (encvid->mot16x16 == NULL) { return AVCENC_MEMORY_FAIL; } oscl_memset(encvid->mot16x16, 0, sizeof(AVCMV)*framesize); encvid->intraSearch = (uint8*) avcHandle->CBAVC_Malloc(userData, sizeof(uint8) * framesize, DEFAULT_ATTR); if (encvid->intraSearch == NULL) { return AVCENC_MEMORY_FAIL; } encvid->min_cost = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(int) * framesize, DEFAULT_ATTR); if (encvid->min_cost == NULL) { return AVCENC_MEMORY_FAIL; } /* initialize motion search related memory */ if (AVCENC_SUCCESS != InitMotionSearchModule(avcHandle)) { return AVCENC_MEMORY_FAIL; } if (AVCENC_SUCCESS != InitRateControlModule(avcHandle)) { return AVCENC_MEMORY_FAIL; } /* intialize function pointers */ encvid->functionPointer = (AVCEncFuncPtr*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncFuncPtr), DEFAULT_ATTR); if (encvid->functionPointer == NULL) { return AVCENC_MEMORY_FAIL; } encvid->functionPointer->SAD_Macroblock = &AVCSAD_Macroblock_C; encvid->functionPointer->SAD_MB_HalfPel[0] = NULL; encvid->functionPointer->SAD_MB_HalfPel[1] = &AVCSAD_MB_HalfPel_Cxh; encvid->functionPointer->SAD_MB_HalfPel[2] = &AVCSAD_MB_HalfPel_Cyh; encvid->functionPointer->SAD_MB_HalfPel[3] = &AVCSAD_MB_HalfPel_Cxhyh; /* initialize timing control */ encvid->modTimeRef = 0; /* ALWAYS ASSUME THAT TIMESTAMP START 
FROM 0 !!!*/ video->prevFrameNum = 0; encvid->prevCodedFrameNum = 0; encvid->dispOrdPOCRef = 0; if (encvid->outOfBandParamSet == TRUE) { encvid->enc_state = AVCEnc_Encoding_SPS; } else { // xxx pa encvid->enc_state = AVCEnc_Analyzing_Frame; //encvid->enc_state = AVCEnc_Encoding_Frame; } return AVCENC_SUCCESS; } /* ======================================================================== */ /* Function : PVAVCEncGetMaxOutputSize() */ /* Date : 11/29/2008 */ /* Purpose : Return max output buffer size that apps should allocate for */ /* output buffer. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success. */ /* Modified : size */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } *size = encvid->rateCtrl->cpbSize; return AVCENC_SUCCESS; } /* ======================================================================== */ /* Function : PVAVCEncSetInput() */ /* Date : 4/18/2004 */ /* Purpose : To feed an unencoded original frame to the encoder library. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success. 
*/ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput"); AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCEnc_Status status; uint frameNum; if (encvid == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput return: AVCENC_UNINITIALIZED"); return AVCENC_UNINITIALIZED; } if (encvid->enc_state == AVCEnc_WaitingForBuffer) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput goto: RECALL_INITFRAME"); goto RECALL_INITFRAME; } else if (encvid->enc_state != AVCEnc_Analyzing_Frame) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput return: AVCENC_FAIL"); return AVCENC_FAIL; } if (input->pitch > 0xFFFF) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput return: AVCENC_NOT_SUPPORTED"); return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch } /***********************************/ /* Let's rate control decide whether to encode this frame or not */ /* Also set video->nal_unit_type, sliceHdr->slice_type, video->slice_type */ if (AVCENC_SUCCESS != RCDetermineFrameNum(encvid, rateCtrl, input->coding_timestamp, &frameNum)) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput return: AVCENC_SKIPPED_PICTURE"); return AVCENC_SKIPPED_PICTURE; /* not time to encode, thus skipping */ } /* we may not need this line */ //nextFrmModTime = (uint32)((((frameNum+1)*1000)/rateCtrl->frame_rate) + modTimeRef); /* rec. 
time */ //encvid->nextModTime = nextFrmModTime - (encvid->frameInterval>>1) - 1; /* between current and next frame */ encvid->currInput = input; encvid->currInput->coding_order = frameNum; RECALL_INITFRAME: /* initialize and analyze the frame */ status = InitFrame(encvid); if (status == AVCENC_SUCCESS) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput AVCENC_SUCCESS -> enc_state = AVCEnc_Encoding_Frame"); encvid->enc_state = AVCEnc_Encoding_Frame; } else if (status == AVCENC_NEW_IDR) { if (encvid->outOfBandParamSet == TRUE) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput AVCENC_NEW_IDR -> enc_state = AVCEnc_Encoding_Frame"); encvid->enc_state = AVCEnc_Encoding_Frame; } else // assuming that in-band paramset keeps sending new SPS and PPS. { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput AVCENC_NEW_IDR -> enc_state = AVCEnc_Encoding_SPS"); encvid->enc_state = AVCEnc_Encoding_SPS; //video->currSeqParams->seq_parameter_set_id++; //if(video->currSeqParams->seq_parameter_set_id > 31) // range check { video->currSeqParams->seq_parameter_set_id = 0; // reset } } video->sliceHdr->idr_pic_id++; if (video->sliceHdr->idr_pic_id > 65535) // range check { video->sliceHdr->idr_pic_id = 0; // reset } } /* the following logics need to be revisited */ else if (status == AVCENC_PICTURE_READY) // no buffers returned back to the encoder { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput AVCENC_PICTURE_READY -> enc_state = AVCEnc_WaitingForBuffer"); encvid->enc_state = AVCEnc_WaitingForBuffer; // Input accepted but can't continue // need to free up some memory before proceeding with Encode } __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncSetInput return: final status"); return status; // return status, including the AVCENC_FAIL case and all 3 above. 
} /* ======================================================================== */ /* Function : PVAVCEncodeNAL() */ /* Date : 4/29/2004 */ /* Purpose : To encode one NAL/slice. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, unsigned char *buffer, unsigned int *buf_nal_size, int *nal_type) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCCommonObj *video = encvid->common; AVCEncBitstream *bitstream = encvid->bitstream; AVCEnc_Status status; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL: %d",encvid->enc_state); switch (encvid->enc_state) { case AVCEnc_Initializing: return AVCENC_UNINITIALIZED; case AVCEnc_Encoding_SPS: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL: AVCEnc_Encoding_SPS"); /* initialized the structure */ BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0); BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_SPS); /* encode SPS */ status = EncodeSPS(encvid, bitstream); if (status != AVCENC_SUCCESS) { return status; } /* closing the NAL with trailing bits */ status = BitstreamTrailingBits(bitstream, buf_nal_size); if (status == AVCENC_SUCCESS) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL AVCEnc_Encoding_SPS -> enc_state = AVCEnc_Encoding_PPS"); encvid->enc_state = AVCEnc_Encoding_PPS; video->currPicParams->seq_parameter_set_id = video->currSeqParams->seq_parameter_set_id; video->currPicParams->pic_parameter_set_id++; *nal_type = AVC_NALTYPE_SPS; *buf_nal_size = bitstream->write_pos; } break; case AVCEnc_Encoding_PPS: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL: AVCEnc_Encoding_PPS"); /* initialized the structure */ BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0); BitstreamWriteBits(bitstream, 8, (1 << 5) | 
AVC_NALTYPE_PPS); /* encode PPS */ status = EncodePPS(encvid, bitstream); if (status != AVCENC_SUCCESS) { return status; } /* closing the NAL with trailing bits */ status = BitstreamTrailingBits(bitstream, buf_nal_size); if (status == AVCENC_SUCCESS) { if (encvid->outOfBandParamSet == TRUE) // already extract PPS, SPS { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL AVCEnc_Encoding_PPS -> enc_state = AVCEnc_Analyzing_Frame"); encvid->enc_state = AVCEnc_Analyzing_Frame; } else // SetInput has been called before SPS and PPS. { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL AVCEnc_Encoding_PPS -> enc_state = AVCEnc_Encoding_Frame"); encvid->enc_state = AVCEnc_Encoding_Frame; } *nal_type = AVC_NALTYPE_PPS; *buf_nal_size = bitstream->write_pos; } break; case AVCEnc_Encoding_Frame: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL: AVCEnc_Encoding_Frame"); /* initialized the structure */ BitstreamEncInit(bitstream, buffer, *buf_nal_size, encvid->overrunBuffer, encvid->oBSize); BitstreamWriteBits(bitstream, 8, (video->nal_ref_idc << 5) | (video->nal_unit_type)); /* Re-order the reference list according to the ref_pic_list_reordering() */ /* We don't have to reorder the list for the encoder here. This can only be done after we encode this slice. We can run thru a second-pass to see if new ordering would save more bits. Too much delay !! 
*/ /* status = ReOrderList(video);*/ status = InitSlice(encvid); if (status != AVCENC_SUCCESS) { return status; } /* when we have everything, we encode the slice header */ status = EncodeSliceHeader(encvid, bitstream); if (status != AVCENC_SUCCESS) { return status; } status = AVCEncodeSlice(encvid); video->slice_id++; /* closing the NAL with trailing bits */ BitstreamTrailingBits(bitstream, buf_nal_size); *buf_nal_size = bitstream->write_pos; encvid->rateCtrl->numFrameBits += ((*buf_nal_size) << 3); *nal_type = video->nal_unit_type; if (status == AVCENC_PICTURE_READY) { status = RCUpdateFrame(encvid); if (status == AVCENC_SKIPPED_PICTURE) /* skip current frame */ { DPBReleaseCurrentFrame(avcHandle, video); __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL AVCEnc_Encoding_Frame+AVCENC_SKIPPED_PICTURE -> enc_state = AVCEnc_Analyzing_Frame"); encvid->enc_state = AVCEnc_Analyzing_Frame; return status; } /* perform loop-filtering on the entire frame */ DeblockPicture(video); /* update the original frame array */ encvid->prevCodedFrameNum = encvid->currInput->coding_order; /* store the encoded picture in the DPB buffer */ StorePictureInDPB(avcHandle, video); if (video->currPic->isReference) { video->PrevRefFrameNum = video->sliceHdr->frame_num; } /* update POC related variables */ PostPOC(video); __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL AVCEnc_Encoding_Frame -> enc_state = AVCEnc_Analyzing_Frame"); encvid->enc_state = AVCEnc_Analyzing_Frame; status = AVCENC_PICTURE_READY; } break; default: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "PVAVCEncodeNAL: status = WRONG STATE"); status = AVCENC_WRONG_STATE; } return status; } /* ======================================================================== */ /* Function : PVAVCEncGetOverrunBuffer() */ /* Purpose : To retrieve the overrun buffer. Check whether overrun buffer */ /* is used or not before returning */ /* In/out : */ /* Return : Pointer to the internal overrun buffer. 
*/
/* Modified :                                                               */
/* ======================================================================== */
OSCL_EXPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle)
{
    /* NOTE(review): no NULL check is possible here because the return value
       cannot convey an error; callers must only invoke this with an
       initialized handle -- confirm. */
    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
    AVCEncBitstream *bitstream = encvid->bitstream;

    if (bitstream->overrunBuffer == bitstream->bitstreamBuffer) /* OB is used */
    {
        return encvid->overrunBuffer;
    }
    else
    {
        return NULL;
    }
}

/* ======================================================================== */
/* Function : PVAVCEncGetRecon()                                            */
/* Date     : 4/29/2004                                                     */
/* Purpose  : To retrieve the most recently encoded frame.                  */
/*            assume that user will make a copy if they want to hold on     */
/*            to it. Otherwise, it is not guaranteed to be reserved.        */
/*            Most applications prefer to see original frame rather than    */
/*            reconstructed frame. So, we stay away from complex            */
/*            buffering mechanism. If needed, can be added later.           */
/* In/out   :                                                               */
/* Return   : AVCENC_SUCCESS for success.                                   */
/* Modified :                                                               */
/* ======================================================================== */
OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon)
{
    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
    AVCFrameStore *currFS;

    /* BUGFIX: validate the encoder object BEFORE dereferencing it.  The
       original read encvid->common->currFS first. */
    if (encvid == NULL)
    {
        return AVCENC_UNINITIALIZED;
    }

    currFS = encvid->common->currFS;

    recon->YCbCr[0] = currFS->frame.Sl;
    recon->YCbCr[1] = currFS->frame.Scb;
    recon->YCbCr[2] = currFS->frame.Scr;
    recon->height = currFS->frame.height;
    recon->pitch = currFS->frame.pitch;
    recon->disp_order = currFS->PicOrderCnt;
    recon->coding_order = currFS->FrameNum;
    recon->id = (uint32) currFS->base_dpb; /* use the pointer as the id */

    currFS->IsOutputted |= 1;

    return AVCENC_SUCCESS;
}

/* Placeholder: recon frames are not ref-counted yet, so releasing is a no-op. */
OSCL_EXPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon)
{
    OSCL_UNUSED_ARG(avcHandle);
    OSCL_UNUSED_ARG(recon);

    return AVCENC_SUCCESS; //for now
}

/* ======================================================================== */
/* Function : PVAVCCleanUpEncoder() */ /* Date : 4/18/2004 */ /* Purpose : To clean up memories allocated by PVAVCEncInitialize() */ /* In/out : */ /* Return : AVCENC_SUCCESS for success. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF void PVAVCCleanUpEncoder(AVCHandle *avcHandle) { AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject; AVCCommonObj *video; uint32 *userData = (uint32*) avcHandle->userData; if (encvid != NULL) { CleanMotionSearchModule(avcHandle); CleanupRateControlModule(avcHandle); if (encvid->functionPointer != NULL) { avcHandle->CBAVC_Free(userData, (int)encvid->functionPointer); } if (encvid->min_cost) { avcHandle->CBAVC_Free(userData, (int)encvid->min_cost); } if (encvid->intraSearch) { avcHandle->CBAVC_Free(userData, (int)encvid->intraSearch); } if (encvid->mot16x16) { avcHandle->CBAVC_Free(userData, (int)encvid->mot16x16); } if (encvid->rateCtrl) { avcHandle->CBAVC_Free(userData, (int)encvid->rateCtrl); } if (encvid->overrunBuffer) { avcHandle->CBAVC_Free(userData, (int)encvid->overrunBuffer); } video = encvid->common; if (video != NULL) { if (video->MbToSliceGroupMap) { avcHandle->CBAVC_Free(userData, (int)video->MbToSliceGroupMap); } if (video->mblock != NULL) { avcHandle->CBAVC_Free(userData, (int)video->mblock); } if (video->decPicBuf != NULL) { CleanUpDPB(avcHandle, video); avcHandle->CBAVC_Free(userData, (int)video->decPicBuf); } if (video->sliceHdr != NULL) { avcHandle->CBAVC_Free(userData, (int)video->sliceHdr); } if (video->currPicParams != NULL) { if (video->currPicParams->slice_group_id) { avcHandle->CBAVC_Free(userData, (int)video->currPicParams->slice_group_id); } avcHandle->CBAVC_Free(userData, (int)video->currPicParams); } if (video->currSeqParams != NULL) { avcHandle->CBAVC_Free(userData, (int)video->currSeqParams); } if (encvid->bitstream != NULL) { avcHandle->CBAVC_Free(userData, (int)encvid->bitstream); } if (video != NULL) { 
avcHandle->CBAVC_Free(userData, (int)video); } } avcHandle->CBAVC_Free(userData, (int)encvid); avcHandle->AVCObject = NULL; } return ; } /* ======================================================================== */ /* Function : PVAVCEncUpdateBitRate() */ /* Date : 2/20/2010 */ /* Purpose : Update bitrate while encoding. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success, else fail. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCSeqParamSet *seqParam = video->currSeqParams; int lev_idx; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } // only allow changing bit rate right after encoding a frame and before a new frame is analyzed. if (encvid->enc_state != AVCEnc_Analyzing_Frame) { return AVCENC_WRONG_STATE; } if (bitrate && rateCtrl->cpbSize && (rateCtrl->rcEnable == TRUE)) { // verify level constraint // Note we keep the same cbpsize, hence the vbv delay will be affected. lev_idx = mapLev2Idx[seqParam->level_idc]; if (bitrate > (uint32)(MaxBR[lev_idx]*1000)) { return AVCENC_FAIL; } rateCtrl->bitRate = bitrate; // update other rate control parameters RCUpdateParams(rateCtrl, encvid); return AVCENC_SUCCESS; } else { return AVCENC_FAIL; } } /* ======================================================================== */ /* Function : PVAVCEncUpdateFrameRate() */ /* Date : 2/20/2010 */ /* Purpose : Update frame rate while encoding. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success, else fail. */ /* Limitation: Changing frame rate will affect the first IDR frame coming */ /* after this call. It may come earlier or later than expected */ /* but after this first IDR frame, the IDR period will be back */ /* to normal. 
*/ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCSeqParamSet *seqParam = video->currSeqParams; int mb_per_sec; int lev_idx; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } // only allow changing frame rate right after encoding a frame and before a new frame is analyzed. if (encvid->enc_state != AVCEnc_Analyzing_Frame) { return AVCENC_WRONG_STATE; } if (num && denom && (rateCtrl->rcEnable == TRUE)) { mb_per_sec = ((video->PicSizeInMbs * num) + denom - 1) / denom; // copy some code from VerifyLevel here lev_idx = mapLev2Idx[seqParam->level_idc]; if (mb_per_sec > MaxMBPS[lev_idx]) { return AVCENC_FAIL; } rateCtrl->frame_rate = (OsclFloat)num / denom; // update other rate control parameters RCUpdateParams(rateCtrl, encvid); return AVCENC_SUCCESS; } else { return AVCENC_FAIL; } return AVCENC_FAIL; } /* ======================================================================== */ /* Function : PVAVCEncUpdateIDRInterval() */ /* Date : 2/20/2010 */ /* Purpose : Update IDR interval while encoding. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success, else fail. */ /* Limitation: See PVAVCEncUpdateFrameRate. 
*/ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } if (IDRInterval > (int)video->MaxFrameNum) { return AVCENC_FAIL; } /* Note : IDRInterval defines periodicity of IDR frames after every nPFrames.*/ rateCtrl->idrPeriod = IDRInterval; /* Note, when set to 1 (all I-frame), rate control is turned off */ return AVCENC_SUCCESS; } /* ======================================================================== */ /* Function : PVAVCEncIDRRequest() */ /* Date : 2/20/2010 */ /* Purpose : Request next frame to be IDR. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success, else fail. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCRateControl *rateCtrl = encvid->rateCtrl; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } // only allow changing frame rate right after encoding a frame and before a new frame is analyzed. if (encvid->enc_state != AVCEnc_Analyzing_Frame) { return AVCENC_WRONG_STATE; } rateCtrl->first_frame = 1; return AVCENC_SUCCESS; } /* ======================================================================== */ /* Function : PVAVCEncUpdateIMBRefresh() */ /* Date : 2/20/2010 */ /* Purpose : Update number of minimal I MBs per frame. */ /* In/out : */ /* Return : AVCENC_SUCCESS for success, else fail. 
*/ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB) { AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCCommonObj *video = encvid->common; if (encvid == NULL) { return AVCENC_UNINITIALIZED; } if (numMB <= (int)video->PicSizeInMbs) { rateCtrl->intraMBRate = numMB; return AVCENC_SUCCESS; } return AVCENC_FAIL; } void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats) { AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject; AVCRateControl *rateCtrl = encvid->rateCtrl; avcStats->avgFrameQP = GetAvgFrameQP(rateCtrl); avcStats->numIntraMBs = encvid->numIntraMB; return ; } ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/avcenc_api.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains application function interfaces to the AVC encoder library and necessary type defitionitions and enumerations. 
@publishedAll
*/

#ifndef AVCENC_API_H_INCLUDED
#define AVCENC_API_H_INCLUDED

#ifndef OSCL_BASE_H_INCLUDED
#include "oscl_base.h"
#endif
#ifndef OSCL_TYPES_H_INCLUDED
#include "oscl_types.h"
#endif
#ifndef AVCAPI_COMMON_H_INCLUDED
#include "avcapi_common.h"
#endif

/** This enumeration is used for the status returned from the library interface.
    Negative values are failures; AVCENC_SUCCESS and larger values are
    continuable/informational results. */
typedef enum
{
    /** Fail information, need to add more error code for more specific info */
    AVCENC_TRAILINGONES_FAIL = -35,
    AVCENC_SLICE_EMPTY = -34,
    AVCENC_POC_FAIL = -33,
    AVCENC_CONSECUTIVE_NONREF = -32,
    AVCENC_CABAC_FAIL = -31,
    AVCENC_PRED_WEIGHT_TAB_FAIL = -30,
    AVCENC_DEC_REF_PIC_MARK_FAIL = -29,
    AVCENC_SPS_FAIL = -28,
    AVCENC_BITSTREAM_BUFFER_FULL = -27,
    AVCENC_BITSTREAM_INIT_FAIL = -26,
    AVCENC_CHROMA_QP_FAIL = -25,
    AVCENC_INIT_QS_FAIL = -24,
    AVCENC_INIT_QP_FAIL = -23,
    AVCENC_WEIGHTED_BIPRED_FAIL = -22,
    AVCENC_INVALID_INTRA_PERIOD = -21,
    AVCENC_INVALID_CHANGE_RATE = -20,
    AVCENC_INVALID_BETA_OFFSET = -19,
    AVCENC_INVALID_ALPHA_OFFSET = -18,
    AVCENC_INVALID_DEBLOCK_IDC = -17,
    AVCENC_INVALID_REDUNDANT_PIC = -16,
    AVCENC_INVALID_FRAMERATE = -15,
    AVCENC_INVALID_NUM_SLICEGROUP = -14,
    AVCENC_INVALID_POC_LSB = -13,
    AVCENC_INVALID_NUM_REF = -12,
    AVCENC_INVALID_FMO_TYPE = -11,
    AVCENC_ENCPARAM_MEM_FAIL = -10,
    AVCENC_LEVEL_NOT_SUPPORTED = -9,
    AVCENC_LEVEL_FAIL = -8,
    AVCENC_PROFILE_NOT_SUPPORTED = -7,
    AVCENC_TOOLS_NOT_SUPPORTED = -6,
    AVCENC_WRONG_STATE = -5,
    AVCENC_UNINITIALIZED = -4,
    AVCENC_ALREADY_INITIALIZED = -3,
    AVCENC_NOT_SUPPORTED = -2,
    AVCENC_MEMORY_FAIL = AVC_MEMORY_FAIL,
    AVCENC_FAIL = AVC_FAIL,

    /** Generic success value */
    AVCENC_SUCCESS = AVC_SUCCESS,
    AVCENC_PICTURE_READY = 2,
    AVCENC_NEW_IDR = 3,         /* upon getting this, users have to call PVAVCEncodeSPS and
                                   PVAVCEncodePPS to get a new SPS and PPS*/
    AVCENC_SKIPPED_PICTURE = 4  /* continuable error message */

} AVCEnc_Status;

#define MAX_NUM_SLICE_GROUP  8  /* maximum for all the profiles */

/** This structure contains the encoding parameters.
*/
typedef struct tagAVCEncParam
{
    /* if profile/level is set to zero, encoder will choose the closest one for you */
    AVCProfile profile; /* profile of the bitstream to be compliant with*/
    AVCLevel   level;   /* level of the bitstream to be compliant with*/

    int width;      /* width of an input frame in pixel */
    int height;     /* height of an input frame in pixel */

    int poc_type;   /* picture order count mode, 0,1 or 2 */
    /* for poc_type == 0 */
    uint log2_max_poc_lsb_minus_4;  /* specify maximum value of POC Lsb, range 0..12*/
    /* for poc_type == 1 */
    uint delta_poc_zero_flag;       /* delta POC always zero */
    int offset_poc_non_ref;         /* offset for non-reference pic */
    int offset_top_bottom;          /* offset between top and bottom field */
    uint num_ref_in_cycle;          /* number of reference frame in one cycle */
    int *offset_poc_ref;            /* array of offset for ref pic, dimension [num_ref_in_cycle] */

    int num_ref_frame;  /* number of reference frame used */
    int num_slice_group;    /* number of slice group */
    int fmo_type;   /* 0: interleave, 1: dispersed, 2: foreground with left-over
                       3: box-out, 4:raster scan, 5:wipe, 6:explicit */
    /* for fmo_type == 0 */
    uint run_length_minus1[MAX_NUM_SLICE_GROUP];    /* array of size num_slice_group, in round robin fashion */
    /* fmo_type == 2*/
    uint top_left[MAX_NUM_SLICE_GROUP-1];       /* array of co-ordinates of each slice_group */
    uint bottom_right[MAX_NUM_SLICE_GROUP-1];   /* except the last one which is the background. */
    /* fmo_type == 3,4,5 */
    AVCFlag change_dir_flag;    /* slice group change direction flag */
    uint change_rate_minus1;
    /* fmo_type == 6 */
    uint *slice_group;  /* array of size MBWidth*MBHeight */

    AVCFlag db_filter;      /* enable deblocking loop filter */
    int disable_db_idc;     /* 0: filter everywhere, 1: no filter, 2: no filter across slice boundary */
    int alpha_offset;       /* alpha offset range -6,...,6 */
    int beta_offset;        /* beta offset range -6,...,6 */

    AVCFlag constrained_intra_pred; /* constrained intra prediction flag */

    AVCFlag auto_scd;   /* scene change detection on or off */
    int idr_period;     /* idr frame refresh rate in number of target encoded frame (no concept of actual time).*/
    int intramb_refresh;    /* minimum number of intra MB per frame */
    AVCFlag data_par;   /* enable data partitioning */

    AVCFlag fullsearch; /* enable full-pel full-search mode */
    int search_range;   /* search range for motion vector in (-search_range,+search_range) pixels */
    AVCFlag sub_pel;    /* enable sub pel prediction */
    AVCFlag submb_pred; /* enable sub MB partition mode */
    AVCFlag rdopt_mode; /* RD optimal mode selection */
    AVCFlag bidir_pred; /* enable bi-directional for B-slice, this flag forces the encoder to encode
                           any frame with POC less than the previously encoded frame as a B-frame.
                           If it's off, then such frames will remain P-frame. */

    AVCFlag rate_control;   /* rate control enable, on: RC on, off: constant QP */
    int initQP;             /* initial QP */
    uint32 bitrate;         /* target encoding bit rate in bits/second */
    uint32 CPB_size;        /* coded picture buffer in number of bits */
    uint32 init_CBP_removal_delay;  /* initial CBP removal delay in msec */

    uint32 frame_rate;  /* frame rate in the unit of frames per 1000 second */
    /* note, frame rate is only needed by the rate control, AVC is timestamp agnostic. */

    AVCFlag out_of_band_param_set;  /* flag to set whether param sets are to be retrieved up front or not */

    AVCFlag use_overrun_buffer;     /* do not throw away the frame if output buffer is not big enough.
                                       copy excess bits to the overrun buffer */
} AVCEncParams;


/**
This structure contains current frame encoding statistics for debugging purpose.
*/
typedef struct tagAVCEncFrameStats
{
    int avgFrameQP;     /* average frame QP */
    int numIntraMBs;    /* number of intra MBs */
    int numFalseAlarm;
    int numMisDetected;
    int numDetected;

} AVCEncFrameStats;

#ifdef __cplusplus
extern "C"
{
#endif

/** THE FOLLOWINGS ARE APIS */
/**
This function initializes the encoder library. It verifies the validity of the
encoding parameters against the specified profile/level and the list of supported
tools by this library. It allocates necessary memories required to perform encoding.
For re-encoding application, if users want to setup encoder in a more precise way,
users can give the external SPS and PPS to the encoder to follow.
\param "avcHandle"  "Handle to the AVC encoder library object."
\param "encParam"   "Pointer to the encoding parameter structure."
\param "extSPS"     "External SPS used for re-encoding purpose. NULL if not present"
\param "extPPS"     "External PPS used for re-encoding purpose. NULL if not present"
\return "AVCENC_SUCCESS for success,
         AVCENC_NOT_SUPPORTED for the use of unsupported tools,
         AVCENC_MEMORY_FAIL for memory allocation failure,
         AVCENC_FAIL for generic failure."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam, void* extSPS, void* extPPS);


/**
Since the output buffer size is not known prior to encoding a frame, users need to
allocate big enough buffer otherwise, that frame will be dropped. This function returns
the size of the output buffer to be allocated by the users that guarantees to hold one frame.
It follows the CPB spec for a particular level. However, when the users set use_overrun_buffer
flag, this API is useless as excess output bits are saved in the overrun buffer waiting to be
copied out in small chunks, i.e. users can allocate any size of output buffer.
\param "avcHandle" "Handle to the AVC encoder library object."
\param "size" "Pointer to the size to be modified."
\return "AVCENC_SUCCESS for success, AVCENC_UNINITIALIZED when level is not known."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size);

/** Users call this function to provide an input structure to the encoder library which
will keep a list of input structures it receives in case the users call this function
many times before calling PVAVCEncodeSlice. The encoder library will encode them
according to the frame_num order. Users should not modify the content of a particular
frame until this frame is encoded and returned through the CBAVCEnc_ReturnInput()
callback function.
\param "avcHandle" "Handle to the AVC encoder library object."
\param "input" "Pointer to the input structure."
\return "AVCENC_SUCCESS for success,
AVCENC_FAIL if the encoder is not in the right state to take a new input frame.
AVCENC_NEW_IDR for the detection or determination of a new IDR, with this status,
the returned NAL is an SPS NAL,
AVCENC_NO_PICTURE if the input frame coding timestamp is too early, users must
get next frame or adjust the coding timestamp."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input);

/** This function is called to encode a NAL unit which can be an SPS NAL, a PPS NAL
or a VCL (video coding layer) NAL which contains one slice of data. It could be a
fixed number of macroblocks, as specified in the encoder parameters set, or the
maximum number of macroblocks fitted into the given input argument "buffer". The
input frame is taken from the oldest unencoded input frame retrieved by users by
PVAVCEncGetInput API.
\param "avcHandle" "Handle to the AVC encoder library object."
\param "buffer" "Pointer to the output AVC bitstream buffer, the format will be EBSP,
not RBSP."
\param "buf_nal_size" "As input, the size of the buffer in bytes.
This is the physical limitation of the buffer. As output, the size of the EBSP."
\param "nal_type" "Pointer to the NAL type of the returned buffer."
\return "AVCENC_SUCCESS for success of encoding one slice,
AVCENC_PICTURE_READY for the completion of a frame encoding,
AVCENC_FAIL for failure (this should not occur, though)."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, uint8 *buffer, uint *buf_nal_size, int *nal_type);

/** This function sniffs the nal_unit_type such that users can call corresponding APIs.
This function is identical to PVAVCDecGetNALType() in the decoder.
\param "bitstream" "Pointer to the beginning of a NAL unit (start with
forbidden_zero_bit, etc.)."
\param "size" "size of the bitstream (NumBytesInNALunit + 1)."
\param "nal_unit_type" "Pointer to the return value of nal unit type."
\return "AVCENC_SUCCESS if success, AVCENC_FAIL otherwise."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc);

/** This function gets the profile and level.
\param "avcHandle" "Handle to the AVC encoder library object."
\param "profile" "profile value"
\param "level" "level value"
\return "AVCENC_SUCCESS if success, AVCENC_UNINITIALIZED if encoder obj is NULL."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetProfileLevel(AVCHandle* avcHandle, AVCProfile* profile, AVCLevel* level);

/** This function returns the pointer to internal overrun buffer. Users can call this
to query whether the overrun buffer has been used to encode the current NAL.
\param "avcHandle" "Pointer to the handle."
\return "Pointer to overrun buffer if it is used, otherwise, NULL."
*/
OSCL_IMPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle);

/** This function returns the reconstructed frame of the most recently encoded frame.
Note that this frame is not returned to the users yet. Users should only read the
content of this frame.
\param "avcHandle" "Handle to the AVC encoder library object."
\param "output" "Pointer to the input structure."
\return "AVCENC_SUCCESS for success, AVCENC_NO_PICTURE if no picture to be outputted."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon);

/** This function is used to return the reconstructed frame back to the AVC encoder
library in order to be re-used for encoding operation. If users want the content of
it to remain unchanged for a long time, they should make a copy of it and release
the memory back to the encoder. The encoder relies on the id element in the
AVCFrameIO structure, thus users should not change the id value.
\param "avcHandle" "Handle to the AVC decoder library object."
\param "output" "Pointer to the AVCFrameIO structure."
\return "AVCENC_SUCCESS for success, AVCENC_FAIL for fail for id not found."
*/
OSCL_IMPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon);

/** This function performs clean up operation including memory deallocation.
The encoder will also clear the list of input structures it has not released.
This implies that users must keep track of the number of input structure they have
allocated and free them accordingly.
\param "avcHandle" "Handle to the AVC encoder library object."
*/
OSCL_IMPORT_REF void PVAVCCleanUpEncoder(AVCHandle *avcHandle);

/** This function extracts statistics of the current frame. If the encoder has not
finished with the current frame, the result is not accurate.
\param "avcHandle" "Handle to the AVC encoder library object."
\param "avcStats" "Pointer to AVCEncFrameStats structure."
\return "void."
*/
void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats);

/** These functions are used for the modification of encoding parameters.
To be polished.
*/ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate); OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom); OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval); OSCL_IMPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle); OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB); #ifdef __cplusplus } #endif #endif /* _AVCENC_API_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/avcenc_int.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains application function interfaces to the AVC encoder library and necessary type defitionitions and enumerations. @publishedAll */ #ifndef AVCENC_INT_H_INCLUDED #define AVCENC_INT_H_INCLUDED #ifndef AVCINT_COMMON_H_INCLUDED #include "avcint_common.h" #endif #ifndef AVCENC_API_H_INCLUDED #include "avcenc_api.h" #endif /* Definition for the structures below */ #define DEFAULT_ATTR 0 /* default memory attribute */ #define MAX_INPUT_FRAME 30 /* some arbitrary number, it can be much higher than this. 
*/ #define MAX_REF_FRAME 16 /* max size of the RefPicList0 and RefPicList1 */ #define MAX_REF_PIC_LIST 33 #define MIN_QP 0 #define MAX_QP 51 #define SHIFT_QP 12 #define LAMBDA_ACCURACY_BITS 16 #define LAMBDA_FACTOR(lambda) ((int)((double)(1<>LAMBDA_ACCURACY_BITS) #define MV_COST(f,s,cx,cy,px,py) (WEIGHTED_COST(f,mvbits[((cx)<<(s))-px]+mvbits[((cy)<<(s))-py])) #define MV_COST_S(f,cx,cy,px,py) (WEIGHTED_COST(f,mvbits[cx-px]+mvbits[cy-py])) /* for sub-pel search and interpolation */ #define SUBPEL_PRED_BLK_SIZE 576 // 24x24 #define REF_CENTER 75 #define V2Q_H0Q 1 #define V0Q_H2Q 2 #define V2Q_H2Q 3 /* #define V3Q_H0Q 1 #define V3Q_H1Q 2 #define V0Q_H1Q 3 #define V1Q_H1Q 4 #define V1Q_H0Q 5 #define V1Q_H3Q 6 #define V0Q_H3Q 7 #define V3Q_H3Q 8 #define V2Q_H3Q 9 #define V2Q_H0Q 10 #define V2Q_H1Q 11 #define V2Q_H2Q 12 #define V3Q_H2Q 13 #define V0Q_H2Q 14 #define V1Q_H2Q 15 */ #define DEFAULT_OVERRUN_BUFFER_SIZE 1000 // associated with the above cost model const uint8 COEFF_COST[2][16] = { {3, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9} }; //! convert from H.263 QP to H.264 quant given by: quant=pow(2,QP/6) const int QP2QUANT[40] = { 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23, 25, 29, 32, 36, 40, 45, 51, 57, 64, 72, 81, 91 }; /** This enumeration keeps track of the internal status of the encoder whether it is doing something. The encoding flow follows the order in which these states are. @publishedAll */ typedef enum { AVCEnc_Initializing = 0, AVCEnc_Encoding_SPS, AVCEnc_Encoding_PPS, AVCEnc_Analyzing_Frame, AVCEnc_WaitingForBuffer, // pending state AVCEnc_Encoding_Frame, } AVCEnc_State ; /** Bitstream structure contains bitstream related parameters such as the pointer to the buffer, the current byte position and bit position. The content of the bitstreamBuffer will be in EBSP format as the emulation prevention codes are automatically inserted as the RBSP is recorded. 
@publishedAll
*/
typedef struct tagEncBitstream
{
    uint8 *bitstreamBuffer; /* pointer to buffer memory */
    int buf_size;           /* size of the buffer memory */
    int write_pos;          /* next position to write to bitstreamBuffer */
    int count_zeros;        /* count number of consecutive zero */
    uint current_word;      /* byte-swapped (MSB left) current word to write to buffer */
    int bit_left;           /* number of bit left in current_word */
    uint8 *overrunBuffer;   /* extra output buffer to prevent current skip due to output buffer overrun*/
    int oBSize;             /* size of allocated overrun buffer */
    void *encvid;           /* pointer to the main object */
} AVCEncBitstream;

/**
This structure is used for rate control purpose and other performance related control
variables such as, RD cost, statistics, motion search stuffs, etc.
should be in this structure.
@publishedAll
*/
typedef struct tagRDInfo
{
    int QP;             /* quantizer used for this sample point */
    int actual_bits;    /* bits actually produced at that QP */
    OsclFloat mad;      /* mean absolute difference of the frame */
    OsclFloat R_D;      /* rate-distortion measure */
} RDInfo;

typedef struct tagMultiPass
{
    /* multipass rate control data */
    int target_bits;    /* target bits for current frame, = rc->T */
    int actual_bits;    /* actual bits for current frame obtained after encoding, = rc->Rc*/
    int QP;             /* quantization level for current frame, = rc->Qc*/
    int prev_QP;        /* quantization level for previous frame */
    int prev_prev_QP;   /* quantization level for previous frame before last*/
    OsclFloat mad;      /* mad for current frame, = video->avgMAD*/
    int bitrate;        /* bitrate for current frame */
    OsclFloat framerate; /* framerate for current frame*/
    int nRe_Quantized;  /* control variable for multipass encoding, */
    /* 0 : first pass */
    /* 1 : intermediate pass(quantization and VLC loop only) */
    /* 2 : final pass(de-quantization, idct, etc) */
    /* 3 : macroblock level rate control */
    int encoded_frames;     /* counter for all encoded frames */
    int re_encoded_frames;  /* counter for all multipass encoded frames*/
    int re_encoded_times;   /* counter for all times of multipass frame encoding */

    /* Multiple frame prediction*/
    RDInfo **pRDSamples;        /* pRDSamples[30][32],
30->30fps, 32 -> 5 bit quantizer, 32 candidates*/ int framePos; /* specific position in previous multiple frames*/ int frameRange; /* number of overall previous multiple frames */ int samplesPerFrame[30]; /* number of samples per frame, 30->30fps */ /* Bit allocation for scene change frames and high motion frames */ OsclFloat sum_mad; int counter_BTsrc; /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */ int counter_BTdst; /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */ OsclFloat sum_QP; int diff_counter; /* diff_counter = -diff_counter_BTdst, or diff_counter_BTsrc */ /* For target bitrate or framerate update */ OsclFloat target_bits_per_frame; /* = C = bitrate/framerate */ OsclFloat target_bits_per_frame_prev; /* previous C */ OsclFloat aver_mad; /* so-far average mad could replace sum_mad */ OsclFloat aver_mad_prev; /* previous average mad */ int overlapped_win_size; /* transition period of time */ int encoded_frames_prev; /* previous encoded_frames */ } MultiPass; typedef struct tagdataPointArray { int Qp; int Rp; OsclFloat Mp; /* for MB-based RC */ struct tagdataPointArray *next; struct tagdataPointArray *prev; } dataPointArray; typedef struct tagAVCRateControl { /* these parameters are initialized by the users AVCEncParams */ /* bitrate-robustness tradeoff */ uint scdEnable; /* enable scene change detection */ int idrPeriod; /* IDR period in number of frames */ int intraMBRate; /* intra MB refresh rate per frame */ uint dpEnable; /* enable data partitioning */ /* quality-complexity tradeoff */ uint subPelEnable; /* enable quarter pel search */ int mvRange; /* motion vector search range in +/- pixel */ uint subMBEnable; /* enable sub MB prediction mode (4x4, 4x8, 8x4) */ uint rdOptEnable; /* enable RD-opt mode selection */ uint twoPass; /* flag for 2 pass encoding ( for future )*/ uint bidirPred; /* bi-directional prediction for B-frame. 
*/ uint rcEnable; /* enable rate control, '1' on, '0' const QP */ int initQP; /* initial QP */ /* note the following 3 params are for HRD, these triplets can be a series of triplets as the generalized HRD allows. SEI message must be generated in this case. */ /* We no longer have to differentiate between CBR and VBR. The users to the AVC encoder lib will do the mapping from CBR/VBR to these parameters. */ int32 bitRate; /* target bit rate for the overall clip in bits/second*/ int32 cpbSize; /* coded picture buffer size in bytes */ int32 initDelayOffset; /* initial CBP removal delay in bits */ OsclFloat frame_rate; /* frame rate */ int srcInterval; /* source frame rate in msec */ int basicUnit; /* number of macroblocks per BU */ /* Then internal parameters for the operation */ uint first_frame; /* a flag for the first frame */ int lambda_mf; /* for example */ int totalSAD; /* SAD of current frame */ /*******************************************/ /* this part comes from MPEG4 rate control */ int alpha; /* weight for I frame */ int Rs; /*bit rate for the sequence (or segment) e.g., 24000 bits/sec */ int Rc; /*bits used for the current frame. It is the bit count obtained after encoding. */ int Rp; /*bits to be removed from the buffer per picture. */ /*? is this the average one, or just the bits coded for the previous frame */ int Rps; /*bit to be removed from buffer per src frame */ OsclFloat Ts; /*number of seconds for the sequence (or segment). e.g., 10 sec */ OsclFloat Ep; OsclFloat Ec; /*mean absolute difference for the current frame after motion compensation.*/ /*If the macroblock is intra coded, the original spatial pixel values are summed.*/ int Qc; /*quantization level used for the current frame. 
*/ int Nr; /*number of P frames remaining for encoding.*/ int Rr; /*number of bits remaining for encoding this sequence (or segment).*/ int Rr_Old; int T; /*target bit to be used for the current frame.*/ int S; /*number of bits used for encoding the previous frame.*/ int Hc; /*header and motion vector bits used in the current frame. It includes all the information except to the residual information.*/ int Hp; /*header and motion vector bits used in the previous frame. It includes all the information except to the residual information.*/ int Ql; /*quantization level used in the previous frame */ int Bs; /*buffer size e.g., R/2 */ int B; /*current buffer level e.g., R/4 - start from the middle of the buffer */ OsclFloat X1; OsclFloat X2; OsclFloat X11; OsclFloat M; /*safe margin for the buffer */ OsclFloat smTick; /*ratio of src versus enc frame rate */ double remnant; /*remainder frame of src/enc frame for fine frame skipping */ int timeIncRes; /* vol->timeIncrementResolution */ dataPointArray *end; /*quantization levels for the past (20) frames */ int frameNumber; /* ranging from 0 to 20 nodes*/ int w; int Nr_Original; int Nr_Old, Nr_Old2; int skip_next_frame; int Qdep; /* smooth Q adjustment */ int VBR_Enabled; int totalFrameNumber; /* total coded frames, for debugging!!*/ char oFirstTime; int numFrameBits; /* keep track of number of bits of the current frame */ int NumberofHeaderBits; int NumberofTextureBits; int numMBHeaderBits; int numMBTextureBits; double *MADofMB; int32 bitsPerFrame; /* BX rate control, something like TMN8 rate control*/ MultiPass *pMP; int TMN_W; int TMN_TH; int VBV_fullness; int max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/ int encoded_frames; /* counter for all encoded frames */ int low_bound; /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */ int VBV_fullness_offset; /* offset of VBV_fullness, usually is zero, 
but can be changed in H.263 mode*/ /* End BX */ } AVCRateControl; /** This structure is for the motion vector information. */ typedef struct tagMV { int x; int y; uint sad; } AVCMV; /** This structure contains function pointers for different platform dependent implementation of functions. */ typedef struct tagAVCEncFuncPtr { int (*SAD_MB_HalfPel[4])(uint8*, uint8*, int, void *); int (*SAD_Macroblock)(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); } AVCEncFuncPtr; /** This structure contains information necessary for correct padding. */ typedef struct tagPadInfo { int i; int width; int j; int height; } AVCPadInfo; #ifdef HTFM typedef struct tagHTFM_Stat { int abs_dif_mad_avg; uint countbreak; int offsetArray[16]; int offsetRef[16]; } HTFM_Stat; #endif /** This structure is the main object for AVC encoder library providing access to all global variables. It is allocated at PVAVCInitEncoder and freed at PVAVCCleanUpEncoder. @publishedAll */ typedef struct tagEncObject { AVCCommonObj *common; AVCEncBitstream *bitstream; /* for current NAL */ uint8 *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/ int oBSize; /* size of allocated overrun buffer */ /* rate control */ AVCRateControl *rateCtrl; /* pointer to the rate control structure */ /* encoding operation */ AVCEnc_State enc_state; /* encoding state */ AVCFrameIO *currInput; /* pointer to the current input frame */ int currSliceGroup; /* currently encoded slice group id */ int level[24][16], run[24][16]; /* scratch memory */ int leveldc[16], rundc[16]; /* for DC component */ int levelcdc[16], runcdc[16]; /* for chroma DC component */ int numcoefcdc[2]; /* number of coefficient for chroma DC */ int numcoefdc; /* number of coefficients for DC component */ int qp_const; int qp_const_c; /********* intra prediction scratch memory **********************/ uint8 pred_i16[AVCNumI16PredMode][256]; /* save prediction for MB */ uint8 pred_i4[AVCNumI4PredMode][16]; /* save 
prediction for blk */ uint8 pred_ic[AVCNumIChromaMode][128]; /* for 2 chroma */ int mostProbableI4Mode[16]; /* in raster scan order */ /********* motion compensation related variables ****************/ AVCMV *mot16x16; /* Saved motion vectors for 16x16 block*/ AVCMV(*mot16x8)[2]; /* Saved motion vectors for 16x8 block*/ AVCMV(*mot8x16)[2]; /* Saved motion vectors for 8x16 block*/ AVCMV(*mot8x8)[4]; /* Saved motion vectors for 8x8 block*/ /********* subpel position **************************************/ uint32 subpel_pred[SUBPEL_PRED_BLK_SIZE/*<<2*/]; /* all 16 sub-pel positions */ uint8 *hpel_cand[9]; /* pointer to half-pel position */ int best_hpel_pos; /* best position */ uint8 qpel_cand[8][24*16]; /* pointer to quarter-pel position */ int best_qpel_pos; uint8 *bilin_base[9][4]; /* pointer to 4 position at top left of bilinear quarter-pel */ /* need for intra refresh rate */ uint8 *intraSearch; /* Intra Array for MBs to be intra searched */ uint firstIntraRefreshMBIndx; /* keep track for intra refresh */ int i4_sad; /* temporary for i4 mode SAD */ int *min_cost; /* Minimum cost for the all MBs */ int lambda_mode; /* Lagrange parameter for mode selection */ int lambda_motion; /* Lagrange parameter for MV selection */ uint8 *mvbits_array; /* Table for bits spent in the cost funciton */ uint8 *mvbits; /* An offset to the above array. 
*/ /* to speedup the SAD calculation */ void *sad_extra_info; uint8 currYMB[256]; /* interleaved current macroblock in HTFM order */ #ifdef HTFM int nrmlz_th[48]; /* Threshold for fast SAD calculation using HTFM */ HTFM_Stat htfm_stat; /* For statistics collection */ #endif /* statistics */ int numIntraMB; /* keep track of number of intra MB */ /* encoding complexity control */ uint fullsearch_enable; /* flag to enable full-pel full-search */ /* misc.*/ bool outOfBandParamSet; /* flag to enable out-of-band param set */ AVCSeqParamSet extSPS; /* for external SPS */ AVCPicParamSet extPPS; /* for external PPS */ /* time control */ uint32 prevFrameNum; /* previous frame number starting from modTimeRef */ uint32 modTimeRef; /* Reference modTime update every I-Vop*/ uint32 wrapModTime; /* Offset to modTime Ref, rarely used */ uint prevProcFrameNum; /* previously processed frame number, could be skipped */ uint prevCodedFrameNum; /* previously encoded frame number */ /* POC related variables */ uint32 dispOrdPOCRef; /* reference POC is displayer order unit. */ /* Function pointers */ AVCEncFuncPtr *functionPointer; /* store pointers to platform specific functions */ /* Application control data */ AVCHandle *avcHandle; } AVCEncObject; #endif /*AVCENC_INT_H_INCLUDED*/ ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/avcenc_lib.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
* See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** This file contains declarations of internal functions for AVC decoder library. @publishedAll */ #ifndef AVCENC_LIB_H_INCLUDED #define AVCENC_LIB_H_INCLUDED #ifndef AVCLIB_COMMON_H_INCLUDED #include "avclib_common.h" #endif #ifndef AVCENC_INT_H_INCLUDED #include "avcenc_int.h" #endif #ifdef __cplusplus extern "C" { #endif /*------------- block.c -------------------------*/ /** This function perform residue calculation, transform, quantize, inverse quantize, inverse transform and residue compensation on a 4x4 block. \param "encvid" "Pointer to AVCEncObject." \param "blkidx" "raster scan block index of the current 4x4 block." \param "cur" "Pointer to the reconstructed block." \param "org" "Pointer to the original block." \param "coef_cost" "Pointer to the coefficient cost to be filled in and returned." \return "Number of non-zero coefficients." */ int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost); /** This function performs IDCT on an INTER macroblock. \param "video" "Pointer to AVCCommonObj." \param "curL" "Pointer to the origin of the macroblock on the current frame." \param "currMB" "Pointer to the AVCMacroblock structure." \param "picPitch" "Pitch of the current frame." \return "void". */ void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch); /** This function perform residue calculation, transform, quantize, inverse quantize, inverse transform and residue compensation on a macroblock. \param "encvid" "Pointer to AVCEncObject." \param "curL" "Pointer to the reconstructed MB." \param "orgL" "Pointer to the original MB." 
\return "void" */ void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL); /** This function perform residue calculation, transform, quantize, inverse quantize, inverse transform and residue compensation for chroma components of an MB. \param "encvid" "Pointer to AVCEncObject." \param "curC" "Pointer to the reconstructed MB." \param "orgC" "Pointer to the original MB." \param "cr" "Flag whether it is Cr or not." \return "void" */ void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr); /*----------- init.c ------------------*/ /** This function interprets the encoding parameters provided by users in encParam. The results are kept in AVCEncObject, AVCSeqParamSet, AVCPicParamSet and AVCSliceHeader. \param "encvid" "Pointer to AVCEncObject." \param "encParam" "Pointer to AVCEncParam." \param "extSPS" "External SPS template to be followed. NULL if not present." \param "extPPS" "External PPS template to be followed. NULL if not present." \return "see AVCEnc_Status." */ AVCEnc_Status SetEncodeParam(AVCHandle *avcHandle, AVCEncParams *encParam, void *extSPS, void *extPPS); /** This function verifies the encoding parameters whether they meet the set of supported tool by a specific profile. If the profile is not set, it will just find the closest profile instead of verifying it. \param "video" "Pointer to AVCEncObject." \param "seqParam" "Pointer to AVCSeqParamSet." \param "picParam" "Pointer to AVCPicParamSet." \return "AVCENC_SUCCESS if success, AVCENC_PROFILE_NOT_SUPPORTED if the specified profile is not supported by this version of the library, AVCENC_TOOLS_NOT_SUPPORTED if any of the specified encoding tools are not supported by the user-selected profile." */ AVCEnc_Status VerifyProfile(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam); /** This function verifies the encoding parameters whether they meet the requirement for a specific level. 
If the level is not set, it will just find the closest level instead of verifying it. \param "video" "Pointer to AVCEncObject." \param "seqParam" "Pointer to AVCSeqParamSet." \param "picParam" "Pointer to AVCPicParamSet." \return "AVCENC_SUCCESS if success, AVCENC_LEVEL_NOT_SUPPORTED if the specified level is not supported by this version of the library, AVCENC_LEVEL_FAIL if any of the encoding parameters exceed the range of the user-selected level." */ AVCEnc_Status VerifyLevel(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam); /** This funciton initializes the frame encoding by setting poc/frame_num related parameters. it also performs motion estimation. \param "encvid" "Pointer to the AVCEncObject." \return "AVCENC_SUCCESS if success, AVCENC_NO_PICTURE if there is no input picture in the queue to encode, AVCENC_POC_FAIL or AVCENC_CONSECUTIVE_NONREF for POC related errors, AVCENC_NEW_IDR if new IDR is detected." */ AVCEnc_Status InitFrame(AVCEncObject *encvid); /** This function initializes slice header related variables and other variables necessary for decoding one slice. \param "encvid" "Pointer to the AVCEncObject." \return "AVCENC_SUCCESS if success." */ AVCEnc_Status InitSlice(AVCEncObject *encvid); /*----------- header.c ----------------*/ /** This function performs bitstream encoding of the sequence parameter set NAL. \param "encvid" "Pointer to the AVCEncObject." \param "stream" "Pointer to AVCEncBitstream." \return "AVCENC_SUCCESS if success or AVCENC_SPS_FAIL or others for unexpected failure which should not occur. The SPS parameters should all be verified before this function is called." */ AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream); /** This function encodes the VUI parameters into the sequence parameter set bitstream. \param "stream" "Pointer to AVCEncBitstream." \param "vui" "Pointer to AVCVUIParams." \return "nothing." 
*/ void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui); /** This function encodes HRD parameters into the sequence parameter set bitstream \param "stream" "Pointer to AVCEncBitstream." \param "hrd" "Pointer to AVCHRDParams." \return "nothing." */ void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd); /** This function performs bitstream encoding of the picture parameter set NAL. \param "encvid" "Pointer to the AVCEncObject." \param "stream" "Pointer to AVCEncBitstream." \return "AVCENC_SUCCESS if success or AVCENC_PPS_FAIL or others for unexpected failure which should not occur. The SPS parameters should all be verified before this function is called." */ AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream); /** This function encodes slice header information which has been initialized or fabricated prior to entering this funciton. \param "encvid" "Pointer to the AVCEncObject." \param "stream" "Pointer to AVCEncBitstream." \return "AVCENC_SUCCESS if success or bitstream fail statuses." */ AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream); /** This function encodes reference picture list reordering relted syntax. \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCEncBitstream." \param "sliceHdr" "Pointer to AVCSliceHdr." \param "slice_type" "Value of slice_type - 5 if greater than 5." \return "AVCENC_SUCCESS for success and AVCENC_FAIL otherwise." */ AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type); /** This function encodes dec_ref_pic_marking related syntax. \param "video" "Pointer to AVCCommonObj." \param "stream" "Pointer to AVCEncBitstream." \param "sliceHdr" "Pointer to AVCSliceHdr." \return "AVCENC_SUCCESS for success and AVCENC_FAIL otherwise." 
*/ AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr); /** This function initializes the POC related variables and the POC syntax to be encoded to the slice header derived from the disp_order and is_reference flag of the original input frame to be encoded. \param "video" "Pointer to the AVCEncObject." \return "AVCENC_SUCCESS if success, AVCENC_POC_FAIL if the poc type is undefined or AVCENC_CONSECUTIVE_NONREF if there are consecutive non-reference frame for POC type 2." */ AVCEnc_Status InitPOC(AVCEncObject *video); /** This function performs POC related operation after a picture is decoded. \param "video" "Pointer to AVCCommonObj." \return "AVCENC_SUCCESS" */ AVCEnc_Status PostPOC(AVCCommonObj *video); /*----------- bitstream_io.c ----------------*/ /** This function initializes the bitstream structure with the information given by the users. \param "bitstream" "Pointer to the AVCEncBitstream structure." \param "buffer" "Pointer to the unsigned char buffer for output." \param "buf_size" "The size of the buffer in bytes." \param "overrunBuffer" "Pointer to extra overrun buffer." \param "oBSize" "Size of overrun buffer in bytes." \return "AVCENC_SUCCESS if success, AVCENC_BITSTREAM_INIT_FAIL if fail" */ AVCEnc_Status BitstreamEncInit(AVCEncBitstream *bitstream, uint8 *buffer, int buf_size, uint8 *overrunBuffer, int oBSize); /** This function writes the data from the cache into the bitstream buffer. It also adds the emulation prevention code if necessary. \param "stream" "Pointer to the AVCEncBitstream structure." \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail." */ AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream); /** This function writes the codeword into the cache which will eventually be written to the bitstream buffer. \param "stream" "Pointer to the AVCEncBitstream structure." \param "nBits" "Number of bits in the codeword." \param "code" "The codeword." 
\return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail." */ AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code); /** This function writes one bit of data into the cache which will eventually be written to the bitstream buffer. \param "stream" "Pointer to the AVCEncBitstream structure." \param "code" "The codeword." \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail." */ AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code); /** This function adds trailing bits to the bitstream and reports back the final EBSP size. \param "stream" "Pointer to the AVCEncBitstream structure." \param "nal_size" "Output the final NAL size." \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail." */ AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size); /** This function checks whether the current bit position is byte-aligned or not. \param "stream" "Pointer to the bitstream structure." \return "true if byte-aligned, false otherwise." */ bool byte_aligned(AVCEncBitstream *stream); /** This function checks the availability of overrun buffer and switches to use it when normal bufffer is not big enough. \param "stream" "Pointer to the bitstream structure." \param "numExtraBytes" "Number of extra byte needed." \return "AVCENC_SUCCESS or AVCENC_FAIL." */ AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes); /*-------------- intra_est.c ---------------*/ /** This function performs intra/inter decision based on ABE. \param "encvid" "Pointer to AVCEncObject." \param "min_cost" "Best inter cost." \param "curL" "Pointer to the current MB origin in reconstructed frame." \param "picPitch" "Pitch of the reconstructed frame." \return "Boolean for intra mode." 
*/ //bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch); bool IntraDecision(int *min_cost, uint8 *cur, int pitch, bool ave); /** This function performs intra prediction mode search. \param "encvid" "Pointer to AVCEncObject." \param "mbnum" "Current MB number." \param "curL" "Pointer to the current MB origin in reconstructed frame." \param "picPitch" "Pitch of the reconstructed frame." \return "void." */ void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch); /** This function generates all the I16 prediction modes for an MB and keep it in encvid->pred_i16. \param "encvid" "Pointer to AVCEncObject." \return "void" */ void intrapred_luma_16x16(AVCEncObject *encvid); /** This function calculate the cost of all I16 modes and compare them to get the minimum. \param "encvid" "Pointer to AVCEncObject." \param "orgY" "Pointer to the original luma MB." \param "min_cost" "Pointer to the minimal cost so-far." \return "void" */ void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost); /** This function calculates the cost of each I16 mode. \param "org" "Pointer to the original luma MB." \param "org_pitch" "Stride size of the original frame." \param "pred" "Pointer to the prediction values." \param "min_cost" "Minimal cost so-far." \return "Cost" */ int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost); /** This function generates all the I4 prediction modes and select the best one for all the blocks inside a macroblock.It also calls dct_luma to generate the reconstructed MB, and transform coefficients to be encoded. \param "encvid" "Pointer to AVCEncObject." \param "min_cost" "Pointer to the minimal cost so-far." \return "void" */ void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost); /** This function calculates the most probable I4 mode of a given 4x4 block from neighboring informationaccording to AVC/H.264 standard. \param "video" "Pointer to AVCCommonObj." 
\param "blkidx" "The current block index." \return "Most probable mode." */ int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx); /** This function is where a lot of actions take place in the 4x4 block level inside mb_intra4x4_search. \param "encvid" "Pointer to AVCEncObject." \param "blkidx" "The current 4x4 block index." \param "cur" "Pointer to the reconstructed block." \param "org" "Pointer to the original block." \return "Minimal cost, also set currMB->i4Mode" */ int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org); /** This function calculates the cost of a given I4 prediction mode. \param "org" "Pointer to the original block." \param "org_pitch" "Stride size of the original frame." \param "pred" "Pointer to the prediction block. (encvid->pred_i4)" \param "cost" "Pointer to the minimal cost (to be updated)." \return "void" */ void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost); /** This function performs chroma intra search. Each mode is saved in encvid->pred_ic. \param "encvid" "Pointer to AVCEncObject." \return "void" */ void chroma_intra_search(AVCEncObject *encvid); /** This function calculates the cost of a chroma prediction mode. \param "orgCb" "Pointer to the original Cb block." \param "orgCr" "Pointer to the original Cr block." \param "org_pitch" "Stride size of the original frame." \param "pred" "Pointer to the prediction block (encvid->pred_ic)" \param "mincost" "Minimal cost so far." \return "Cost." */ int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int mincost); /*-------------- motion_comp.c ---------------*/ /** This is a main function to peform inter prediction. \param "encvid" "Pointer to AVCEncObject." \param "video" "Pointer to AVCCommonObj." \return "void". */ void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video); /** This function is called for luma motion compensation. \param "ref" "Pointer to the origin of a reference luma." 
\param "picwidth" "Width of the picture." \param "picheight" "Height of the picture." \param "x_pos" "X-coordinate of the predicted block in quarter pel resolution." \param "y_pos" "Y-coordinate of the predicted block in quarter pel resolution." \param "pred" "Pointer to the output predicted block." \param "pred_pitch" "Width of pred." \param "blkwidth" "Width of the current partition." \param "blkheight" "Height of the current partition." \return "void" */ void eLumaMotionComp(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *pred, int pred_pitch, int blkwidth, int blkheight); void eFullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch, int blkwidth, int blkheight); void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx); void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx); void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight); void eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy); void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight); void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy); void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight); void eChromaMotionComp(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *pred, int pred_pitch, int blkwidth, int blkheight); void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void 
eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight); /*-------------- motion_est.c ---------------*/ /** Allocate and initialize arrays necessary for motion search algorithm. \param "envid" "Pointer to AVCEncObject." \return "AVC_SUCCESS or AVC_MEMORY_FAIL." */ AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle); /** Clean up memory allocated in InitMotionSearchModule. \param "envid" "Pointer to AVCEncObject." \return "void." */ void CleanMotionSearchModule(AVCHandle *avcHandle); /** This function performs motion estimation of all macroblocks in a frame during the InitFrame. The goal is to find the best MB partition for inter and find out if intra search is needed for any MBs. This intra MB tendency can be used for scene change detection. \param "encvid" "Pointer to AVCEncObject." \return "void" */ void AVCMotionEstimation(AVCEncObject *encvid); /** This function performs repetitive edge padding to the reference picture by adding 16 pixels around the luma and 8 pixels around the chromas. \param "refPic" "Pointer to the reference picture." \return "void" */ void AVCPaddingEdge(AVCPictureData *refPic); /** This function keeps track of intra refresh macroblock locations. \param "encvid" "Pointer to the global array structure AVCEncObject." \param "mblock" "Pointer to the array of AVCMacroblock structures." \param "totalMB" "Total number of MBs in a frame." \param "numRefresh" "Number of MB to be intra refresh in a single frame." 
\return "void" */ void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh); #ifdef HTFM void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect); void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat); void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[]); void HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch); #endif /** This function reads the input MB into a smaller faster memory space to minimize the cache miss. \param "encvid" "Pointer to the global AVCEncObject." \param "cur" "Pointer to the original input macroblock." \param "pitch" "Stride size of the input frame (luma)." \return "void" */ void AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch); /** Performs motion vector search for a macroblock. \param "encvid" "Pointer to AVCEncObject structure." \param "cur" "Pointer to the current macroblock in the input frame." \param "best_cand" "Array of best candidates (to be filled in and returned)." \param "i0" "X-coordinate of the macroblock." \param "j0" "Y-coordinate of the macroblock." \param "type_pred" "Indicates the type of operations." \param "FS_en" "Flag for fullsearch enable." \param "hp_guess" "Guess for half-pel search." \return "void" */ void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[], int i0, int j0, int type_pred, int FS_en, int *hp_guess); //AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum, // int num_pass); /** Perform full-pel exhaustive search around the predicted MV. \param "encvid" "Pointer to AVCEncObject structure." \param "prev" "Pointer to the reference frame." \param "cur" "Pointer to the input macroblock." \param "imin" "Pointer to minimal mv (x)." \param "jmin" "Pointer to minimal mv (y)." \param "ilow, ihigh, jlow, jhigh" "Lower bound on search range." \param "cmvx, cmvy" "Predicted MV value." 
\return "The cost function of the best candidate." */ int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur, int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh, int cmvx, int cmvy); /** Select candidates from neighboring blocks according to the type of the prediction selection. \param "mvx" "Pointer to the candidate, x-coordinate." \param "mvy" "Pointer to the candidate, y-coordinate." \param "num_can" "Pointer to the number of candidates returned." \param "imb" "The MB index x-coordinate." \param "jmb" "The MB index y-coordinate." \param "type_pred" "Type of the prediction." \param "cmvx, cmvy" "Pointer to predicted MV (modified version)." \return "void." */ void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb, AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy); /** Utility function to move the values in the array dn according to the new location to avoid redundant calculation. \param "dn" "Array of integer of size 9." \param "new_loc" "New location index." \return "void." */ void AVCMoveNeighborSAD(int dn[], int new_loc); /** Find minimum index of dn. \param "dn" "Array of integer of size 9." \return "The index of dn with the smallest dn[] value." */ int AVCFindMin(int dn[]); /*------------- findhalfpel.c -------------------*/ /** Search for the best half-pel resolution MV around the full-pel MV. \param "encvid" "Pointer to the global AVCEncObject structure." \param "cur" "Pointer to the current macroblock." \param "mot" "Pointer to the AVCMV array of the frame." \param "ncand" "Pointer to the origin of the fullsearch result." \param "xpos" "The current MB position in x." \param "ypos" "The current MB position in y." \param "hp_guess" "Input to help speedup the search." \param "cmvx, cmvy" "Predicted motion vector use for mvcost." \return "Minimal cost (SATD) without MV cost. 
(for rate control purpose)" */ int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand, int xpos, int ypos, int hp_guess, int cmvx, int cmvy); /** This function generates sub-pel pixels required to do subpel MV search. \param "subpel_pred" "Pointer to 2-D array, each array for each position." \param "ncand" "Pointer to the full-pel center position in ref frame." \param "lx" "Pitch of the ref frame." \return "void" */ void GenerateHalfPelPred(uint8 *subpel_pred, uint8 *ncand, int lx); /** This function calculate vertical interpolation at half-point of size 4x17. \param "dst" "Pointer to destination." \param "ref" "Pointer to the starting reference pixel." \return "void." */ void VertInterpWClip(uint8 *dst, uint8 *ref); /** This function generates quarter-pel pixels around the best half-pel result during the sub-pel MV search. \param "bilin_base" "Array of pointers to be used as basis for q-pel interp." \param "qpel_pred" "Array of pointers pointing to quarter-pel candidates." \param "hpel_pos" "Best half-pel position at the center." \return "void" */ void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_pred, int hpel_pos); /** This function calculates the SATD of a subpel candidate. \param "cand" "Pointer to a candidate." \param "cur" "Pointer to the current block." \param "dmin" "Min-so-far SATD." \return "Sum of Absolute Transformed Difference." */ int SATD_MB(uint8 *cand, uint8 *cur, int dmin); /*------------- rate_control.c -------------------*/ /** This function is a utility function. It returns average QP of the previously encoded frame. \param "rateCtrl" "Pointer to AVCRateControl structure." \return "Average QP." */ int GetAvgFrameQP(AVCRateControl *rateCtrl); /** This function takes the timestamp of the input and determine whether it should be encoded or skipped. \param "encvid" "Pointer to the AVCEncObject structure." \param "rateCtrl" "Pointer to the AVCRateControl structure." 
\param "modTime" "The 32 bit timestamp of the input frame." \param "frameNum" "Pointer to the frame number if to be encoded." \return "AVC_SUCCESS or else." */ AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum); /** This function updates the buffer fullness when frames are dropped either by the rate control algorithm or by the users to make sure that target bit rate is still met. \param "video" "Pointer to the common object structure." \param "rateCtrl" "Pointer to rate control structure." \param "frameInc" "Difference of the current frame number and previous frame number." \return "void." */ void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc); /** This function initializes rate control module and allocates necessary bufferes to do the job. \param "avcHandle" "Pointer to the encoder handle." \return "AVCENC_SUCCESS or AVCENC_MEMORY_FAIL." */ AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle); /** This function frees buffers allocated in InitRateControlModule. \param "avcHandle" "Pointer to the encoder handle." \return "void." */ void CleanupRateControlModule(AVCHandle *avcHandle); /** This function is called at the beginning of each GOP or the first IDR frame. It calculates target bits for a GOP. \param "encvid" "Pointer to the encoder object." \return "void." */ void RCInitGOP(AVCEncObject *encvid); /** This function calculates target bits for a particular frame. \param "video" "Pointer to the AVCEncObject structure." \return "void" */ void RCInitFrameQP(AVCEncObject *video); /** This function calculates QP for the upcoming frame or basic unit. \param "encvid" "Pointer to the encoder object." \param "rateCtrl" "Pointer to the rate control object." \return "QP value ranging from 0-51." */ int RCCalculateQP(AVCEncObject *encvid, AVCRateControl *rateCtrl); /** This function translates the luma QP to chroma QP and calculates lambda based on QP. 
\param "video" "Pointer to the AVCEncObject structure." \return "void" */ void RCInitChromaQP(AVCEncObject *encvid); /** This function is called before encoding each macroblock. \param "encvid" "Pointer to the encoder object." \return "void." */ void RCInitMBQP(AVCEncObject *encvid); /** This function updates bits usage stats after encoding an macroblock. \param "video" "Pointer to AVCCommonObj." \param "rateCtrl" "Pointer to AVCRateControl." \param "num_header_bits" "Number of bits used for MB header." \param "num_texture_bits" "Number of bits used for MB texture." \return "void" */ void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits); /** This function calculates the difference between prediction and original MB. \param "encvid" "Pointer to the encoder object." \param "currMB" "Pointer to the current macroblock structure." \param "orgL" "Pointer to the original MB." \param "orgPitch" "Pointer to the original picture pitch." \return "void." */ void RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch); /** Restore QP related parameters of previous MB when current MB is skipped. \param "currMB" "Pointer to the current macroblock." \param "video" "Pointer to the common video structure." \param "encvid" "Pointer to the global encoding structure." \return "void" */ void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid); /** This function is called after done with a frame. \param "encvid" "Pointer to the encoder object." \return "AVCENC_SUCCESS or AVCENC_SKIPPED_PICTURE when bufer overflow (need to discard current frame)." */ AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid); /** This function is called to update the RC internal variables when bit rate/frame rate is changed. \param "rateCtrl" "Pointer to the rate control structure." \param "encvid" "Pointer to AVCEncObject." 
\return "void" */ void RCUpdateParams(AVCRateControl *rateCtrl, AVCEncObject *encvid); /*--------- residual.c -------------------*/ /** This function encodes the intra pcm data and fill it in the corresponding location on the current picture. \param "video" "Pointer to AVCEncObject." \return "AVCENC_SUCCESS if success, or else for bitstream errors." */ AVCEnc_Status EncodeIntraPCM(AVCEncObject *video); /** This function performs CAVLC syntax encoding on the run and level information of the coefficients. The level and run arrays are elements in AVCEncObject structure, populated by TransQuantZZ, TransQuantIntraDC and TransQuantChromaDC functions. \param "video" "Pointer to AVCEncObject." \param "type" "One of AVCResidualType for a particular 4x4 block." \param "bindx" "Block index or number of nonzero coefficients for AVC_Intra16DC and AVC_ChromaDC mode." \param "currMB" "Pointer to the current macroblock structure." \return "AVCENC_SUCCESS for success." \Note "This function has 32-bit machine specific instruction!!!!" 
*/ AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int bindx, AVCMacroblock *currMB); /*------------- sad.c ---------------------------*/ int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); #ifdef HTFM /* 3/2/1, Hypothesis Testing Fast Matching */ int AVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info); int AVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info); int AVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info); int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info); #endif /*------------- slice.c -------------------------*/ /** This function performs the main encoding loop for a slice. \param "encvid" "Pointer to AVCEncObject." \return "AVCENC_SUCCESS for success, AVCENC_PICTURE_READY for end-of-picture and AVCENC_FAIL or AVCENC_SLICE_EMPTY otherwise." */ AVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid); /** This function performs the main encoding operation for one macroblock. \param "video" "pointer to AVCEncObject." \return "AVCENC_SUCCESS for success, or other bitstream related failure status." */ AVCEnc_Status EncodeMB(AVCEncObject *video); /** This function calls prediction INTRA/INTER functions, transform, quantization and zigzag scanning to get the run-level symbols. \param "encvid" "pointer to AVCEncObject." 
\param "curL" "pointer to Luma component of the current frame. \param "curCb" "pointer to Cb component of the current frame. \param "curCr" "pointer to Cr component of the current frame. \return "void for now." */ void MBPredTransQuantZZ(AVCEncObject *encvid, uint8 *curL, uint8 *curCb, uint8 *curCr); /** This function copies the content of the prediction MB into the reconstructed YUV frame directly. \param "curL" "Pointer to the destination Y component." \param "curCb" "Pointer to the destination Cb component." \param "curCr" "Pointer to the destination Cr component." \param "predBlock" "Pointer to the prediction MB." \param "picWidth" "The width of the frame." \return "None." */ void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picWidth); /** This function encodes the mb_type, CBP, prediction mode, ref idx and MV. \param "currMB" "Pointer to the current macroblock structure." \param "video" "Pointer to the AVCEncObject structure." \return "AVCENC_SUCCESS for success or else for fail." */ AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *video); /** This function finds the right mb_type for a macroblock given the mbMode, CBP, NumPart, PredPartMode. \param "currMB" "Pointer to the current macroblock structure." \param "slice_type" "Value of the slice_type." \return "mb_type." */ uint InterpretMBType(AVCMacroblock *currMB, int slice_type); /** This function encodes the mb_pred part of the macroblock data. \param "video" "Pointer to the AVCCommonObj structure." \param "currMB" "Pointer to the current macroblock structure." \param "stream" "Pointer to the AVCEncBitstream structure." \return "AVCENC_SUCCESS for success or bitstream fail status." */ AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream); /** This function encodes the sub_mb_pred part of the macroblock data. \param "video" "Pointer to the AVCCommonObj structure." \param "currMB" "Pointer to the current macroblock structure." 
\param "stream" "Pointer to the AVCEncBitstream structure." \return "AVCENC_SUCCESS for success or bitstream fail status." */ AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream); /** This function interprets the sub_mb_type and sets necessary information when the slice type is AVC_P_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "sub_mb_type" "From the syntax bitstream." \return "void" */ void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type); /** This function interprets the sub_mb_type and sets necessary information when the slice type is AVC_B_SLICE. in the macroblock structure. \param "mblock" "Pointer to current AVCMacroblock." \param "sub_mb_type" "From the syntax bitstream." \return "void" */ void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type); /** This function encodes intra 4x4 mode. It calculates the predicted I4x4 mode and the remnant to be encoded. \param "video" "Pointer to AVCEncObject structure." \param "currMB" "Pointer to the AVCMacroblock structure." \param "stream" "Pointer to AVCEncBitstream sructure." \return "AVCENC_SUCCESS for success." */ AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream); /*------------- vlc_encode.c -----------------------*/ /** This function encodes and writes a value into an Exp-Golomb codeword. \param "bitstream" "Pointer to AVCEncBitstream." \param "codeNum" "Pointer to the value of the codeNum." \return "AVCENC_SUCCESS for success or bitstream error messages for fail." */ AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum); /** This function maps and encodes signed Exp-Golomb codes. \param "bitstream" "Pointer to AVCEncBitstream." \param "value" "Pointer to syntax element value." \return "AVCENC_SUCCESS or AVCENC_FAIL." */ AVCEnc_Status se_v(AVCEncBitstream *bitstream, int value); /** This function maps and encodes truncated Exp-Golomb codes. 
\param "bitstream" "Pointer to AVCEncBitstream." \param "value" "Pointer to syntax element value." \param "range" "Range of the value as input to determine the algorithm." \return "AVCENC_SUCCESS or AVCENC_FAIL." */ AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range); /** This function creates Exp-Golomb codeword from codeNum. \param "bitstream" "Pointer to AVCEncBitstream." \param "codeNum" "Pointer to the codeNum value." \return "AVCENC_SUCCESS for success or bitstream error messages for fail." */ AVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum); /** This function performs CAVLC encoding of the CBP (coded block pattern) of a macroblock by calling ue_v() and then mapping the CBP to the corresponding VLC codeNum. \param "currMB" "Pointer to the current AVCMacroblock structure." \param "stream" "Pointer to the AVCEncBitstream." \return "void" */ AVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream); /** This function encodes trailing ones and total coefficient. \param "stream" "Pointer to the AVCEncBitstream." \param "TrailingOnes" "The trailing one variable output." \param "TotalCoeff" "The total coefficient variable output." \param "nC" "Context for number of nonzero coefficient (prediction context)." \return "AVCENC_SUCCESS for success or else for bitstream failure." */ AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC); /** This function encodes trailing ones and total coefficient for chroma DC block. \param "stream" "Pointer to the AVCEncBitstream." \param "TrailingOnes" "The trailing one variable output." \param "TotalCoeff" "The total coefficient variable output." \return "AVCENC_SUCCESS for success or else for bitstream failure." */ AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff); /** This function encodes total_zeros value as in Table 9-7 and 9-8. 
\param "stream" "Pointer to the AVCEncBitstream." \param "TotalZeros" "The total_zeros value." \param "TotalCoeff" "The total coefficient variable output." \return "AVCENC_SUCCESS for success or else for bitstream failure." */ AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff); /** This function encodes total_zeros VLC syntax for chroma DC as in Table 9-9. \param "stream" "Pointer to the AVCEncBitstream." \param "TotalZeros" "The total_zeros value." \param "TotalCoeff" "The total coefficient variable output." \return "AVCENC_SUCCESS for success or else for bitstream failure." */ AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff); /** This function encodes run_before VLC syntax as in Table 9-10. \param "stream" "Pointer to the AVCEncBitstream." \param "run_before" "The run_before value." \param "zerosLeft" "The context for number of zeros left." \return "AVCENC_SUCCESS for success or else for bitstream failure." */ AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft); #ifdef __cplusplus } #endif #endif /* _AVCENC_LIB_H_ */ ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/bitstream_io.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #include "avcenc_lib.h" #include "oscl_mem.h" #define WORD_SIZE 32 /* array for trailing bit pattern as function of number of bits */ /* the first one is unused. */ const static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80}; /* ======================================================================== */ /* Function : BitstreamInit() */ /* Date : 11/4/2003 */ /* Purpose : Populate bitstream structure with bitstream buffer and size */ /* it also initializes internal data */ /* In/out : */ /* Return : AVCENC_SUCCESS if successed, AVCENC_FAIL if failed. */ /* Modified : */ /* ======================================================================== */ /* |--------|--------|----~~~~~-----|---------|---------|---------| ^ ^write_pos ^buf_size bitstreamBuffer <---------> current_word |-----xxxxxxxxxxxxx| = current_word 32 or 16 bits <----> bit_left ======================================================================== */ AVCEnc_Status BitstreamEncInit(AVCEncBitstream *stream, uint8 *buffer, int buf_size, uint8 *overrunBuffer, int oBSize) { if (stream == NULL || buffer == NULL || buf_size <= 0) { return AVCENC_BITSTREAM_INIT_FAIL; } stream->bitstreamBuffer = buffer; stream->buf_size = buf_size; stream->write_pos = 0; stream->count_zeros = 0; stream->current_word = 0; stream->bit_left = WORD_SIZE; stream->overrunBuffer = overrunBuffer; stream->oBSize = oBSize; return AVCENC_SUCCESS; } /* ======================================================================== */ /* Function : AVCBitstreamSaveWord() */ /* Date : 3/29/2004 */ /* Purpose : Save the current_word into the buffer, byte-swap, and */ /* add emulation prevention insertion. */ /* In/out : */ /* Return : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is */ /* full. 
 */
/*  Modified :                                                              */
/* ======================================================================== */
/* Flush the pending bytes of 'current_word' (always a whole number of
   bytes) into the output buffer, inserting an emulation-prevention 0x03
   byte after every run of two zero bytes.  count_zeros carries the run
   length across calls. */
AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream)
{
    int num_bits;
    uint8 *write_pnt, byte;
    uint current_word;

    /* check number of bytes in current_word, must always be byte-aligned!!!! */
    num_bits = WORD_SIZE - stream->bit_left; /* must be multiple of 8 !!*/

    /* need room for the pending bytes plus up to two emulation-prevention
       bytes; otherwise try to switch to (or grow) the overrun buffer */
    if (stream->buf_size - stream->write_pos <= (num_bits >> 3) + 2) /* 2 more bytes for possible EPBS */
    {
        if (AVCENC_SUCCESS != AVCBitstreamUseOverrunBuffer(stream, (num_bits >> 3) + 2))
        {
            return AVCENC_BITSTREAM_BUFFER_FULL;
        }
    }

    /* write word, byte-by-byte, most-significant byte first */
    write_pnt = stream->bitstreamBuffer + stream->write_pos;
    current_word = stream->current_word;
    while (num_bits) /* no need to check stream->buf_size and stream->write_pos, taken care already */
    {
        num_bits -= 8;
        byte = (current_word >> num_bits) & 0xFF;
        if (byte != 0)
        {
            *write_pnt++ = byte;
            stream->write_pos++;
            stream->count_zeros = 0;
        }
        else
        {
            stream->count_zeros++;
            *write_pnt++ = byte;
            stream->write_pos++;
            if (stream->count_zeros == 2)
            {
                /* two consecutive zero bytes: emit emulation-prevention 0x03 */
                /* for num_bits = 32, this can add 2 more bytes extra for EPBS */
                *write_pnt++ = 0x3;
                stream->write_pos++;
                stream->count_zeros = 0;
            }
        }
    }

    /* reset current_word and bit_left */
    stream->current_word = 0;
    stream->bit_left = WORD_SIZE;

    return AVCENC_SUCCESS;
}

/* ======================================================================== */
/*  Function : BitstreamWriteBits()                                         */
/*  Date     : 3/29/2004                                                    */
/*  Purpose  : Write up to machine word.                                    */
/*  In/out   : Unused bits in 'code' must be all zeros.                     */
/*  Return   : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is  */
/*             full.
*/ /* Modified : */ /* ======================================================================== */ AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code) { AVCEnc_Status status = AVCENC_SUCCESS; int bit_left = stream->bit_left; uint current_word = stream->current_word; //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"BitstreamWriteBits",nBits,-1); if (nBits > WORD_SIZE) /* has to be taken care of specially */ { return AVCENC_FAIL; /* for now */ /* otherwise, break it down to 2 write of less than 16 bits at a time. */ } if (nBits <= bit_left) /* more bits left in current_word */ { stream->current_word = (current_word << nBits) | code; stream->bit_left -= nBits; if (stream->bit_left == 0) /* prepare for the next word */ { status = AVCBitstreamSaveWord(stream); return status; } } else { stream->current_word = (current_word << bit_left) | (code >> (nBits - bit_left)); nBits -= bit_left; stream->bit_left = 0; status = AVCBitstreamSaveWord(stream); /* save current word */ stream->bit_left = WORD_SIZE - nBits; stream->current_word = code; /* no extra masking for code, must be handled before saving */ } return status; } /* ======================================================================== */ /* Function : BitstreamWrite1Bit() */ /* Date : 3/30/2004 */ /* Purpose : Write 1 bit */ /* In/out : Unused bits in 'code' must be all zeros. */ /* Return : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is */ /* full. 
 */
/*  Modified :                                                              */
/* ======================================================================== */
/* Append a single bit to the staging word, flushing it when full. */
AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code)
{
    AVCEnc_Status status;
    uint current_word = stream->current_word;

    //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"BitstreamWrite1Bit",code,-1);

    //if(1 <= bit_left) /* more bits left in current_word */
    /* we can assume that there always be positive bit_left in the current word */
    stream->current_word = (current_word << 1) | code;
    stream->bit_left--;

    if (stream->bit_left == 0) /* prepare for the next word */
    {
        status = AVCBitstreamSaveWord(stream);
        return status;
    }
    return AVCENC_SUCCESS;
}

/* ======================================================================== */
/*  Function : BitstreamTrailingBits()                                      */
/*  Date     : 3/31/2004                                                    */
/*  Purpose  : Add trailing bits and report the final EBSP size.            */
/*  In/out   :                                                              */
/*  Return   : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is  */
/*             full.                                                        */
/*  Modified :                                                              */
/* ======================================================================== */
AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size)
{
    (void)(nal_size);
    AVCEnc_Status status;
    int bit_left = bitstream->bit_left;

    bit_left &= 0x7; /* modulo by 8 */
    if (bit_left == 0) bit_left = 8;
    /* bitstream->bit_left == 0 cannot happen here since it would have been Saved already */

    /* write the stop-one-bit plus zero padding from the lookup table */
    status = BitstreamWriteBits(bitstream, bit_left, trailing_bits[bit_left]);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    /* if it's not saved, save it. */
    //if(bitstream->bit_left<(WORD_SIZE<<3)) /* in fact, no need to check */
    {
        status = AVCBitstreamSaveWord(bitstream);
    }

    return status;
}

/* check whether it's byte-aligned */
bool byte_aligned(AVCEncBitstream *stream)
{
    if (stream->bit_left % 8)
        return false;
    else
        return true;
}

/* determine whether overrun buffer can be used or not */
/* Switches the writer onto its overrun buffer (growing/reallocating it as
   needed through the CBAVC_Malloc/CBAVC_Free callbacks) so that at least
   'numExtraBytes' more bytes can be written.  Fails when no overrun buffer
   was configured or allocation fails. */
AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes)
{
    AVCEncObject *encvid = (AVCEncObject*)stream->encvid;

    if (stream->overrunBuffer != NULL) // overrunBuffer is set
    {
        if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used
        {
            if (stream->write_pos + numExtraBytes >= stream->oBSize)
            {
                /* grow request by 100 bytes of slack, then round DOWN to a
                   multiple of 4 (still leaves >= 96 bytes of slack) */
                stream->oBSize = stream->write_pos + numExtraBytes + 100;
                stream->oBSize &= (~0x3); // make it multiple of 4

                // allocate new overrun Buffer
                if (encvid->overrunBuffer)
                {
                    /* NOTE(review): the (int) cast of a pointer in the free
                       callback would truncate on 64-bit targets -- signature
                       imposed by the CBAVC_Free API, confirm for LP64 ports. */
                    encvid->avcHandle->CBAVC_Free((uint32*)encvid->avcHandle->userData,
                                                  (int)encvid->overrunBuffer);
                }

                encvid->oBSize = stream->oBSize;
                encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
                                        stream->oBSize, DEFAULT_ATTR);
                stream->overrunBuffer = encvid->overrunBuffer;
                if (stream->overrunBuffer == NULL)
                {
                    return AVCENC_FAIL;
                }
            }

            // copy everything to overrun buffer and start using it.
            oscl_memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->write_pos);
            stream->bitstreamBuffer = stream->overrunBuffer;
            stream->buf_size = stream->oBSize;
        }
        else // overrun buffer is already used
        {
            stream->oBSize = stream->write_pos + numExtraBytes + 100;
            stream->oBSize &= (~0x3); // make it multiple of 4

            // allocate new overrun buffer
            encvid->oBSize = stream->oBSize;
            encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
                                    stream->oBSize, DEFAULT_ATTR);
            if (encvid->overrunBuffer == NULL)
            {
                return AVCENC_FAIL;
            }

            // copy from the old buffer to new buffer
            oscl_memcpy(encvid->overrunBuffer, stream->overrunBuffer, stream->write_pos);
            // free old buffer
            encvid->avcHandle->CBAVC_Free((uint32*)encvid->avcHandle->userData, (int)stream->overrunBuffer);
            // assign pointer to new buffer
            stream->overrunBuffer = encvid->overrunBuffer;
            stream->bitstreamBuffer = stream->overrunBuffer;
            stream->buf_size = stream->oBSize;
        }

        return AVCENC_SUCCESS;
    }
    else // overrunBuffer is not enable.
    {
        return AVCENC_FAIL;
    }
}



================================================
FILE: RtspCamera/jni/avc_h264/enc/src/block.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */

#include "oscl_mem.h"
#include "avcenc_lib.h"

/* subtract with the prediction and do transformation */
/* 4x4 forward transform of (cur - predBlock).  'pitch' packs the pitch of
   'cur' in its upper 16 bits and the pitch of 'predBlock' in its lower 16
   bits.  Output rows in dataBlock are 16 int16 apart. */
void trans(uint8 *cur, int pitch, uint8 *predBlock, int16 *dataBlock)
{
    int16 *ptr = dataBlock;
    int r0, r1, r2, r3, j;
    int curpitch = (uint)pitch >> 16;   /* pitch of 'cur' (upper halfword) */
    int predpitch = (pitch & 0xFFFF);   /* pitch of 'predBlock' (lower halfword) */

    /* horizontal */
    j = 4;
    while (j > 0)
    {
        /* calculate the residue first */
        r0 = cur[0] - predBlock[0];
        r1 = cur[1] - predBlock[1];
        r2 = cur[2] - predBlock[2];
        r3 = cur[3] - predBlock[3];

        r0 += r3;               //ptr[0] + ptr[3];
        r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];
        r1 += r2;               //ptr[1] + ptr[2];
        r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];

        ptr[0] = r0 + r1;
        ptr[2] = r0 - r1;
        ptr[1] = (r3 << 1) + r2;
        ptr[3] = r3 - (r2 << 1);

        ptr += 16;
        predBlock += predpitch;
        cur += curpitch;
        j--;
    }

    /* vertical */
    ptr = dataBlock;
    j = 4;
    while (j > 0)
    {
        r0 = ptr[0] + ptr[48];
        r3 = ptr[0] - ptr[48];
        r1 = ptr[16] + ptr[32];
        r2 = ptr[16] - ptr[32];

        ptr[0] = r0 + r1;
        ptr[32] = r0 - r1;
        ptr[16] = (r3 << 1) + r2;
        ptr[48] = r3 - (r2 << 1);

        ptr++;
        j--;
    }

    return ;
}

/* do residue transform quant invquant, invtrans and write output out */
/* Processes one 4x4 luma block: forward transform of (org - pred),
   quantization (filling level[]/run[] for entropy coding and updating
   *coef_cost), dequantization, and -- for intra MBs only -- the inverse
   transform that reconstructs into 'cur'.  Returns the number of nonzero
   quantized coefficients. */
int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost)
{
    AVCCommonObj *video = encvid->common;
    int org_pitch = encvid->currInput->pitch;
    int pitch = video->currPic->pitch;
    int16 *coef = video->block;
    uint8 *pred = video->pred_block; // size 16 for a 4x4 block
    int pred_pitch = video->pred_pitch;
    int r0, r1, r2, r3, j, k, idx;
    int *level, *run;
    int Qq, Rq, q_bits, qp_const, quant;
    int data, lev, zero_run;
    int numcoeff;

    coef += ((blkidx & 0x3) << 2) + ((blkidx >> 2) << 6); /* point to the 4x4 block */

    /* first take a 4x4 transform */
    /* horizontal */
    j = 4;
    while (j > 0)
    {
        /* calculate the residue first */
        r0 = org[0] - pred[0];   /* OPTIMIZEABLE */
        r1 = org[1] - pred[1];
        r2 = org[2] - pred[2];
        r3 = org[3] - pred[3];

        r0 += r3;               //ptr[0] + ptr[3];
        r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];
        r1 += r2;               //ptr[1] + ptr[2];
        r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];

        coef[0] = r0 + r1;
        coef[2] = r0 - r1;
        coef[1] = (r3 << 1) + r2;
        coef[3] = r3 - (r2 << 1);

        coef += 16;
        org += org_pitch;
        pred += pred_pitch;
        j--;
    }

    /* vertical */
    coef -= 64;
    pred -= (pred_pitch << 2);
    j = 4;
    while (j > 0) /* OPTIMIZABLE */
    {
        r0 = coef[0] + coef[48];
        r3 = coef[0] - coef[48];
        r1 = coef[16] + coef[32];
        r2 = coef[16] - coef[32];

        coef[0] = r0 + r1;
        coef[32] = r0 - r1;
        coef[16] = (r3 << 1) + r2;
        coef[48] = r3 - (r2 << 1);

        coef++;
        j--;
    }

    coef -= 4;

    /* quant */
    level = encvid->level[ras2dec[blkidx]];
    run = encvid->run[ras2dec[blkidx]];

    Rq = video->QPy_mod_6;
    Qq = video->QPy_div_6;
    qp_const = encvid->qp_const;
    q_bits = 15 + Qq;

    zero_run = 0;
    numcoeff = 0;
    for (k = 0; k < 16; k++)
    {
        idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */

        data = coef[idx];
        quant = quant_coef[Rq][k];

        if (data > 0)
        {
            lev = data * quant + qp_const;
        }
        else
        {
            lev = -data * quant + qp_const;
        }
        lev >>= q_bits;
        if (lev)
        {
            /* levels > 1 get MAX_VALUE cost so the block is never discarded */
            *coef_cost += ((lev > 1) ? MAX_VALUE : COEFF_COST[DISABLE_THRESHOLDING][zero_run]);

            /* dequant */
            quant = dequant_coefres[Rq][k];
            if (data > 0)
            {
                level[numcoeff] = lev;
                coef[idx] = (lev * quant) << Qq;
            }
            else
            {
                level[numcoeff] = -lev;
                coef[idx] = (-lev * quant) << Qq;
            }
            run[numcoeff++] = zero_run;
            zero_run = 0;
        }
        else
        {
            zero_run++;
            coef[idx] = 0;
        }
    }

    if (video->currMB->mb_intra) // only do inverse transform with intra block
    {
        if (numcoeff) /* then do inverse transform */
        {
            for (j = 4; j > 0; j--) /* horizontal */
            {
                r0 = coef[0] + coef[2];
                r1 = coef[0] - coef[2];
                r2 = (coef[1] >> 1) - coef[3];
                r3 = coef[1] + (coef[3] >> 1);

                coef[0] = r0 + r3;
                coef[1] = r1 + r2;
                coef[2] = r1 - r2;
                coef[3] = r0 - r3;

                coef += 16;
            }

            coef -= 64;
            for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */
            {
                r0 = coef[0] + coef[32];
                r1 = coef[0] - coef[32];
                r2 = (coef[16] >> 1) - coef[48];
                r3 = coef[16] + (coef[48] >> 1);

                r0 += r3;
                r3 = (r0 - (r3 << 1)); /* r0-r3 */
                r1 += r2;
                r2 = (r1 - (r2 << 1)); /* r1-r2 */

                /* rounding before the >>6 normalization below */
                r0 += 32;
                r1 += 32;
                r2 += 32;
                r3 += 32;

                r0 = pred[0] + (r0 >> 6);
                if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                r1 = *(pred += pred_pitch) + (r1 >> 6);
                if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                r2 = *(pred += pred_pitch) + (r2 >> 6);
                if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                r3 = pred[pred_pitch] + (r3 >> 6);
                if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */

                *cur = r0;
                *(cur += pitch) = r1;
                *(cur += pitch) = r2;
                cur[pitch] = r3;

                cur -= (pitch << 1);
                cur++;
                pred -= (pred_pitch << 1);
                pred++;
                coef++;
            }
        }
        else // copy from pred to cur
        {
            *((uint32*)cur) = *((uint32*)pred);
            *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
            *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
            *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
        }
    }

    return numcoeff;
}

/* Inverse transform for a whole inter macroblock: for every 8x8 group whose
   CBP bit is set, runs the 4x4 IDCT on each nonzero 4x4 block and adds the
   residual in video->block onto the reconstructed picture at curL. */
void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch)
{
    int16 *coef, *coef8 = video->block;
    uint8 *cur;  // the same as curL
    int b8, b4;
    int r0, r1, r2, r3, j, blkidx;

    for (b8 = 0; b8 < 4; b8++)
    {
        cur = curL;
        coef = coef8;

        if (currMB->CBP&(1 << b8))  /* this 8x8 group has coded residual */
        {
            for (b4 = 0; b4 < 4; b4++)
            {
                blkidx = blkIdx2blkXY[b8][b4];
                /* do IDCT */
                if (currMB->nz_coeff[blkidx])
                {
                    for (j = 4; j > 0; j--) /* horizontal */
                    {
                        r0 = coef[0] + coef[2];
                        r1 = coef[0] - coef[2];
                        r2 = (coef[1] >> 1) - coef[3];
                        r3 = coef[1] + (coef[3] >> 1);

                        coef[0] = r0 + r3;
                        coef[1] = r1 + r2;
                        coef[2] = r1 - r2;
                        coef[3] = r0 - r3;

                        coef += 16;
                    }

                    coef -= 64;
                    for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */
                    {
                        r0 = coef[0] + coef[32];
                        r1 = coef[0] - coef[32];
                        r2 = (coef[16] >> 1) - coef[48];
                        r3 = coef[16] + (coef[48] >> 1);

                        r0 += r3;
                        r3 = (r0 - (r3 << 1)); /* r0-r3 */
                        r1 += r2;
                        r2 = (r1 - (r2 << 1)); /* r1-r2 */

                        r0 += 32;
                        r1 += 32;
                        r2 += 32;
                        r3 += 32;

                        /* add residual to the current pixels and clip to [0,255] */
                        r0 = cur[0] + (r0 >> 6);
                        if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                        *cur = r0;
                        r1 = *(cur += picPitch) + (r1 >> 6);
                        if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                        *cur = r1;
                        r2 = *(cur += picPitch) + (r2 >> 6);
                        if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                        *cur = r2;
                        r3 = cur[picPitch] + (r3 >> 6);
                        if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */
                        cur[picPitch] = r3;

                        cur -= (picPitch << 1);
                        cur++;
                        coef++;
                    }
                    cur -= 4;
                    coef -= 4;
                }

                /* advance to the next 4x4 block within the 8x8 group */
                if (b4&1)
                {
                    cur += ((picPitch << 2) - 4);
                    coef += 60;
                }
                else
                {
                    cur += 4;
                    coef += 4;
                }
            }
        }

        /* advance to the next 8x8 group */
        if (b8&1)
        {
            curL += ((picPitch << 3) - 8);
            coef8 += 120;
        }
        else
        {
            curL += 8;
            coef8 += 8;
        }
    }

    return ;
}

/* perform dct, quant, iquant, idct for the entire MB */
/* Intra 16x16 luma path: forward 4x4 transforms of the whole MB, extra
   4x4 Hadamard on the 16 DC terms, DC and AC quant/dequant (levels and
   runs stored for entropy coding), then inverse transform reconstructing
   into curL and setting currMB->CBP / nz_coeff. */
void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL)
{
    AVCCommonObj *video = encvid->common;
    int pitch = video->currPic->pitch;
    int org_pitch = encvid->currInput->pitch;
    AVCMacroblock *currMB = video->currMB;
    int16 *coef = video->block;
    uint8 *pred = encvid->pred_i16[currMB->i16Mode];
    int blk_x, blk_y, j, k, idx, b8, b4;
    int r0, r1, r2, r3, m0, m1, m2, m3;
    int data, lev;
    int *level, *run, zero_run, ncoeff;
    int Rq, Qq, quant, q_bits, qp_const;
    int
    offset_cur[4], offset_pred[4], offset;

    /* horizontal */
    for (j = 16; j > 0; j--)
    {
        for (blk_x = 4; blk_x > 0; blk_x--)
        {
            /* calculate the residue first */
            r0 = *orgL++ - *pred++;
            r1 = *orgL++ - *pred++;
            r2 = *orgL++ - *pred++;
            r3 = *orgL++ - *pred++;

            r0 += r3;               //ptr[0] + ptr[3];
            r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];
            r1 += r2;               //ptr[1] + ptr[2];
            r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];

            *coef++ = r0 + r1;
            *coef++ = (r3 << 1) + r2;
            *coef++ = r0 - r1;
            *coef++ = r3 - (r2 << 1);
        }
        orgL += (org_pitch - 16);
    }
    pred -= 256;
    coef -= 256;

    /* vertical */
    for (blk_y = 4; blk_y > 0; blk_y--)
    {
        for (j = 16; j > 0; j--)
        {
            r0 = coef[0] + coef[48];
            r3 = coef[0] - coef[48];
            r1 = coef[16] + coef[32];
            r2 = coef[16] - coef[32];

            coef[0] = r0 + r1;
            coef[32] = r0 - r1;
            coef[16] = (r3 << 1) + r2;
            coef[48] = r3 - (r2 << 1);

            coef++;
        }
        coef += 48;
    }

    /* then perform DC transform (4x4 Hadamard over the 16 DC terms) */
    coef -= 256;
    for (j = 4; j > 0; j--)
    {
        r0 = coef[0] + coef[12];
        r3 = coef[0] - coef[12];
        r1 = coef[4] + coef[8];
        r2 = coef[4] - coef[8];

        coef[0] = r0 + r1;
        coef[8] = r0 - r1;
        coef[4] = r3 + r2;
        coef[12] = r3 - r2;

        coef += 64;
    }
    coef -= 256;
    for (j = 4; j > 0; j--)
    {
        r0 = coef[0] + coef[192];
        r3 = coef[0] - coef[192];
        r1 = coef[64] + coef[128];
        r2 = coef[64] - coef[128];

        coef[0] = (r0 + r1) >> 1;
        coef[128] = (r0 - r1) >> 1;
        coef[64] = (r3 + r2) >> 1;
        coef[192] = (r3 - r2) >> 1;

        coef += 4;
    }
    coef -= 16;

    // then quantize DC
    level = encvid->leveldc;
    run = encvid->rundc;

    Rq = video->QPy_mod_6;
    Qq = video->QPy_div_6;
    quant = quant_coef[Rq][0];
    q_bits = 15 + Qq;
    qp_const = encvid->qp_const;

    zero_run = 0;
    ncoeff = 0;
    for (k = 0; k < 16; k++) /* in zigzag scan order */
    {
        idx = ZIGZAG2RASTERDC[k];
        data = coef[idx];
        if (data > 0) // quant
        {
            lev = data * quant + (qp_const << 1);
        }
        else
        {
            lev = -data * quant + (qp_const << 1);
        }
        lev >>= (q_bits + 1);   /* DC uses one extra shift (doubled rounding) */
        if (lev) // dequant
        {
            if (data > 0)
            {
                level[ncoeff] = lev;
                coef[idx] = lev;
            }
            else
            {
                level[ncoeff] = -lev;
                coef[idx] = -lev;
            }
            run[ncoeff++] = zero_run;
            zero_run = 0;
        }
        else
        {
            zero_run++;
            coef[idx] = 0;
        }
    }

    /* inverse transform DC */
    encvid->numcoefdc = ncoeff;
    if (ncoeff)
    {
        quant = dequant_coefres[Rq][0];

        for (j = 0; j < 4; j++)
        {
            m0 = coef[0] + coef[4];
            m1 = coef[0] - coef[4];
            m2 = coef[8] + coef[12];
            m3 = coef[8] - coef[12];

            coef[0] = m0 + m2;
            coef[4] = m0 - m2;
            coef[8] = m1 - m3;
            coef[12] = m1 + m3;

            coef += 64;
        }
        coef -= 256;

        if (Qq >= 2)  /* this way should be faster than JM */
        {             /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. */
            Qq -= 2;
            for (j = 0; j < 4; j++)
            {
                m0 = coef[0] + coef[64];
                m1 = coef[0] - coef[64];
                m2 = coef[128] + coef[192];
                m3 = coef[128] - coef[192];

                coef[0] = ((m0 + m2) * quant) << Qq;
                coef[64] = ((m0 - m2) * quant) << Qq;
                coef[128] = ((m1 - m3) * quant) << Qq;
                coef[192] = ((m1 + m3) * quant) << Qq;

                coef += 4;
            }
            Qq += 2; /* restore the value */
        }
        else
        {
            Qq = 2 - Qq;
            offset = 1 << (Qq - 1);

            for (j = 0; j < 4; j++)
            {
                m0 = coef[0] + coef[64];
                m1 = coef[0] - coef[64];
                m2 = coef[128] + coef[192];
                m3 = coef[128] - coef[192];

                coef[0] = (((m0 + m2) * quant + offset) >> Qq);
                coef[64] = (((m0 - m2) * quant + offset) >> Qq);
                coef[128] = (((m1 - m3) * quant + offset) >> Qq);
                coef[192] = (((m1 + m3) * quant + offset) >> Qq);

                coef += 4;
            }
            Qq = 2 - Qq; /* restore the value */
        }
        coef -= 16; /* back to the origin */
    }

    /* now zigzag scan ac coefs, quant, iquant and itrans */
    run = encvid->run[0];
    level = encvid->level[0];

    /* offset btw 4x4 block */
    offset_cur[0] = 0;
    offset_cur[1] = (pitch << 2) - 8;

    /* offset btw 8x8 block */
    offset_cur[2] = 8 - (pitch << 3);
    offset_cur[3] = -8;

    /* similarly for pred */
    offset_pred[0] = 0;
    offset_pred[1] = 56;
    offset_pred[2] = -120;
    offset_pred[3] = -8;

    currMB->CBP = 0;

    for (b8 = 0; b8 < 4; b8++)
    {
        for (b4 = 0; b4 < 4; b4++)
        {
            zero_run = 0;
            ncoeff = 0;

            for (k = 1; k < 16; k++) /* k=0 is the DC term, handled above */
            {
                idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
                data = coef[idx];
                quant = quant_coef[Rq][k];
                if (data > 0)
                {
                    lev = data * quant + qp_const;
                }
                else
                {
                    lev = -data * quant + qp_const;
                }
                lev >>= q_bits;
                if (lev)
                {
                    /* dequant */
                    quant = dequant_coefres[Rq][k];
                    if (data > 0)
                    {
                        level[ncoeff] = lev;
                        coef[idx] = (lev * quant) << Qq;
                    }
                    else
                    {
                        level[ncoeff] = -lev;
                        coef[idx] = (-lev * quant) << Qq;
                    }
                    run[ncoeff++] = zero_run;
                    zero_run = 0;
                }
                else
                {
                    zero_run++;
                    coef[idx] = 0;
                }
            }

            currMB->nz_coeff[blkIdx2blkXY[b8][b4]] = ncoeff; /* in raster scan !!! */
            if (ncoeff)
            {
                currMB->CBP |= (1 << b8);

                // do inverse transform here
                for (j = 4; j > 0; j--)
                {
                    r0 = coef[0] + coef[2];
                    r1 = coef[0] - coef[2];
                    r2 = (coef[1] >> 1) - coef[3];
                    r3 = coef[1] + (coef[3] >> 1);

                    coef[0] = r0 + r3;
                    coef[1] = r1 + r2;
                    coef[2] = r1 - r2;
                    coef[3] = r0 - r3;

                    coef += 16;
                }
                coef -= 64;
                for (j = 4; j > 0; j--)
                {
                    r0 = coef[0] + coef[32];
                    r1 = coef[0] - coef[32];
                    r2 = (coef[16] >> 1) - coef[48];
                    r3 = coef[16] + (coef[48] >> 1);

                    r0 += r3;
                    r3 = (r0 - (r3 << 1)); /* r0-r3 */
                    r1 += r2;
                    r2 = (r1 - (r2 << 1)); /* r1-r2 */

                    r0 += 32;
                    r1 += 32;
                    r2 += 32;
                    r3 += 32;

                    r0 = pred[0] + (r0 >> 6);
                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                    r1 = pred[16] + (r1 >> 6);
                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                    r2 = pred[32] + (r2 >> 6);
                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                    r3 = pred[48] + (r3 >> 6);
                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */

                    *curL = r0;
                    *(curL += pitch) = r1;
                    *(curL += pitch) = r2;
                    curL[pitch] = r3;

                    curL -= (pitch << 1);
                    curL++;
                    pred++;
                    coef++;
                }
            }
            else // do DC-only inverse
            {
                m0 = coef[0] + 32;

                for (j = 4; j > 0; j--)
                {
                    r0 = pred[0] + (m0 >> 6);
                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                    r1 = pred[16] + (m0 >> 6);
                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                    r2 = pred[32] + (m0 >> 6);
                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                    r3 = pred[48] + (m0 >> 6);
                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */

                    *curL = r0;
                    *(curL += pitch) = r1;
                    *(curL += pitch) = r2;
                    curL[pitch] = r3;

                    curL -= (pitch << 1);
                    curL++;
                    pred++;
                }
                coef += 4;
            }

            run += 16;  // follow coding order
            level += 16;
            curL +=
            offset_cur[b4&1];
            pred += offset_pred[b4&1];
            coef += offset_pred[b4&1];
        }
        curL += offset_cur[2 + (b8&1)];
        pred += offset_pred[2 + (b8&1)];
        coef += offset_pred[2 + (b8&1)];
    }

    return ;
}

/* Chroma transform path for one 8x8 plane (cr selects Cb/Cr): 4x4 forward
   transforms, 2x2 Hadamard on the four DC terms, DC and AC quant/dequant,
   then inverse transform reconstructing into curC.  A low AC cost
   (coeff_cost < _CHROMA_COEFF_COST_) drops all AC coefficients and
   reconstructs from DC only. */
void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr)
{
    AVCCommonObj *video = encvid->common;
    AVCMacroblock *currMB = video->currMB;
    int org_pitch = (encvid->currInput->pitch) >> 1;
    int pitch = (video->currPic->pitch) >> 1;
    int pred_pitch = 16;
    int16 *coef = video->block + 256;
    uint8 *pred = video->pred_block;
    int j, blk_x, blk_y, k, idx, b4;
    int r0, r1, r2, r3, m0;
    int Qq, Rq, qp_const, q_bits, quant;
    int *level, *run, zero_run, ncoeff;
    int data, lev;
    int offset_cur[2], offset_pred[2], offset_coef[2];
    uint8 nz_temp[4];
    int  coeff_cost;

    if (cr)  /* Cr plane sits 8 columns to the right of Cb */
    {
        coef += 8;
        pred += 8;
    }

    if (currMB->mb_intra == 0) // inter mode
    {
        pred = curC;
        pred_pitch = pitch;
    }

    /* do 4x4 transform */
    /* horizontal */
    for (j = 8; j > 0; j--)
    {
        for (blk_x = 2; blk_x > 0; blk_x--)
        {
            /* calculate the residue first */
            r0 = *orgC++ - *pred++;
            r1 = *orgC++ - *pred++;
            r2 = *orgC++ - *pred++;
            r3 = *orgC++ - *pred++;

            r0 += r3;               //ptr[0] + ptr[3];
            r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];
            r1 += r2;               //ptr[1] + ptr[2];
            r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];

            *coef++ = r0 + r1;
            *coef++ = (r3 << 1) + r2;
            *coef++ = r0 - r1;
            *coef++ = r3 - (r2 << 1);
        }
        coef += 8; // coef pitch is 16
        pred += (pred_pitch - 8); // pred_pitch is 16
        orgC += (org_pitch - 8);
    }
    pred -= (pred_pitch << 3);
    coef -= 128;

    /* vertical */
    for (blk_y = 2; blk_y > 0; blk_y--)
    {
        for (j = 8; j > 0; j--)
        {
            r0 = coef[0] + coef[48];
            r3 = coef[0] - coef[48];
            r1 = coef[16] + coef[32];
            r2 = coef[16] - coef[32];

            coef[0] = r0 + r1;
            coef[32] = r0 - r1;
            coef[16] = (r3 << 1) + r2;
            coef[48] = r3 - (r2 << 1);

            coef++;
        }
        coef += 56;
    }

    /* then perform DC transform */
    coef -= 128;

    /* 2x2 transform of DC components*/
    r0 = coef[0];
    r1 = coef[4];
    r2 = coef[64];
    r3 = coef[68];

    coef[0] = r0 + r1 + r2 + r3;
    coef[4] = r0 - r1 + r2 - r3;
    coef[64] = r0 + r1 - r2 - r3;
    coef[68] = r0 - r1 - r2 + r3;

    Qq = video->QPc_div_6;
    Rq = video->QPc_mod_6;
    quant = quant_coef[Rq][0];
    q_bits = 15 + Qq;
    qp_const = encvid->qp_const_c;

    zero_run = 0;
    ncoeff = 0;
    run = encvid->runcdc + (cr << 2);
    level = encvid->levelcdc + (cr << 2);

    /* in zigzag scan order */
    for (k = 0; k < 4; k++)
    {
        idx = ((k >> 1) << 6) + ((k & 1) << 2);
        data = coef[idx];
        if (data > 0)
        {
            lev = data * quant + (qp_const << 1);
        }
        else
        {
            lev = -data * quant + (qp_const << 1);
        }
        lev >>= (q_bits + 1);
        if (lev)
        {
            if (data > 0)
            {
                level[ncoeff] = lev;
                coef[idx] = lev;
            }
            else
            {
                level[ncoeff] = -lev;
                coef[idx] = -lev;
            }
            run[ncoeff++] = zero_run;
            zero_run = 0;
        }
        else
        {
            zero_run++;
            coef[idx] = 0;
        }
    }

    encvid->numcoefcdc[cr] = ncoeff;
    if (ncoeff)
    {
        currMB->CBP |= (1 << 4); // DC present

        // do inverse transform
        quant = dequant_coefres[Rq][0];

        r0 = coef[0] + coef[4];
        r1 = coef[0] - coef[4];
        r2 = coef[64] + coef[68];
        r3 = coef[64] - coef[68];

        r0 += r2;
        r2 = r0 - (r2 << 1);
        r1 += r3;
        r3 = r1 - (r3 << 1);

        if (Qq >= 1)
        {
            Qq -= 1;
            coef[0] = (r0 * quant) << Qq;
            coef[4] = (r1 * quant) << Qq;
            coef[64] = (r2 * quant) << Qq;
            coef[68] = (r3 * quant) << Qq;
            Qq++;
        }
        else
        {
            coef[0] = (r0 * quant) >> 1;
            coef[4] = (r1 * quant) >> 1;
            coef[64] = (r2 * quant) >> 1;
            coef[68] = (r3 * quant) >> 1;
        }
    }

    /* now do AC zigzag scan, quant, iquant and itrans */
    if (cr)
    {
        run = encvid->run[20];
        level = encvid->level[20];
    }
    else
    {
        run = encvid->run[16];
        level = encvid->level[16];
    }

    /* offset btw 4x4 block */
    offset_cur[0] = 0;
    offset_cur[1] = (pitch << 2) - 8;

    offset_pred[0] = 0;
    offset_pred[1] = (pred_pitch << 2) - 8;

    offset_coef[0] = 0;
    offset_coef[1] = 56;

    coeff_cost = 0;

    for (b4 = 0; b4 < 4; b4++)
    {
        zero_run = 0;
        ncoeff = 0;
        for (k = 1; k < 16; k++) /* in zigzag scan order */
        {
            idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
            data = coef[idx];
            quant = quant_coef[Rq][k];
            if (data > 0)
            {
                lev = data * quant + qp_const;
            }
            else
            {
                lev = -data * quant + qp_const;
            }
            lev >>= q_bits;
            if (lev)
            {
                /* for RD performance*/
                if (lev > 1)
                    coeff_cost += MAX_VALUE; // set high cost, shall not be discarded
                else
                    coeff_cost += COEFF_COST[DISABLE_THRESHOLDING][zero_run];

                /* dequant */
                quant = dequant_coefres[Rq][k];
                if (data > 0)
                {
                    level[ncoeff] = lev;
                    coef[idx] = (lev * quant) << Qq;
                }
                else
                {
                    level[ncoeff] = -lev;
                    coef[idx] = (-lev * quant) << Qq;
                }
                run[ncoeff++] = zero_run;
                zero_run = 0;
            }
            else
            {
                zero_run++;
                coef[idx] = 0;
            }
        }

        nz_temp[b4] = ncoeff; // raster scan

        // just advance the pointers for now, do IDCT later
        coef += 4;
        run += 16;
        level += 16;
        coef += offset_coef[b4&1];
    }

    /* rewind the pointers */
    coef -= 128;
    if (coeff_cost < _CHROMA_COEFF_COST_)
    {
        /* if it's not efficient to encode any blocks. Just do DC only */
        /* We can reset level and run also, but setting nz to zero should be enough. */
        currMB->nz_coeff[16+(cr<<1)] = 0;
        currMB->nz_coeff[17+(cr<<1)] = 0;
        currMB->nz_coeff[20+(cr<<1)] = 0;
        currMB->nz_coeff[21+(cr<<1)] = 0;

        for (b4 = 0; b4 < 4; b4++)
        {
            // do DC-only inverse
            m0 = coef[0] + 32;

            for (j = 4; j > 0; j--)
            {
                r0 = pred[0] + (m0 >> 6);
                if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                r1 = *(pred += pred_pitch) + (m0 >> 6);
                if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                r2 = pred[pred_pitch] + (m0 >> 6);
                if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                r3 = pred[pred_pitch<<1] + (m0 >> 6);
                if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */

                *curC = r0;
                *(curC += pitch) = r1;
                *(curC += pitch) = r2;
                curC[pitch] = r3;

                curC -= (pitch << 1);
                curC++;
                pred += (1 - pred_pitch);
            }
            coef += 4;
            curC += offset_cur[b4&1];
            pred += offset_pred[b4&1];
            coef += offset_coef[b4&1];
        }
    }
    else // not dropping anything, continue with the IDCT
    {
        for (b4 = 0; b4 < 4; b4++)
        {
            ncoeff = nz_temp[b4] ; // in raster scan
            currMB->nz_coeff[16+(b4&1)+(cr<<1)+((b4>>1)<<2)] = ncoeff; // in raster scan

            if (ncoeff) // do a check on the nonzero-coeff
            {
                currMB->CBP |= (2 << 4);

                // do inverse transform here
                for (j = 4; j > 0; j--)
                {
                    r0 = coef[0] + coef[2];
                    r1 = coef[0] - coef[2];
                    r2 =
                    (coef[1] >> 1) - coef[3];
                    r3 = coef[1] + (coef[3] >> 1);

                    coef[0] = r0 + r3;
                    coef[1] = r1 + r2;
                    coef[2] = r1 - r2;
                    coef[3] = r0 - r3;

                    coef += 16;
                }
                coef -= 64;
                for (j = 4; j > 0; j--)
                {
                    r0 = coef[0] + coef[32];
                    r1 = coef[0] - coef[32];
                    r2 = (coef[16] >> 1) - coef[48];
                    r3 = coef[16] + (coef[48] >> 1);

                    r0 += r3;
                    r3 = (r0 - (r3 << 1)); /* r0-r3 */
                    r1 += r2;
                    r2 = (r1 - (r2 << 1)); /* r1-r2 */

                    r0 += 32;
                    r1 += 32;
                    r2 += 32;
                    r3 += 32;

                    r0 = pred[0] + (r0 >> 6);
                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                    r1 = *(pred += pred_pitch) + (r1 >> 6);
                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                    r2 = pred[pred_pitch] + (r2 >> 6);
                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                    r3 = pred[pred_pitch<<1] + (r3 >> 6);
                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */

                    *curC = r0;
                    *(curC += pitch) = r1;
                    *(curC += pitch) = r2;
                    curC[pitch] = r3;

                    curC -= (pitch << 1);
                    curC++;
                    pred += (1 - pred_pitch);
                    coef++;
                }
            }
            else
            {
                // do DC-only inverse
                m0 = coef[0] + 32;

                for (j = 4; j > 0; j--)
                {
                    r0 = pred[0] + (m0 >> 6);
                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31)); /* clip */
                    r1 = *(pred += pred_pitch) + (m0 >> 6);
                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31)); /* clip */
                    r2 = pred[pred_pitch] + (m0 >> 6);
                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31)); /* clip */
                    r3 = pred[pred_pitch<<1] + (m0 >> 6);
                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31)); /* clip */

                    *curC = r0;
                    *(curC += pitch) = r1;
                    *(curC += pitch) = r2;
                    curC[pitch] = r3;

                    curC -= (pitch << 1);
                    curC++;
                    pred += (1 - pred_pitch);
                }
                coef += 4;
            }
            curC += offset_cur[b4&1];
            pred += offset_pred[b4&1];
            coef += offset_coef[b4&1];
        }
    }

    return ;
}

/* only DC transform */
/* 4x4 Hadamard transform + quantization of the 16 intra-16x16 luma DC
   terms (levels/runs stored in encvid->leveldc/rundc); returns the
   number of nonzero quantized DC coefficients. */
int TransQuantIntra16DC(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    int16 *block = video->block;
    int *level = encvid->leveldc;
    int *run = encvid->rundc;
    int16 *ptr = block;
    int r0, r1, r2, r3, j;
    int Qq = video->QPy_div_6;
    int Rq = video->QPy_mod_6;
    int q_bits, qp_const, quant;
    int data, lev, zero_run;
    int k, ncoeff, idx;

    /* DC transform */
    /* horizontal */
    j = 4;
    while (j)
    {
        r0 = ptr[0] + ptr[12];
        r3 = ptr[0] - ptr[12];
        r1 = ptr[4] + ptr[8];
        r2 = ptr[4] - ptr[8];

        ptr[0] = r0 + r1;
        ptr[8] = r0 - r1;
        ptr[4] = r3 + r2;
        ptr[12] = r3 - r2;

        ptr += 64;
        j--;
    }
    /* vertical */
    ptr = block;
    j = 4;
    while (j)
    {
        r0 = ptr[0] + ptr[192];
        r3 = ptr[0] - ptr[192];
        r1 = ptr[64] + ptr[128];
        r2 = ptr[64] - ptr[128];

        ptr[0] = (r0 + r1) >> 1;
        ptr[128] = (r0 - r1) >> 1;
        ptr[64] = (r3 + r2) >> 1;
        ptr[192] = (r3 - r2) >> 1;

        ptr += 4;
        j--;
    }

    quant = quant_coef[Rq][0];
    q_bits = 15 + Qq;
    qp_const = (1 << q_bits) / 3;  // intra rounding constant

    zero_run = 0;
    ncoeff = 0;

    for (k = 0; k < 16; k++) /* in zigzag scan order */
    {
        idx = ZIGZAG2RASTERDC[k];
        data = block[idx];
        if (data > 0)
        {
            lev = data * quant + (qp_const << 1);
        }
        else
        {
            lev = -data * quant + (qp_const << 1);
        }
        lev >>= (q_bits + 1);
        if (lev)
        {
            if (data > 0)
            {
                level[ncoeff] = lev;
                block[idx] = lev;
            }
            else
            {
                level[ncoeff] = -lev;
                block[idx] = -lev;
            }
            run[ncoeff++] = zero_run;
            zero_run = 0;
        }
        else
        {
            zero_run++;
            block[idx] = 0;
        }
    }

    return ncoeff;
}

/* 2x2 Hadamard transform + quantization of the four chroma DC terms for
   the plane selected by cr; rounding constant depends on slice type.
   Returns the number of nonzero quantized coefficients. */
int TransQuantChromaDC(AVCEncObject *encvid, int16 *block, int slice_type, int cr)
{
    AVCCommonObj *video = encvid->common;
    int *level, *run;
    int r0, r1, r2, r3;
    int Qq, Rq, q_bits, qp_const, quant;
    int data, lev, zero_run;
    int k, ncoeff, idx;

    level = encvid->levelcdc + (cr << 2); /* cb or cr */
    run = encvid->runcdc + (cr << 2);

    /* 2x2 transform of DC components*/
    r0 = block[0];
    r1 = block[4];
    r2 = block[64];
    r3 = block[68];

    block[0] = r0 + r1 + r2 + r3;
    block[4] = r0 - r1 + r2 - r3;
    block[64] = r0 + r1 - r2 - r3;
    block[68] = r0 - r1 - r2 + r3;

    Qq = video->QPc_div_6;
    Rq = video->QPc_mod_6;
    quant = quant_coef[Rq][0];
    q_bits = 15 + Qq;
    if (slice_type == AVC_I_SLICE)
    {
        qp_const = (1 << q_bits) / 3;
    }
    else
    {
        qp_const = (1 << q_bits) / 6;
    }

    zero_run = 0;
    ncoeff = 0;

    for (k = 0; k < 4; k++) /* in zigzag scan order */
    {
        idx = ((k >> 1) << 6) + ((k & 1) << 2);
        data = block[idx];
        if (data > 0)
        {
            lev = data * quant + (qp_const << 1);
        }
        else
        {
            lev = -data * quant + (qp_const << 1);
        }
        lev >>= (q_bits + 1);
        if (lev)
        {
            if (data > 0)
            {
                level[ncoeff] = lev;
                block[idx] = lev;
            }
            else
            {
                level[ncoeff] = -lev;
                block[idx] = -lev;
            }
            run[ncoeff++] = zero_run;
            zero_run = 0;
        }
        else
        {
            zero_run++;
            block[idx] = 0;
        }
    }

    return ncoeff;
}



================================================
FILE: RtspCamera/jni/avc_h264/enc/src/findhalfpel.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "avcenc_lib.h"
#include "oscl_base_macros.h"

/* 3/29/01 fast half-pel search based on neighboring guess */
/* value ranging from 0 to 4, high complexity (more accurate) to
   low complexity (less accurate) */
#define HP_DISTANCE_TH      5 // 2  /* half-pel distance threshold */

#define PREF_16_VEC 129     /* 1MV bias versus 4MVs*/

const static int distance_tab[9][9] =   /* [hp_guess][k] */
{
    {0, 1, 1, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 2, 3, 4, 3, 2, 1},
    {1, 0, 0, 0, 1, 2, 3, 2, 1},
    {1, 2, 1, 0, 1, 2, 3, 4, 3},
    {1, 2, 1, 0, 0, 0, 1, 2, 3},
    {1, 4, 3, 2, 1, 0, 1, 2, 3},
    {1, 2, 3, 2, 1, 0, 0, 0, 1},
    {1, 2, 3, 4, 3, 2, 1, 0, 1},
    {1, 0, 1, 2, 3, 2, 1, 0, 0}
};

/* clamp x to [0,255] without a branch on the sign bit */
#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \
                x = 0xFF & (~(x>>31));}

#define CLIP_UPPER16(x)     if((uint)x >= 0x20000000){ \
                x = 0xFF0000 & (~(x>>31));} \
                else { \
                x = (x>>5)&0xFF0000; \
                }

/*=====================================================================
    Function:   AVCFindHalfPelMB
    Date:       10/31/2007
    Purpose:    Find half pel resolution MV surrounding the full-pel MV
=====================================================================*/
/* Refines mot (updated in place) first to half-pel, then to quarter-pel
   precision using SATD + lambda-weighted MV cost; records the winning
   sub-pel positions in encvid->best_hpel_pos / best_qpel_pos and returns
   the minimum SATD (without the MV cost). */
int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,
                     int xpos, int ypos, int hp_guess, int cmvx, int cmvy)
{
    AVCPictureData *currPic = encvid->common->currPic;
    int lx = currPic->pitch;
    int d, dmin, satd_min;
    uint8* cand;
    int lambda_motion = encvid->lambda_motion;
    uint8 *mvbits = encvid->mvbits;  // NOTE(review): not referenced below in this function
    int mvcost;
    /* list of candidate to go through for half-pel search*/
    uint8 *subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions
    uint8 **hpel_cand = (uint8**) encvid->hpel_cand; /* half-pel position */

    /* search offsets, in half-pel units, spiraling out from the center */
    int xh[9] = {0, 0, 2, 2, 2, 0, -2, -2, -2};
    int yh[9] = {0, -2, -2, 0, 2, 2, 2, 0, -2};
    int xq[8] = {0, 1, 1, 1, 0, -1, -1, -1};
    int yq[8] = { -1, -1, 0, 1, 1, 1, 0, -1};
    int h, hmin, q, qmin;

    OSCL_UNUSED_ARG(xpos);
    OSCL_UNUSED_ARG(ypos);
    OSCL_UNUSED_ARG(hp_guess);

    GenerateHalfPelPred(subpel_pred, ncand, lx);

    cur = encvid->currYMB; // pre-load current original MB

    cand = hpel_cand[0];

    // find cost for the current full-pel position
    dmin = SATD_MB(cand, cur, 65535); // get Hadamaard transform SAD
    mvcost = MV_COST_S(lambda_motion, mot->x, mot->y, cmvx, cmvy);
    satd_min = dmin;
    dmin += mvcost;
    hmin = 0;

    /* find half-pel */
    for (h = 1; h < 9; h++)
    {
        d = SATD_MB(hpel_cand[h], cur, dmin);  /* early-exits once cost exceeds dmin */
        mvcost = MV_COST_S(lambda_motion, mot->x + xh[h], mot->y + yh[h], cmvx, cmvy);
        d += mvcost;

        if (d < dmin)
        {
            dmin = d;
            hmin = h;
            satd_min = d - mvcost;
        }
    }

    mot->sad = dmin;
    mot->x += xh[hmin];
    mot->y += yh[hmin];
    encvid->best_hpel_pos = hmin;

    /*** search for quarter-pel ****/
    GenerateQuartPelPred(encvid->bilin_base[hmin], &(encvid->qpel_cand[0][0]), hmin);

    encvid->best_qpel_pos = qmin = -1;

    for (q = 0; q < 8; q++)
    {
        d = SATD_MB(encvid->qpel_cand[q], cur, dmin);
        mvcost = MV_COST_S(lambda_motion, mot->x + xq[q], mot->y + yq[q], cmvx, cmvy);
        d += mvcost;

        if (d < dmin)
        {
            dmin = d;
            qmin = q;
            satd_min = d - mvcost;
        }
    }

    if (qmin != -1)  /* a quarter-pel position improved on the half-pel best */
    {
        mot->sad = dmin;
        mot->x += xq[qmin];
        mot->y += yq[qmin];
        encvid->best_qpel_pos = qmin;
    }

    return satd_min;
}

/** This function generates sub-pel prediction around the full-pel candidate.
Each sub-pel position array is 20 pixel wide (for word-alignment) and 17 pixel tall. */
/** The sub-pel position is labeled in spiral manner from the center.
*/

/* Build the half-pel interpolated planes (6-tap filter 1,-5,20,20,-5,1, as
   visible in the a+f-5*(b+e)+20*(c+d) expressions) into 'subpel_pred' from
   the full-pel candidate 'ncand' (reference pitch 'lx').  Internal working
   pitches are fixed: 24 for the byte planes, 18 for the 16-bit horizontal
   intermediate buffer. */
void GenerateHalfPelPred(uint8* subpel_pred, uint8 *ncand, int lx)
{
    /* let's do straightforward way first */
    uint8 *ref;
    uint8 *dst;
    uint8 tmp8;
    int32 tmp32;
    int16 tmp_horz[18*22], *dst_16, *src_16;
    register int a = 0, b = 0, c = 0, d = 0, e = 0, f = 0; // temp register
    int msk;
    int i, j;

    /* first copy full-pel to the first array */
    /* to be optimized later based on byte-offset load */
    ref = ncand - 3 - lx - (lx << 1); /* move back (-3,-3) */
    dst = subpel_pred;

    dst -= 4; /* offset */
    for (j = 0; j < 22; j++) /* 24x22 */
    {
        i = 6;
        while (i > 0)
        {
            /* assemble 4 bytes into one little-endian word store */
            tmp32 = *ref++;
            tmp8 = *ref++;
            tmp32 |= (tmp8 << 8);
            tmp8 = *ref++;
            tmp32 |= (tmp8 << 16);
            tmp8 = *ref++;
            tmp32 |= (tmp8 << 24);
            *((uint32*)(dst += 4)) = tmp32;
            i--;
        }
        ref += (lx - 24);
    }

    /* from the first array, we do horizontal interp */
    ref = subpel_pred + 2;
    dst_16 = tmp_horz; /* 17 x 22 */

    for (j = 4; j > 0; j--)
    {
        for (i = 16; i > 0; i -= 4)
        {
            a = ref[-2];
            b = ref[-1];
            c = ref[0];
            d = ref[1];
            e = ref[2];
            f = ref[3];
            /* four outputs per iteration, reusing previously loaded taps */
            *dst_16++ = a + f - 5 * (b + e) + 20 * (c + d);
            a = ref[4];
            *dst_16++ = b + a - 5 * (c + f) + 20 * (d + e);
            b = ref[5];
            *dst_16++ = c + b - 5 * (d + a) + 20 * (e + f);
            c = ref[6];
            *dst_16++ = d + c - 5 * (e + b) + 20 * (f + a);

            ref += 4;
        }
        /* do the 17th column here */
        d = ref[3];
        *dst_16 = e + d - 5 * (f + c) + 20 * (a + b);
        dst_16 += 2; /* stride for tmp_horz is 18 */

        ref += 8;  /* stride for ref is 24 */
        if (j == 3)  // move 18 lines down
        {
            dst_16 += 324;//18*18;
            ref += 432;//18*24;
        }
    }

    ref -= 480;//20*24;
    dst_16 -= 360;//20*18;

    dst = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* go to the 14th array 17x18*/

    for (j = 18; j > 0; j--)
    {
        for (i = 16; i > 0; i -= 4)
        {
            a = ref[-2];
            b = ref[-1];
            c = ref[0];
            d = ref[1];
            e = ref[2];
            f = ref[3];
            /* write the raw 16-bit value for the later vertical pass AND the
               rounded/clipped byte for the H2Q plane */
            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
            *dst_16++ = tmp32;
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *dst++ = tmp32;

            a = ref[4];
            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
            *dst_16++ = tmp32;
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *dst++ = tmp32;

            b = ref[5];
            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
            *dst_16++ = tmp32;
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *dst++ = tmp32;

            c = ref[6];
            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
            *dst_16++ = tmp32;
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *dst++ = tmp32;

            ref += 4;
        }
        /* do the 17th column here */
        d = ref[3];
        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
        *dst_16 = tmp32;
        tmp32 = (tmp32 + 16) >> 5;
        CLIP_RESULT(tmp32)
        *dst = tmp32;

        dst += 8; /* stride for dst is 24 */
        dst_16 += 2; /* stride for tmp_horz is 18 */
        ref += 8; /* stride for ref is 24 */
    }

    /* Do middle point filtering*/
    /* vertical 6-tap over the 16-bit horizontal results; rounding is
       (x+512)>>10 because two filter passes have been applied */
    src_16 = tmp_horz; /* 17 x 22 */
    dst = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* 12th array 17x17*/
    dst -= 24; // offset
    for (i = 0; i < 17; i++)
    {
        for (j = 16; j > 0; j -= 4)
        {
            a = *src_16;
            b = *(src_16 += 18);
            c = *(src_16 += 18);
            d = *(src_16 += 18);
            e = *(src_16 += 18);
            f = *(src_16 += 18);

            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
            tmp32 = (tmp32 + 512) >> 10;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32;

            a = *(src_16 += 18);
            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
            tmp32 = (tmp32 + 512) >> 10;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32;

            b = *(src_16 += 18);
            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
            tmp32 = (tmp32 + 512) >> 10;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32;

            c = *(src_16 += 18);
            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
            tmp32 = (tmp32 + 512) >> 10;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32;

            src_16 -= (18 << 2);
        }
        d = src_16[90]; // 18*5
        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
        tmp32 = (tmp32 + 512) >> 10;
        CLIP_RESULT(tmp32)
        dst[24] = tmp32;

        src_16 -= ((18 << 4) - 1);
        dst -= ((24 << 4) - 1);
    }

    /* do vertical interpolation */
    ref = subpel_pred + 2;
    dst = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE; /* 10th array 18x17 */
    dst -= 24; // offset

    /* first two columns done scalar... */
    for (i = 2; i > 0; i--)
    {
        for (j = 16; j > 0; j -= 4)
        {
            a = *ref;
            b = *(ref += 24);
            c = *(ref += 24);
            d = *(ref += 24);
            e = *(ref += 24);
            f = *(ref += 24);

            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            a = *(ref += 24);
            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            b = *(ref += 24);
            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            c = *(ref += 24);
            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            ref -= (24 << 2);
        }
        d = ref[120]; // 24*5
        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
        tmp32 = (tmp32 + 16) >> 5;
        CLIP_RESULT(tmp32)
        dst[24] = tmp32; // 10th

        dst -= ((24 << 4) - 1);
        ref -= ((24 << 4) - 1);
    }

    // note that using SIMD here doesn't help much, the cycle almost stays the same
    // one can just use the above code and change the for(i=2 to for(i=18
    /* ...remaining 16 columns processed two pixels at a time packed in one
       32-bit word (0xFF00FF masks separate even/odd bytes) */
    for (i = 16; i > 0; i -= 4)
    {
        msk = 0;
        for (j = 17; j > 0; j--)
        {
            a = *((uint32*)ref); /* load 4 bytes */
            b = (a >> 8) & 0xFF00FF; /* second and fourth byte */
            a &= 0xFF00FF;

            c = *((uint32*)(ref + 120));
            d = (c >> 8) & 0xFF00FF;
            c &= 0xFF00FF;

            a += c;
            b += d;

            e = *((uint32*)(ref + 72)); /* e, f */
            f = (e >> 8) & 0xFF00FF;
            e &= 0xFF00FF;

            c = *((uint32*)(ref + 48)); /* c, d */
            d = (c >> 8) & 0xFF00FF;
            c &= 0xFF00FF;

            c += e;
            d += f;

            a += 20 * c;
            b += 20 * d;
            a += 0x100010; /* rounding constant 16 per packed lane */
            b += 0x100010;

            e = *((uint32*)(ref += 24)); /* e, f */
            f = (e >> 8) & 0xFF00FF;
            e &= 0xFF00FF;

            c = *((uint32*)(ref + 72)); /* c, d */
            d = (c >> 8) & 0xFF00FF;
            c &= 0xFF00FF;

            c += e;
            d += f;

            a -= 5 * c;
            b -= 5 * d;

            c = a << 16;
            d = b << 16;
            CLIP_UPPER16(a)
            CLIP_UPPER16(c)
            CLIP_UPPER16(b)
            CLIP_UPPER16(d)

            a |= (c >> 16);
            b |= (d >> 16);
            //  a>>=5;
            //  b>>=5;
            /* clip */
            //  msk |= b;  msk|=a;
            //  a &= 0xFF00FF;
            //  b &= 0xFF00FF;
            a |= (b << 8); /* pack it back */
            *((uint16*)(dst += 24)) = a & 0xFFFF; //dst is not word-aligned.
            *((uint16*)(dst + 2)) = a >> 16;
        }
        dst -= 404; // 24*17-4
        ref -= 404;
        /*  if(msk & 0xFF00FF00) // need clipping
            {
                VertInterpWClip(dst,ref); // re-do 4 column with clip
            }*/
    }

    return ;
}

/* Scalar clipped vertical 6-tap pass over 4 columns.
   NOTE(review): its only call site in this file is the commented-out
   msk-based re-clip above, so this function appears unused unless that
   path is re-enabled — confirm against the rest of the project. */
void VertInterpWClip(uint8 *dst, uint8 *ref)
{
    int i, j;
    int a, b, c, d, e, f;
    int32 tmp32;

    dst -= 4;
    ref -= 4;

    for (i = 4; i > 0; i--)
    {
        for (j = 16; j > 0; j -= 4)
        {
            a = *ref;
            b = *(ref += 24);
            c = *(ref += 24);
            d = *(ref += 24);
            e = *(ref += 24);
            f = *(ref += 24);

            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            a = *(ref += 24);
            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            b = *(ref += 24);
            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            c = *(ref += 24);
            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
            tmp32 = (tmp32 + 16) >> 5;
            CLIP_RESULT(tmp32)
            *(dst += 24) = tmp32; // 10th

            ref -= (24 << 2);
        }
        d = ref[120]; // 24*5
        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
        tmp32 = (tmp32 + 16) >> 5;
        CLIP_RESULT(tmp32)
        dst[24] = tmp32; // 10th

        dst -= ((24 << 4) - 1);
        ref -= ((24 << 4) - 1);
    }

    return ;
}

/* Produce the 8 quarter-pel candidate 16x16 blocks (pitch 24, planes 384
   bytes apart in 'qpel_cand') by bilinear averaging ((x+y+1)>>1) of the four
   half-pel planes in 'bilin_base'.  Even hpel_pos uses the diamond pattern,
   odd uses the star pattern. */
void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_cand, int hpel_pos)
{
    // for even value of hpel_pos, start with pattern 1, otherwise, start with pattern 2
    int i, j;

    uint8 *c1 = qpel_cand;
    uint8 *tl = bilin_base[0];
    uint8 *tr = bilin_base[1];
    uint8 *bl = bilin_base[2];
    uint8 *br = bilin_base[3];
    int a, b, c, d;
    int offset = 1 - (384 * 7); /* step back across the 7 planes, advance 1 pixel */

    if (!(hpel_pos&1)) // diamond pattern
    {
        j = 16;
        while (j--)
        {
            i = 16;
            while (i--)
            {
                d = tr[24];
                a = *tr++;
                b = bl[1];
                c = *br++;

                *c1 = (c + a + 1) >> 1;
                *(c1 += 384) = (b + a + 1) >> 1; /* c2 */
                *(c1 += 384) = (b + c + 1) >> 1; /* c3 */
                *(c1 += 384) = (b + d + 1) >> 1; /* c4 */

                b = *bl++;

                *(c1 += 384) = (c + d + 1) >> 1; /* c5 */
                *(c1 += 384) = (b + d + 1) >> 1; /* c6 */
                *(c1 += 384) = (b + c + 1) >> 1; /* c7 */
                *(c1 += 384) = (b + a + 1) >> 1; /* c8 */

                c1 += offset;
            }
            // advance to the next line, pitch is 24
            tl += 8;
            tr += 8;
            bl += 8;
            br += 8;
            c1 += 8;
        }
    }
    else // star pattern
    {
        j = 16;
        while (j--)
        {
            i = 16;
            while (i--)
            {
                a = *br++;
                b = *tr++;
                c = tl[1];
                *c1 = (a + b + 1) >> 1;
                b = bl[1];
                *(c1 += 384) = (a + c + 1) >> 1; /* c2 */
                c = tl[25];
                *(c1 += 384) = (a + b + 1) >> 1; /* c3 */
                b = tr[23];
                *(c1 += 384) = (a + c + 1) >> 1; /* c4 */
                c = tl[24];
                *(c1 += 384) = (a + b + 1) >> 1; /* c5 */
                b = *bl++;
                *(c1 += 384) = (a + c + 1) >> 1; /* c6 */
                c = *tl++;
                *(c1 += 384) = (a + b + 1) >> 1; /* c7 */
                *(c1 += 384) = (a + c + 1) >> 1; /* c8 */
                c1 += offset;
            }
            // advance to the next line, pitch is 24
            tl += 8;
            tr += 8;
            bl += 8;
            br += 8;
            c1 += 8;
        }
    }

    return ;
}

/* assuming cand always has a pitch of 24 */
/* Wrapper around the SAD/SATD kernel: packs the early-termination threshold
   'dmin' in the upper 16 bits and the fixed candidate pitch (24) in the
   lower 16 bits, as AVCSAD_Macroblock_C expects. */
int SATD_MB(uint8 *cand, uint8 *cur, int dmin)
{
    int cost;

    dmin = (dmin << 16) | 24;
    cost = AVCSAD_Macroblock_C(cand, cur, dmin, NULL);

    return cost;
}



================================================
FILE: RtspCamera/jni/avc_h264/enc/src/header.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "avcenc_lib.h"
#include "avcenc_api.h"

/** see subclause 7.4.2.1 */
/* no need for checking the valid range , already done in SetEncodeParam(),
   if we have to send another SPS, the ranges should be verified first
   before users call PVAVCEncodeSPS() */
/* Serialize the active sequence parameter set into 'stream'.
   Field order mirrors the seq_parameter_set_rbsp() syntax; 'status' is only
   checked at group boundaries, relying on the bitstream writer to keep
   failing once it has failed. */
AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream)
{
    AVCCommonObj *video = encvid->common;
    AVCSeqParamSet *seqParam = video->currSeqParams;
    AVCVUIParams *vui = &(seqParam->vui_parameters);
    int i;
    AVCEnc_Status status = AVCENC_SUCCESS;

    //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"EncodeSPS",-1,-1);

    status = BitstreamWriteBits(stream, 8, seqParam->profile_idc);
    status = BitstreamWrite1Bit(stream, seqParam->constrained_set0_flag);
    status = BitstreamWrite1Bit(stream, seqParam->constrained_set1_flag);
    status = BitstreamWrite1Bit(stream, seqParam->constrained_set2_flag);
    status = BitstreamWrite1Bit(stream, seqParam->constrained_set3_flag);
    status = BitstreamWriteBits(stream, 4, 0); /* forbidden zero bits */
    if (status != AVCENC_SUCCESS)  /* we can check after each write also */
    {
        return status;
    }

    status = BitstreamWriteBits(stream, 8, seqParam->level_idc);
    status = ue_v(stream, seqParam->seq_parameter_set_id);
    status = ue_v(stream, seqParam->log2_max_frame_num_minus4);
    status = ue_v(stream, seqParam->pic_order_cnt_type);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    if (seqParam->pic_order_cnt_type == 0)
    {
        status = ue_v(stream, seqParam->log2_max_pic_order_cnt_lsb_minus4);
    }
    else if (seqParam->pic_order_cnt_type == 1)
    {
        status = BitstreamWrite1Bit(stream, seqParam->delta_pic_order_always_zero_flag);
        status = se_v(stream, seqParam->offset_for_non_ref_pic); /* upto 32 bits */
        status = se_v(stream, seqParam->offset_for_top_to_bottom_field); /* upto 32 bits */
        status = ue_v(stream, seqParam->num_ref_frames_in_pic_order_cnt_cycle);

        for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++)
        {
            status = se_v(stream, seqParam->offset_for_ref_frame[i]); /* upto 32 bits */
        }
    }
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    status = ue_v(stream, seqParam->num_ref_frames);
    status = BitstreamWrite1Bit(stream, seqParam->gaps_in_frame_num_value_allowed_flag);
    status = ue_v(stream, seqParam->pic_width_in_mbs_minus1);
    status = ue_v(stream, seqParam->pic_height_in_map_units_minus1);
    status = BitstreamWrite1Bit(stream, seqParam->frame_mbs_only_flag);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }
    /* if frame_mbs_only_flag is 0, then write, mb_adaptive_frame_field_frame here */

    status = BitstreamWrite1Bit(stream, seqParam->direct_8x8_inference_flag);

    status = BitstreamWrite1Bit(stream, seqParam->frame_cropping_flag);
    if (seqParam->frame_cropping_flag)
    {
        status = ue_v(stream, seqParam->frame_crop_left_offset);
        status = ue_v(stream, seqParam->frame_crop_right_offset);
        status = ue_v(stream, seqParam->frame_crop_top_offset);
        status = ue_v(stream, seqParam->frame_crop_bottom_offset);
    }
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    status = BitstreamWrite1Bit(stream, seqParam->vui_parameters_present_flag);
    if (seqParam->vui_parameters_present_flag)
    {
        /* not supported */
        //return AVCENC_SPS_FAIL;
        EncodeVUI(stream, vui);
    }

    return status;
}

/* Serialize the VUI parameters (Annex E syntax).  Note: write errors are not
   propagated here — the bitstream writer is expected to latch its failure
   state for the caller to observe. */
void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui)
{
    int temp;

    temp = vui->aspect_ratio_info_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        BitstreamWriteBits(stream, 8, vui->aspect_ratio_idc);
        if (vui->aspect_ratio_idc == 255) /* Extended_SAR */
        {
            BitstreamWriteBits(stream, 16, vui->sar_width);
            BitstreamWriteBits(stream, 16, vui->sar_height);
        }
    }

    temp = vui->overscan_info_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        BitstreamWrite1Bit(stream, vui->overscan_appropriate_flag);
    }

    temp = vui->video_signal_type_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        BitstreamWriteBits(stream, 3, vui->video_format);
        BitstreamWrite1Bit(stream, vui->video_full_range_flag);
        temp = vui->colour_description_present_flag;
        BitstreamWrite1Bit(stream, temp);
        if (temp)
        {
            BitstreamWriteBits(stream, 8, vui->colour_primaries);
            BitstreamWriteBits(stream, 8, vui->transfer_characteristics);
            BitstreamWriteBits(stream, 8, vui->matrix_coefficients);
        }
    }

    temp = vui->chroma_location_info_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        ue_v(stream, vui->chroma_sample_loc_type_top_field);
        ue_v(stream, vui->chroma_sample_loc_type_bottom_field);
    }

    temp = vui->timing_info_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        BitstreamWriteBits(stream, 32, vui->num_units_in_tick);
        BitstreamWriteBits(stream, 32, vui->time_scale);
        BitstreamWrite1Bit(stream, vui->fixed_frame_rate_flag);
    }

    temp = vui->nal_hrd_parameters_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        EncodeHRD(stream, &(vui->nal_hrd_parameters));
    }
    temp = vui->vcl_hrd_parameters_present_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        EncodeHRD(stream, &(vui->vcl_hrd_parameters));
    }
    if (vui->nal_hrd_parameters_present_flag || vui->vcl_hrd_parameters_present_flag)
    {
        BitstreamWrite1Bit(stream, vui->low_delay_hrd_flag);
    }
    BitstreamWrite1Bit(stream, vui->pic_struct_present_flag);
    temp = vui->bitstream_restriction_flag;
    BitstreamWrite1Bit(stream, temp);
    if (temp)
    {
        BitstreamWrite1Bit(stream, vui->motion_vectors_over_pic_boundaries_flag);
        ue_v(stream, vui->max_bytes_per_pic_denom);
        ue_v(stream, vui->max_bits_per_mb_denom);
        ue_v(stream, vui->log2_max_mv_length_horizontal);
        ue_v(stream, vui->log2_max_mv_length_vertical);
        ue_v(stream, vui->max_dec_frame_reordering);
        ue_v(stream, vui->max_dec_frame_buffering);
    }

    return ;
}

/* Serialize hrd_parameters() (Annex E.1.2) for one HRD (NAL or VCL). */
void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd)
{
    int i;

    ue_v(stream, hrd->cpb_cnt_minus1);
    BitstreamWriteBits(stream, 4, hrd->bit_rate_scale);
    BitstreamWriteBits(stream, 4, hrd->cpb_size_scale);
    for (i = 0; i <= (int)hrd->cpb_cnt_minus1; i++)
    {
        ue_v(stream, hrd->bit_rate_value_minus1[i]);
        ue_v(stream, hrd->cpb_size_value_minus1[i]);
        ue_v(stream, hrd->cbr_flag[i]); /* cbr_flag is a single bit in the spec;
                                           written via ue_v here (ue_v(0/1) emits
                                           1 or 3 bits) — NOTE(review): confirm
                                           this matches the decoder side */
    }
    BitstreamWriteBits(stream, 5, hrd->initial_cpb_removal_delay_length_minus1);
    BitstreamWriteBits(stream, 5, hrd->cpb_removal_delay_length_minus1);
    BitstreamWriteBits(stream, 5, hrd->dpb_output_delay_length_minus1);
    BitstreamWriteBits(stream, 5, hrd->time_offset_length);

    return ;
}

/** see subclause 7.4.2.2 */
/* no need for checking the valid range , already done in SetEncodeParam().
   If we have to send another SPS, the ranges should be verified first
   before users call PVAVCEncodeSPS()*/
/* Serialize the active picture parameter set into 'stream'. */
AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream)
{
    AVCCommonObj *video = encvid->common;
    AVCEnc_Status status = AVCENC_SUCCESS;
    AVCPicParamSet *picParam = video->currPicParams;
    int i, iGroup, numBits;
    uint temp;

    status = ue_v(stream, picParam->pic_parameter_set_id);
    status = ue_v(stream, picParam->seq_parameter_set_id);
    status = BitstreamWrite1Bit(stream, picParam->entropy_coding_mode_flag);
    status = BitstreamWrite1Bit(stream, picParam->pic_order_present_flag);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    /* FMO slice-group signalling */
    status = ue_v(stream, picParam->num_slice_groups_minus1);
    if (picParam->num_slice_groups_minus1 > 0)
    {
        status = ue_v(stream, picParam->slice_group_map_type);
        if (picParam->slice_group_map_type == 0)
        {
            for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++)
            {
                status = ue_v(stream, picParam->run_length_minus1[iGroup]);
            }
        }
        else if (picParam->slice_group_map_type == 2)
        {
            for (iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++)
            {
                status = ue_v(stream, picParam->top_left[iGroup]);
                status = ue_v(stream, picParam->bottom_right[iGroup]);
            }
        }
        else if (picParam->slice_group_map_type == 3 ||
                 picParam->slice_group_map_type == 4 ||
                 picParam->slice_group_map_type == 5)
        {
            status = BitstreamWrite1Bit(stream, picParam->slice_group_change_direction_flag);
            status = ue_v(stream, picParam->slice_group_change_rate_minus1);
        }
        else /*if(picParam->slice_group_map_type == 6)*/
        {
            status = ue_v(stream, picParam->pic_size_in_map_units_minus1);
            numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */
            i = picParam->num_slice_groups_minus1;
            while (i > 0)
            {
                numBits++;
                i >>= 1;
            }

            for (i = 0; i <= (int)picParam->pic_size_in_map_units_minus1; i++)
            {
                status = BitstreamWriteBits(stream, numBits, picParam->slice_group_id[i]);
            }
        }
    }
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    status = ue_v(stream, picParam->num_ref_idx_l0_active_minus1);
    status = ue_v(stream, picParam->num_ref_idx_l1_active_minus1);
    status = BitstreamWrite1Bit(stream, picParam->weighted_pred_flag);
    status = BitstreamWriteBits(stream, 2, picParam->weighted_bipred_idc);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    status = se_v(stream, picParam->pic_init_qp_minus26);
    status = se_v(stream, picParam->pic_init_qs_minus26);
    status = se_v(stream, picParam->chroma_qp_index_offset);

    /* pack the last three single-bit flags into one 3-bit write */
    temp = picParam->deblocking_filter_control_present_flag << 2;
    temp |= (picParam->constrained_intra_pred_flag << 1);
    temp |= picParam->redundant_pic_cnt_present_flag;

    status = BitstreamWriteBits(stream, 3, temp);

    return status;
}

/** see subclause 7.4.3 */
/* Serialize the slice header for the current slice; also accumulates the
   number of header bits into the rate control state. */
AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream)
{
    AVCCommonObj *video = encvid->common;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCPicParamSet *currPPS = video->currPicParams;
    AVCSeqParamSet *currSPS = video->currSeqParams;
    AVCEnc_Status status = AVCENC_SUCCESS;
    int slice_type, temp, i;
    int num_bits;

    /* bit position before the header, used for rate-control accounting below */
    num_bits = (stream->write_pos << 3) - stream->bit_left;

    status = ue_v(stream, sliceHdr->first_mb_in_slice);

    slice_type = video->slice_type;

    if (video->mbNum == 0) /* first mb in frame */
    {
        status = ue_v(stream, sliceHdr->slice_type);
    }
    else
    {
        status = ue_v(stream, slice_type);
    }

    status = ue_v(stream, sliceHdr->pic_parameter_set_id);

    status = BitstreamWriteBits(stream, currSPS->log2_max_frame_num_minus4 + 4, sliceHdr->frame_num);

    if (status != AVCENC_SUCCESS)
    {
        return status;
    }
    /* if frame_mbs_only_flag is 0, encode field_pic_flag, bottom_field_flag here */

    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        status = ue_v(stream, sliceHdr->idr_pic_id);
    }

    if (currSPS->pic_order_cnt_type == 0)
    {
        status = BitstreamWriteBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4,
                                    sliceHdr->pic_order_cnt_lsb);
        if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)
        {
            status = se_v(stream, sliceHdr->delta_pic_order_cnt_bottom); /* 32 bits */
        }
    }
    if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)
    {
        status = se_v(stream, sliceHdr->delta_pic_order_cnt[0]); /* 32 bits */
        if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)
        {
            status = se_v(stream, sliceHdr->delta_pic_order_cnt[1]); /* 32 bits */
        }
    }

    if (currPPS->redundant_pic_cnt_present_flag)
    {
        status = ue_v(stream, sliceHdr->redundant_pic_cnt);
    }

    if (slice_type == AVC_B_SLICE)
    {
        status = BitstreamWrite1Bit(stream, sliceHdr->direct_spatial_mv_pred_flag);
    }

    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    if (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE || slice_type == AVC_B_SLICE)
    {
        status = BitstreamWrite1Bit(stream, sliceHdr->num_ref_idx_active_override_flag);
        if (sliceHdr->num_ref_idx_active_override_flag)
        {
            /* we shouldn't enter this part at all */
            status = ue_v(stream, sliceHdr->num_ref_idx_l0_active_minus1);
            if (slice_type == AVC_B_SLICE)
            {
                status = ue_v(stream, sliceHdr->num_ref_idx_l1_active_minus1);
            }
        }
    }
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    /* ref_pic_list_reordering() */
    status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||
            (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))
    {
        // pred_weight_table(); // not supported !!
        return AVCENC_PRED_WEIGHT_TAB_FAIL;
    }

    if (video->nal_ref_idc != 0)
    {
        status = dec_ref_pic_marking(video, stream, sliceHdr);
        if (status != AVCENC_SUCCESS)
        {
            return status;
        }
    }

    if (currPPS->entropy_coding_mode_flag && slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
    {
        /* CABAC is not supported by this encoder */
        return AVCENC_CABAC_FAIL;
        /* ue_v(stream,&(sliceHdr->cabac_init_idc));
        if(sliceHdr->cabac_init_idc > 2){
            // not supported !!!!
        }*/
    }

    status = se_v(stream, sliceHdr->slice_qp_delta);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    if (slice_type == AVC_SP_SLICE || slice_type == AVC_SI_SLICE)
    {
        if (slice_type == AVC_SP_SLICE)
        {
            status = BitstreamWrite1Bit(stream, sliceHdr->sp_for_switch_flag);
            /* if sp_for_switch_flag is 0, P macroblocks in SP slice is decoded using
               SP decoding process for non-switching pictures in 8.6.1 */
            /* else, P macroblocks in SP slice is decoded using SP and SI decoding process
               for switching picture in 8.6.2 */
        }
        status = se_v(stream, sliceHdr->slice_qs_delta);
        if (status != AVCENC_SUCCESS)
        {
            return status;
        }
    }

    if (currPPS->deblocking_filter_control_present_flag)
    {
        status = ue_v(stream, sliceHdr->disable_deblocking_filter_idc);
        if (sliceHdr->disable_deblocking_filter_idc != 1)
        {
            status = se_v(stream, sliceHdr->slice_alpha_c0_offset_div2);
            status = se_v(stream, sliceHdr->slice_beta_offset_div_2);
        }
        if (status != AVCENC_SUCCESS)
        {
            return status;
        }
    }

    if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3 &&
            currPPS->slice_group_map_type <= 5)
    {
        /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */
        /* NOTE(review): the loop below computes floor(log2(ceil(ratio))),
           which is one bit short of the spec's Ceil(Log2(ratio+1)) for some
           inputs (e.g. ratio exactly a power of two).  FMO slice groups are
           not exercised by this encoder, but verify before enabling them. */
        temp = video->PicSizeInMapUnits / video->SliceGroupChangeRate;
        if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)
        {
            temp++;
        }
        i = 0;
        while (temp > 1)
        {
            temp >>= 1;
            i++;
        }
        BitstreamWriteBits(stream, i, sliceHdr->slice_group_change_cycle);
    }

    /* charge all bits written since function entry to the header-bit budget */
    encvid->rateCtrl->NumberofHeaderBits += (stream->write_pos << 3) - stream->bit_left - num_bits;

    return AVCENC_SUCCESS;
}

/** see subclause 7.4.3.1 */
/* Serialize the ref_pic_list_reordering() syntax for L0 (and L1 for B
   slices).  The sliceHdr arrays are expected to end with the terminating
   idc value 3.
   NOTE(review): the do-while condition peeks at index i AFTER the
   increment; if the arrays are ever populated without a terminating 3 the
   loop can over-read — this path is unused by the encoder today (the
   reordering flags default to 0), but confirm before enabling it. */
AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)
{
    (void)(video);
    int i;
    AVCEnc_Status status = AVCENC_SUCCESS;

    if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
    {
        status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l0);
        if (sliceHdr->ref_pic_list_reordering_flag_l0)
        {
            i = 0;
            do
            {
                status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l0[i]);
                if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||
                        sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)
                {
                    status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l0[i]);
                    /* this check should be in InitSlice(), if we ever use it */
                    /*if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&
                        sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -1)
                    {
                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range
                    }
                    if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&
                        sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -2)
                    {
                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range
                    }*/
                }
                else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)
                {
                    status = ue_v(stream, sliceHdr->long_term_pic_num_l0[i]);
                }
                i++;
            }
            while (sliceHdr->reordering_of_pic_nums_idc_l0[i] != 3 &&
                    i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;
        }
    }
    if (slice_type == AVC_B_SLICE)
    {
        status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l1);
        if (sliceHdr->ref_pic_list_reordering_flag_l1)
        {
            i = 0;
            do
            {
                status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l1[i]);
                if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 ||
                        sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1)
                {
                    status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l1[i]);
                    /* This check should be in InitSlice() if we ever use it
                    if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 &&
                        sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -1)
                    {
                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range
                    }
                    if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1 &&
                        sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2
-2) { return AVCENC_REF_PIC_REORDER_FAIL; // out of range }*/ } else if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 2) { status = ue_v(stream, sliceHdr->long_term_pic_num_l1[i]); } i++; } while (sliceHdr->reordering_of_pic_nums_idc_l1[i] != 3 && i <= (int)sliceHdr->num_ref_idx_l1_active_minus1 + 1) ; } } return status; } /** see subclause 7.4.3.3 */ AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr) { int i; AVCEnc_Status status = AVCENC_SUCCESS; if (video->nal_unit_type == AVC_NALTYPE_IDR) { status = BitstreamWrite1Bit(stream, sliceHdr->no_output_of_prior_pics_flag); status = BitstreamWrite1Bit(stream, sliceHdr->long_term_reference_flag); if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */ { video->MaxLongTermFrameIdx = -1; /* no long-term frame indx */ } else /* used for long-term */ { video->MaxLongTermFrameIdx = 0; video->LongTermFrameIdx = 0; } } else { status = BitstreamWrite1Bit(stream, sliceHdr->adaptive_ref_pic_marking_mode_flag); /* default to zero */ if (sliceHdr->adaptive_ref_pic_marking_mode_flag) { i = 0; do { status = ue_v(stream, sliceHdr->memory_management_control_operation[i]); if (sliceHdr->memory_management_control_operation[i] == 1 || sliceHdr->memory_management_control_operation[i] == 3) { status = ue_v(stream, sliceHdr->difference_of_pic_nums_minus1[i]); } if (sliceHdr->memory_management_control_operation[i] == 2) { status = ue_v(stream, sliceHdr->long_term_pic_num[i]); } if (sliceHdr->memory_management_control_operation[i] == 3 || sliceHdr->memory_management_control_operation[i] == 6) { status = ue_v(stream, sliceHdr->long_term_frame_idx[i]); } if (sliceHdr->memory_management_control_operation[i] == 4) { status = ue_v(stream, sliceHdr->max_long_term_frame_idx_plus1[i]); } i++; } while (sliceHdr->memory_management_control_operation[i] != 0 && i < MAX_DEC_REF_PIC_MARKING); if (i >= MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[i] != 0) { 
return AVCENC_DEC_REF_PIC_MARK_FAIL; /* we're screwed!!, not enough memory */ } } } return status; } /* see subclause 8.2.1 Decoding process for picture order count. See also PostPOC() for initialization of some variables. */ AVCEnc_Status InitPOC(AVCEncObject *encvid) { AVCCommonObj *video = encvid->common; AVCSeqParamSet *currSPS = video->currSeqParams; AVCSliceHeader *sliceHdr = video->sliceHdr; AVCFrameIO *currInput = encvid->currInput; int i; switch (currSPS->pic_order_cnt_type) { case 0: /* POC MODE 0 , subclause 8.2.1.1 */ /* encoding part */ if (video->nal_unit_type == AVC_NALTYPE_IDR) { encvid->dispOrdPOCRef = currInput->disp_order; } while (currInput->disp_order < encvid->dispOrdPOCRef) { encvid->dispOrdPOCRef -= video->MaxPicOrderCntLsb; } sliceHdr->pic_order_cnt_lsb = currInput->disp_order - encvid->dispOrdPOCRef; while (sliceHdr->pic_order_cnt_lsb >= video->MaxPicOrderCntLsb) { sliceHdr->pic_order_cnt_lsb -= video->MaxPicOrderCntLsb; } /* decoding part */ /* Calculate the MSBs of current picture */ if (video->nal_unit_type == AVC_NALTYPE_IDR) { video->prevPicOrderCntMsb = 0; video->prevPicOrderCntLsb = 0; } if (sliceHdr->pic_order_cnt_lsb < video->prevPicOrderCntLsb && (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb) >= (video->MaxPicOrderCntLsb / 2)) video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb; else if (sliceHdr->pic_order_cnt_lsb > video->prevPicOrderCntLsb && (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb) > (video->MaxPicOrderCntLsb / 2)) video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb; else video->PicOrderCntMsb = video->prevPicOrderCntMsb; /* JVT-I010 page 81 is different from JM7.3 */ if (!sliceHdr->field_pic_flag || !sliceHdr->bottom_field_flag) { video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb; } if (!sliceHdr->field_pic_flag) { video->BottomFieldOrderCnt = video->TopFieldOrderCnt + 
sliceHdr->delta_pic_order_cnt_bottom;
            }
            else if (sliceHdr->bottom_field_flag)
            {
                video->PicOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCntMsb +
                                     sliceHdr->pic_order_cnt_lsb;
            }

            if (!sliceHdr->field_pic_flag)
            {
                video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
            }

            if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)
            {
                sliceHdr->delta_pic_order_cnt_bottom = 0; /* defaulted to zero */
            }
            break;
        case 1: /* POC MODE 1, subclause 8.2.1.2 */
            /* calculate FrameNumOffset */
            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                encvid->dispOrdPOCRef = currInput->disp_order; /* reset the reference point */
                video->prevFrameNumOffset = 0;
                video->FrameNumOffset = 0;
            }
            else if (video->prevFrameNum > sliceHdr->frame_num) /* frame_num wrapped */
            {
                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
            }
            else
            {
                video->FrameNumOffset = video->prevFrameNumOffset;
            }
            /* calculate absFrameNum */
            if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)
            {
                video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;
            }
            else
            {
                video->absFrameNum = 0;
            }

            if (video->absFrameNum > 0 && video->nal_ref_idc == 0)
            {
                video->absFrameNum--;
            }

            /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */
            if (video->absFrameNum > 0)
            {
                video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;
                video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;
            }

            /* derive expectedDeltaPerPicOrderCntCycle, this value can be computed up front. */
            video->expectedDeltaPerPicOrderCntCycle = 0;
            for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)
            {
                video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];
            }

            /* derive expectedPicOrderCnt */
            if (video->absFrameNum)
            {
                video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;
                for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)
                {
                    video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];
                }
            }
            else
            {
                video->expectedPicOrderCnt = 0;
            }

            if (video->nal_ref_idc == 0)
            {
                video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;
            }

            /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
            /* encoding part */
            if (!currSPS->delta_pic_order_always_zero_flag)
            {
                sliceHdr->delta_pic_order_cnt[0] = currInput->disp_order - encvid->dispOrdPOCRef - video->expectedPicOrderCnt;
                if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)
                {
                    sliceHdr->delta_pic_order_cnt[1] = sliceHdr->delta_pic_order_cnt[0]; /* should be calculated from currInput->bottom_field->disp_order */
                }
                else
                {
                    sliceHdr->delta_pic_order_cnt[1] = 0;
                }
            }
            else
            {
                sliceHdr->delta_pic_order_cnt[0] = sliceHdr->delta_pic_order_cnt[1] = 0;
            }

            if (sliceHdr->field_pic_flag == 0)
            {
                video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
                video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];
                video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
            }
            else if (sliceHdr->bottom_field_flag == 0)
            {
                video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
                video->PicOrderCnt = video->TopFieldOrderCnt;
            }
            else
            {
                video->BottomFieldOrderCnt = video->expectedPicOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[0];
                video->PicOrderCnt = video->BottomFieldOrderCnt;
            }
            break;
        case 2: /* POC MODE 2, subclause 8.2.1.3 */
            /* decoding order must be the same as display order */
            /* we don't check for that. The decoder will just output in decoding order. */
            /* Check for 2 consecutive non-reference frame */
            if (video->nal_ref_idc == 0)
            {
                if (encvid->dispOrdPOCRef == 1)
                {
                    return AVCENC_CONSECUTIVE_NONREF;
                }
                encvid->dispOrdPOCRef = 1; /* act as a flag for non ref */
            }
            else
            {
                encvid->dispOrdPOCRef = 0;
            }

            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                video->FrameNumOffset = 0;
            }
            else if (video->prevFrameNum > sliceHdr->frame_num) /* frame_num wrapped */
            {
                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
            }
            else
            {
                video->FrameNumOffset = video->prevFrameNumOffset;
            }
            /* derive tempPicOrderCnt, we just use PicOrderCnt */
            if (video->nal_unit_type == AVC_NALTYPE_IDR)
            {
                video->PicOrderCnt = 0;
            }
            else if (video->nal_ref_idc == 0)
            {
                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;
            }
            else
            {
                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);
            }
            /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
            if (sliceHdr->field_pic_flag == 0)
            {
                video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;
            }
            else if (sliceHdr->bottom_field_flag)
            {
                video->BottomFieldOrderCnt = video->PicOrderCnt;
            }
            else
            {
                video->TopFieldOrderCnt = video->PicOrderCnt;
            }
            break;
        default:
            return AVCENC_POC_FAIL;
    }

    return AVCENC_SUCCESS;
}

/** see subclause 8.2.1 */
/* Save the POC state of the just-encoded picture as the "previous picture"
   state for the next call to InitPOC(). */
AVCEnc_Status PostPOC(AVCCommonObj *video)
{
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCSeqParamSet *currSPS = video->currSeqParams;

    video->prevFrameNum = sliceHdr->frame_num;

    switch (currSPS->pic_order_cnt_type)
    {
        case 0: /* subclause 8.2.1.1 */
            if (video->mem_mgr_ctrl_eq_5)
            {
                video->prevPicOrderCntMsb = 0;
                video->prevPicOrderCntLsb = video->TopFieldOrderCnt;
            }
            else
            {
                video->prevPicOrderCntMsb = video->PicOrderCntMsb;
                video->prevPicOrderCntLsb = sliceHdr->pic_order_cnt_lsb;
            }
            break;
        case 1:  /* subclause 8.2.1.2 and 8.2.1.3 */
        case 2:
            if (video->mem_mgr_ctrl_eq_5)
            {
                video->prevFrameNumOffset = 0;
            }
            else
            {
                video->prevPicOrderCntMsb = video->PicOrderCntMsb;
                video->prevPicOrderCntLsb = sliceHdr->pic_order_cnt_lsb;
            }
            break;
        case 1:  /* subclause 8.2.1.2 and 8.2.1.3 */
        case 2:
            if (video->mem_mgr_ctrl_eq_5)
            {
                video->prevFrameNumOffset = 0;
            }
            else
            {
                video->prevFrameNumOffset = video->FrameNumOffset;
            }
            break;
    }

    return AVCENC_SUCCESS;
}



================================================
FILE: RtspCamera/jni/avc_h264/enc/src/init.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avcenc_lib.h"
#include "avcenc_api.h"
#include "oscl_string.h"

#define LOG2_MAX_FRAME_NUM_MINUS4 12   /* 12 default */
#define SLICE_GROUP_CHANGE_CYCLE  1    /* default */

/* initialized variables to be used in SPS*/
/* Populates the active SPS, PPS and slice-header structures from the
   user-supplied AVCEncParams, or from externally supplied SPS/PPS structures
   (extSPS/extPPS, cast from void*). Also derives the picture-dimension
   variables on AVCCommonObj and configures the rate-control object.
   Returns AVCENC_SUCCESS or a specific AVCENC_* error for each parameter
   that is out of range or uses an unsupported tool. */
AVCEnc_Status SetEncodeParam(AVCHandle* avcHandle, AVCEncParams* encParam,
                             void* extSPS, void* extPPS)
{
    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
    AVCCommonObj *video = encvid->common;
    AVCSeqParamSet *seqParam = video->currSeqParams;
    AVCPicParamSet *picParam = video->currPicParams;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCRateControl *rateCtrl = encvid->rateCtrl;
    AVCEnc_Status status;
    void *userData = avcHandle->userData;
    int ii, maxFrameNum;

    AVCSeqParamSet* extS = NULL;
    AVCPicParamSet* extP = NULL;

    if (extSPS) extS = (AVCSeqParamSet*) extSPS;
    if (extPPS) extP = (AVCPicParamSet*) extPPS;

    /* This part sets the default values of the encoding options this
    library supports in seqParam, picParam and sliceHdr structures
    and also copy the values from the encParam into the above 3
    structures.

    Some parameters will be assigned later when we encode SPS or PPS
    such as the seq_parameter_id or pic_parameter_id. Also some of the slice
    parameters have to be re-assigned per slice basis such as frame_num,
    slice_type, first_mb_in_slice, pic_order_cnt_lsb, slice_qp_delta,
    slice_group_change_cycle */

    /* profile_idc, constrained_setx_flag and level_idc is set by VerifyProfile(),
    and VerifyLevel() functions later. */

    encvid->fullsearch_enable = encParam->fullsearch;

    encvid->outOfBandParamSet = ((encParam->out_of_band_param_set == AVC_ON) ? TRUE : FALSE);

    /* parameters derived from the the encParam that are used in SPS */
    if (extS)
    {
        video->MaxPicOrderCntLsb =  1 << (extS->log2_max_pic_order_cnt_lsb_minus4 + 4);
        video->PicWidthInMbs = extS->pic_width_in_mbs_minus1 + 1;
        video->PicHeightInMapUnits = extS->pic_height_in_map_units_minus1 + 1 ;
        video->FrameHeightInMbs = (2 - extS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;
    }
    else
    {
        video->MaxPicOrderCntLsb =  1 << (encParam->log2_max_poc_lsb_minus_4 + 4);
        video->PicWidthInMbs = (encParam->width + 15) >> 4; /* round it to multiple of 16 */
        video->FrameHeightInMbs = (encParam->height + 15) >> 4; /* round it to multiple of 16 */
        video->PicHeightInMapUnits = video->FrameHeightInMbs;
    }

    video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;
    if (video->PicWidthInSamplesL + 32 > 0xFFFF)
    {
        return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch
    }

    video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;
    video->PicHeightInMbs = video->FrameHeightInMbs;
    video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;
    video->PicHeightInSamplesL = video->PicHeightInMbs * 16;
    video->PicHeightInSamplesC = video->PicHeightInMbs * 8;
    video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;

    if (!extS && !extP)
    {
        /* pick log2_max_frame_num so MaxFrameNum covers one IDR period */
        maxFrameNum = (encParam->idr_period == 0) ? (1 << 16) : encParam->idr_period;
        ii = 0;
        while (maxFrameNum > 0)
        {
            ii++;
            maxFrameNum >>= 1;
        }
        if (ii < 4) ii = 4;
        else if (ii > 16) ii = 16;

        seqParam->log2_max_frame_num_minus4 = ii - 4;//LOG2_MAX_FRAME_NUM_MINUS4; /* default */
        video->MaxFrameNum = 1 << ii; //(LOG2_MAX_FRAME_NUM_MINUS4 + 4); /* default */
        video->MaxPicNum = video->MaxFrameNum;

        /************* set the SPS *******************/
        seqParam->seq_parameter_set_id = 0; /* start with zero */
        /* POC */
        seqParam->pic_order_cnt_type = encParam->poc_type; /* POC type */
        if (encParam->poc_type == 0)
        {
            if (/*encParam->log2_max_poc_lsb_minus_4<0 || (no need, it's unsigned)*/
                encParam->log2_max_poc_lsb_minus_4 > 12)
            {
                return AVCENC_INVALID_POC_LSB;
            }
            seqParam->log2_max_pic_order_cnt_lsb_minus4 = encParam->log2_max_poc_lsb_minus_4;
        }
        else if (encParam->poc_type == 1)
        {
            seqParam->delta_pic_order_always_zero_flag = encParam->delta_poc_zero_flag;
            seqParam->offset_for_non_ref_pic = encParam->offset_poc_non_ref;
            seqParam->offset_for_top_to_bottom_field = encParam->offset_top_bottom;
            seqParam->num_ref_frames_in_pic_order_cnt_cycle = encParam->num_ref_in_cycle;
            if (encParam->offset_poc_ref == NULL)
            {
                return AVCENC_ENCPARAM_MEM_FAIL;
            }
            /* NOTE(review): loop bound uses num_ref_frame, but the array size
               just assigned above is num_ref_in_cycle — confirm these are
               intended to match; a mismatch would over/under-copy. */
            for (ii = 0; ii < encParam->num_ref_frame; ii++)
            {
                seqParam->offset_for_ref_frame[ii] = encParam->offset_poc_ref[ii];
            }
        }
        /* number of reference frame */
        if (encParam->num_ref_frame > 16 || encParam->num_ref_frame < 0)
        {
            return AVCENC_INVALID_NUM_REF;
        }
        seqParam->num_ref_frames = encParam->num_ref_frame; /* num reference frame range 0...16*/
        seqParam->gaps_in_frame_num_value_allowed_flag = FALSE;
        seqParam->pic_width_in_mbs_minus1 = video->PicWidthInMbs - 1;
        seqParam->pic_height_in_map_units_minus1 = video->PicHeightInMapUnits - 1;
        seqParam->frame_mbs_only_flag = TRUE;
        seqParam->mb_adaptive_frame_field_flag = FALSE;
        seqParam->direct_8x8_inference_flag = TRUE; /* default */
        seqParam->frame_cropping_flag = FALSE;
        seqParam->frame_crop_bottom_offset = 0;
        seqParam->frame_crop_left_offset = 0;
        seqParam->frame_crop_right_offset = 0;
        seqParam->frame_crop_top_offset = 0;
        seqParam->vui_parameters_present_flag = FALSE; /* default */
    }
    else if (extS) // use external SPS and PPS
    {
        seqParam->seq_parameter_set_id = extS->seq_parameter_set_id;
        seqParam->log2_max_frame_num_minus4 = extS->log2_max_frame_num_minus4;
        video->MaxFrameNum = 1 << (extS->log2_max_frame_num_minus4 + 4);
        video->MaxPicNum = video->MaxFrameNum;
        if (encParam->idr_period > (int)(video->MaxFrameNum) || (encParam->idr_period == -1))
        {
            /* clamp IDR period to what frame_num can represent */
            encParam->idr_period = (int)video->MaxFrameNum;
        }

        seqParam->pic_order_cnt_type = extS->pic_order_cnt_type;
        if (seqParam->pic_order_cnt_type == 0)
        {
            if (/*extS->log2_max_pic_order_cnt_lsb_minus4<0 || (no need it's unsigned)*/
                extS->log2_max_pic_order_cnt_lsb_minus4 > 12)
            {
                return AVCENC_INVALID_POC_LSB;
            }
            seqParam->log2_max_pic_order_cnt_lsb_minus4 = extS->log2_max_pic_order_cnt_lsb_minus4;
        }
        else if (seqParam->pic_order_cnt_type == 1)
        {
            seqParam->delta_pic_order_always_zero_flag = extS->delta_pic_order_always_zero_flag;
            seqParam->offset_for_non_ref_pic = extS->offset_for_non_ref_pic;
            seqParam->offset_for_top_to_bottom_field = extS->offset_for_top_to_bottom_field;
            seqParam->num_ref_frames_in_pic_order_cnt_cycle = extS->num_ref_frames_in_pic_order_cnt_cycle;
            if (extS->offset_for_ref_frame == NULL)
            {
                return AVCENC_ENCPARAM_MEM_FAIL;
            }
            for (ii = 0; ii < (int) extS->num_ref_frames; ii++)
            {
                seqParam->offset_for_ref_frame[ii] = extS->offset_for_ref_frame[ii];
            }
        }
        /* number of reference frame */
        if (extS->num_ref_frames > 16 /*|| extS->num_ref_frames<0 (no need, it's unsigned)*/)
        {
            return AVCENC_INVALID_NUM_REF;
        }
        seqParam->num_ref_frames = extS->num_ref_frames; /* num reference frame range 0...16*/
        seqParam->gaps_in_frame_num_value_allowed_flag = extS->gaps_in_frame_num_value_allowed_flag;
        seqParam->pic_width_in_mbs_minus1 = extS->pic_width_in_mbs_minus1;
        seqParam->pic_height_in_map_units_minus1 = extS->pic_height_in_map_units_minus1;
        seqParam->frame_mbs_only_flag = extS->frame_mbs_only_flag;
        if (extS->frame_mbs_only_flag != TRUE)
        {
            return AVCENC_NOT_SUPPORTED;
        }
        seqParam->mb_adaptive_frame_field_flag = extS->mb_adaptive_frame_field_flag;
        if (extS->mb_adaptive_frame_field_flag != FALSE)
        {
            return AVCENC_NOT_SUPPORTED;
        }
        seqParam->direct_8x8_inference_flag = extS->direct_8x8_inference_flag;
        seqParam->frame_cropping_flag = extS->frame_cropping_flag ;
        if (extS->frame_cropping_flag != FALSE)
        {
            return AVCENC_NOT_SUPPORTED;
        }

        seqParam->frame_crop_bottom_offset = 0;
        seqParam->frame_crop_left_offset = 0;
        seqParam->frame_crop_right_offset = 0;
        seqParam->frame_crop_top_offset = 0;
        seqParam->vui_parameters_present_flag = extS->vui_parameters_present_flag;
        if (extS->vui_parameters_present_flag)
        {
            oscl_memcpy(&(seqParam->vui_parameters), &(extS->vui_parameters), sizeof(AVCVUIParams));
        }
    }
    else
    {
        return AVCENC_NOT_SUPPORTED;
    }

    /***************** now PPS ******************************/
    if (!extP && !extS)
    {
        picParam->pic_parameter_set_id = (uint)(-1); /* start with zero */
        picParam->seq_parameter_set_id = (uint)(-1); /* start with zero */
        picParam->entropy_coding_mode_flag = 0; /* default to CAVLC */
        picParam->pic_order_present_flag = 0; /* default for now, will need it for B-slice */
        /* FMO */
        if (encParam->num_slice_group < 1 || encParam->num_slice_group > MAX_NUM_SLICE_GROUP)
        {
            return AVCENC_INVALID_NUM_SLICEGROUP;
        }
        picParam->num_slice_groups_minus1 = encParam->num_slice_group - 1;

        if (picParam->num_slice_groups_minus1 > 0)
        {
            picParam->slice_group_map_type = encParam->fmo_type;
            switch (encParam->fmo_type)
            {
                case 0:
                    for (ii = 0; ii <= (int)picParam->num_slice_groups_minus1; ii++)
                    {
                        picParam->run_length_minus1[ii] = encParam->run_length_minus1[ii];
                    }
                    break;
                case 2:
                    for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)
                    {
                        picParam->top_left[ii] = encParam->top_left[ii];
                        picParam->bottom_right[ii] = encParam->bottom_right[ii];
                    }
                    break;
                case 3:
                case 4:
                case 5:
                    if (encParam->change_dir_flag == AVC_ON)
                    {
                        picParam->slice_group_change_direction_flag = TRUE;
                    }
                    else
                    {
                        picParam->slice_group_change_direction_flag = FALSE;
                    }
                    if (/*encParam->change_rate_minus1 < 0 || (no need it's unsigned) */
                        encParam->change_rate_minus1 > video->PicSizeInMapUnits - 1)
                    {
                        return AVCENC_INVALID_CHANGE_RATE;
                    }
                    picParam->slice_group_change_rate_minus1 = encParam->change_rate_minus1;
                    video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;
                    break;
                case 6:
                    picParam->pic_size_in_map_units_minus1 = video->PicSizeInMapUnits - 1;

                    /* allocate picParam->slice_group_id */
                    picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);
                    if (picParam->slice_group_id == NULL)
                    {
                        return AVCENC_MEMORY_FAIL;
                    }

                    if (encParam->slice_group == NULL)
                    {
                        return AVCENC_ENCPARAM_MEM_FAIL;
                    }
                    for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)
                    {
                        picParam->slice_group_id[ii] = encParam->slice_group[ii];
                    }
                    break;
                default:
                    return AVCENC_INVALID_FMO_TYPE;
            }
        }
        picParam->num_ref_idx_l0_active_minus1 = encParam->num_ref_frame - 1; /* assume frame only */
        picParam->num_ref_idx_l1_active_minus1 = 0; /* default value */
        picParam->weighted_pred_flag = 0; /* no weighted prediction supported */
        picParam->weighted_bipred_idc = 0; /* range 0,1,2 */
        if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */
            picParam->weighted_bipred_idc > 2)
        {
            return AVCENC_WEIGHTED_BIPRED_FAIL;
        }
        picParam->pic_init_qp_minus26 = encParam->initQP - 26;
        if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
        {
            return AVCENC_INIT_QP_FAIL; /* out of range */
        }
        picParam->pic_init_qs_minus26 = 0;
        if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
        {
            return AVCENC_INIT_QS_FAIL; /* out of range */
        }
        picParam->chroma_qp_index_offset = 0; /* default to zero for now */
        if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
        {
            return AVCENC_CHROMA_QP_FAIL; /* out of range */
        }
        /* deblocking */
        picParam->deblocking_filter_control_present_flag = (encParam->db_filter == AVC_ON) ? TRUE : FALSE ;
        /* constrained intra prediction */
        picParam->constrained_intra_pred_flag = (encParam->constrained_intra_pred == AVC_ON) ? TRUE : FALSE;
        picParam->redundant_pic_cnt_present_flag = 0; /* default */
    }
    else if (extP)// external PPS
    {
        picParam->pic_parameter_set_id = extP->pic_parameter_set_id - 1; /* to be increased by one */
        picParam->seq_parameter_set_id = extP->seq_parameter_set_id;
        picParam->entropy_coding_mode_flag = extP->entropy_coding_mode_flag;
        if (extP->entropy_coding_mode_flag != 0) /* default to CAVLC */
        {
            return AVCENC_NOT_SUPPORTED;
        }
        picParam->pic_order_present_flag = extP->pic_order_present_flag; /* default for now, will need it for B-slice */
        if (extP->pic_order_present_flag != 0)
        {
            return AVCENC_NOT_SUPPORTED;
        }
        /* FMO */
        if (/*(extP->num_slice_groups_minus1<0) || (no need it's unsigned) */
            (extP->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1))
        {
            return AVCENC_INVALID_NUM_SLICEGROUP;
        }
        picParam->num_slice_groups_minus1 = extP->num_slice_groups_minus1;

        if (picParam->num_slice_groups_minus1 > 0)
        {
            picParam->slice_group_map_type = extP->slice_group_map_type;
            switch (extP->slice_group_map_type)
            {
                case 0:
                    for (ii = 0; ii <= (int)extP->num_slice_groups_minus1; ii++)
                    {
                        picParam->run_length_minus1[ii] = extP->run_length_minus1[ii];
                    }
                    break;
                case 2:
                    for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)
                    {
                        picParam->top_left[ii] = extP->top_left[ii];
                        picParam->bottom_right[ii] = extP->bottom_right[ii];
                    }
                    break;
                case 3:
                case 4:
                case 5:
                    picParam->slice_group_change_direction_flag = extP->slice_group_change_direction_flag;
                    if (/*extP->slice_group_change_rate_minus1 < 0 || (no need, it's unsigned) */
                        extP->slice_group_change_rate_minus1 > video->PicSizeInMapUnits - 1)
                    {
                        return AVCENC_INVALID_CHANGE_RATE;
                    }
                    picParam->slice_group_change_rate_minus1 = extP->slice_group_change_rate_minus1;
                    video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;
                    break;
                case 6:
                    if (extP->pic_size_in_map_units_minus1 != video->PicSizeInMapUnits - 1)
                    {
                        return AVCENC_NOT_SUPPORTED;
                    }
                    picParam->pic_size_in_map_units_minus1 = extP->pic_size_in_map_units_minus1;

                    /* allocate picParam->slice_group_id */
                    picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);
                    if (picParam->slice_group_id == NULL)
                    {
                        return AVCENC_MEMORY_FAIL;
                    }

                    if (extP->slice_group_id == NULL)
                    {
                        return AVCENC_ENCPARAM_MEM_FAIL;
                    }
                    for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)
                    {
                        picParam->slice_group_id[ii] = extP->slice_group_id[ii];
                    }
                    break;
                default:
                    return AVCENC_INVALID_FMO_TYPE;
            }
        }
        picParam->num_ref_idx_l0_active_minus1 = extP->num_ref_idx_l0_active_minus1;
        picParam->num_ref_idx_l1_active_minus1 = extP->num_ref_idx_l1_active_minus1; /* default value */
        if (picParam->num_ref_idx_l1_active_minus1 != 0)
        {
            return AVCENC_NOT_SUPPORTED;
        }
        if (extP->weighted_pred_flag)
        {
            return AVCENC_NOT_SUPPORTED;
        }

        picParam->weighted_pred_flag = 0; /* no weighted prediction supported */
        picParam->weighted_bipred_idc = extP->weighted_bipred_idc; /* range 0,1,2 */
        if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */
            picParam->weighted_bipred_idc > 2)
        {
            return AVCENC_WEIGHTED_BIPRED_FAIL;
        }
        picParam->pic_init_qp_minus26 = extP->pic_init_qp_minus26; /* default, will be changed at slice level anyway */
        if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
        {
            return AVCENC_INIT_QP_FAIL; /* out of range */
        }
        picParam->pic_init_qs_minus26 = extP->pic_init_qs_minus26;
        if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
        {
            return AVCENC_INIT_QS_FAIL; /* out of range */
        }
        picParam->chroma_qp_index_offset = extP->chroma_qp_index_offset; /* default to zero for now */
        if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
        {
            return AVCENC_CHROMA_QP_FAIL; /* out of range */
        }
        /* deblocking */
        picParam->deblocking_filter_control_present_flag = extP->deblocking_filter_control_present_flag;
        /* constrained intra prediction */
        picParam->constrained_intra_pred_flag = extP->constrained_intra_pred_flag;
        if (extP->redundant_pic_cnt_present_flag != 0)
        {
            return AVCENC_NOT_SUPPORTED;
        }
        picParam->redundant_pic_cnt_present_flag = extP->redundant_pic_cnt_present_flag; /* default */
    }
    else
    {
        return AVCENC_NOT_SUPPORTED;
    }

    /****************** now set up some SliceHeader parameters ***********/
    if (picParam->deblocking_filter_control_present_flag == TRUE)
    {
        /* these values only present when db_filter is ON */
        if (encParam->disable_db_idc > 2)
        {
            return AVCENC_INVALID_DEBLOCK_IDC; /* out of range */
        }
        sliceHdr->disable_deblocking_filter_idc = encParam->disable_db_idc;
        if (encParam->alpha_offset < -6 || encParam->alpha_offset > 6)
        {
            return AVCENC_INVALID_ALPHA_OFFSET;
        }
        sliceHdr->slice_alpha_c0_offset_div2 = encParam->alpha_offset;
        if (encParam->beta_offset < -6 || encParam->beta_offset > 6)
        {
            return AVCENC_INVALID_BETA_OFFSET;
        }
        sliceHdr->slice_beta_offset_div_2 = encParam->beta_offset;
    }
    if (encvid->outOfBandParamSet == TRUE)
    {
        sliceHdr->idr_pic_id = 0;
    }
    else
    {
        sliceHdr->idr_pic_id = (uint)(-1); /* start with zero */
    }
    sliceHdr->field_pic_flag = FALSE;
    sliceHdr->bottom_field_flag = FALSE;  /* won't be used anyway */
    video->MbaffFrameFlag = (seqParam->mb_adaptive_frame_field_flag && !sliceHdr->field_pic_flag);

    /* the rest will be set in InitSlice() */

    /* now the rate control and performance related parameters */
    rateCtrl->scdEnable = (encParam->auto_scd == AVC_ON) ? TRUE : FALSE;
    rateCtrl->idrPeriod = encParam->idr_period;// + 1;
    rateCtrl->intraMBRate = encParam->intramb_refresh;
    rateCtrl->dpEnable = (encParam->data_par == AVC_ON) ? TRUE : FALSE;

    rateCtrl->subPelEnable = (encParam->sub_pel == AVC_ON) ? TRUE : FALSE;
    rateCtrl->mvRange = encParam->search_range;

    rateCtrl->subMBEnable = (encParam->submb_pred == AVC_ON) ?
                            TRUE : FALSE;
    rateCtrl->rdOptEnable = (encParam->rdopt_mode == AVC_ON) ? TRUE : FALSE;
    rateCtrl->bidirPred = (encParam->bidir_pred == AVC_ON) ? TRUE : FALSE;

    rateCtrl->rcEnable = (encParam->rate_control == AVC_ON) ? TRUE : FALSE;
    rateCtrl->initQP = encParam->initQP;
    rateCtrl->initQP = AVC_CLIP3(0, 51, rateCtrl->initQP);  /* QP must be in [0,51] */

    rateCtrl->bitRate = encParam->bitrate;
    rateCtrl->cpbSize = encParam->CPB_size;
    rateCtrl->initDelayOffset = (rateCtrl->bitRate * encParam->init_CBP_removal_delay / 1000);

    if (encParam->frame_rate == 0)
    {
        return AVCENC_INVALID_FRAMERATE;
    }

    /* frame_rate is supplied in millihertz (frames per 1000 seconds scale) */
    rateCtrl->frame_rate = (OsclFloat)(encParam->frame_rate * 1.0 / 1000);
//  rateCtrl->srcInterval = encParam->src_interval;
    rateCtrl->first_frame = 1; /* set this flag for the first time */

    /* contrained_setx_flag will be set inside the VerifyProfile called below.*/
    if (!extS && !extP)
    {
        seqParam->profile_idc = encParam->profile;
        seqParam->constrained_set0_flag = FALSE;
        seqParam->constrained_set1_flag = FALSE;
        seqParam->constrained_set2_flag = FALSE;
        seqParam->constrained_set3_flag = FALSE;
        seqParam->level_idc = encParam->level;
    }
    else
    {
        seqParam->profile_idc = extS->profile_idc;
        seqParam->constrained_set0_flag = extS->constrained_set0_flag;
        seqParam->constrained_set1_flag = extS->constrained_set1_flag;
        seqParam->constrained_set2_flag = extS->constrained_set2_flag;
        seqParam->constrained_set3_flag = extS->constrained_set3_flag;
        seqParam->level_idc = extS->level_idc;
    }


    status = VerifyProfile(encvid, seqParam, picParam);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    status = VerifyLevel(encvid, seqParam, picParam);
    if (status != AVCENC_SUCCESS)
    {
        return status;
    }

    return AVCENC_SUCCESS;
}

/* verify the profile setting */
/* Checks (or, when profile_idc==0, auto-selects) the H.264 profile against the
   tools actually enabled. Only Baseline is supported by this library; Main and
   Extended are recognized but rejected with AVCENC_PROFILE_NOT_SUPPORTED. */
AVCEnc_Status VerifyProfile(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)
{
    AVCRateControl *rateCtrl = encvid->rateCtrl;
    AVCEnc_Status status = AVCENC_SUCCESS;

    if (seqParam->profile_idc == 0) /* find profile for this setting */
    {
        /* find the right profile for it */
        /* ordered so the most constrained matching profile wins (Baseline last) */
        if (seqParam->direct_8x8_inference_flag == TRUE &&
                picParam->entropy_coding_mode_flag == FALSE &&
                picParam->num_slice_groups_minus1 <= 7 /*&&
            picParam->num_slice_groups_minus1>=0 (no need, it's unsigned) */)
        {
            seqParam->profile_idc = AVC_EXTENDED;
            seqParam->constrained_set2_flag = TRUE;
        }

        if (rateCtrl->dpEnable == FALSE &&
                picParam->num_slice_groups_minus1 == 0 &&
                picParam->redundant_pic_cnt_present_flag == FALSE)
        {
            seqParam->profile_idc = AVC_MAIN;
            seqParam->constrained_set1_flag = TRUE;
        }

        if (rateCtrl->bidirPred == FALSE &&
                rateCtrl->dpEnable == FALSE &&
                seqParam->frame_mbs_only_flag == TRUE &&
                picParam->weighted_pred_flag == FALSE &&
                picParam->weighted_bipred_idc == 0 &&
                picParam->entropy_coding_mode_flag == FALSE &&
                picParam->num_slice_groups_minus1 <= 7 /*&&
            picParam->num_slice_groups_minus1>=0 (no need, it's unsigned)*/)
        {
            seqParam->profile_idc = AVC_BASELINE;
            seqParam->constrained_set0_flag = TRUE;
        }

        if (seqParam->profile_idc == 0) /* still zero */
        {
            return AVCENC_PROFILE_NOT_SUPPORTED;
        }
    }

    /* check the list of supported profile by this library */
    switch (seqParam->profile_idc)
    {
        case AVC_BASELINE:
            if (rateCtrl->bidirPred == TRUE ||
                    rateCtrl->dpEnable == TRUE ||
                    seqParam->frame_mbs_only_flag != TRUE ||
                    picParam->weighted_pred_flag == TRUE ||
                    picParam->weighted_bipred_idc != 0 ||
                    picParam->entropy_coding_mode_flag == TRUE ||
                    picParam->num_slice_groups_minus1 > 7 /*||
                picParam->num_slice_groups_minus1<0 (no need, it's unsigned) */)
            {
                status = AVCENC_TOOLS_NOT_SUPPORTED;
            }
            break;

        case AVC_MAIN:
        case AVC_EXTENDED:
            status = AVCENC_PROFILE_NOT_SUPPORTED;
    }

    return status;
}

/* verify the level setting */
/* Checks (or, when level_idc==0, auto-selects from the Annex A tables) the
   H.264 level given the configured MB rate, frame size, bitrate, CPB size,
   MV range and derived DPB size. */
AVCEnc_Status VerifyLevel(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)
{
    (void)(picParam);

    AVCRateControl *rateCtrl = encvid->rateCtrl;
    AVCCommonObj *video = encvid->common;
    int mb_per_sec, ii;
    int lev_idx;
    int dpb_size;

    mb_per_sec = (int)(video->PicSizeInMbs * rateCtrl->frame_rate + 0.5);
    /* DPB size in units of 1024 bytes: num_ref_frames * MBs * 384 bytes / 1024 */
    dpb_size = (seqParam->num_ref_frames * video->PicSizeInMbs * 3) >> 6;

    if (seqParam->level_idc == 0) /* find level for this setting */
    {
        for (ii = 0; ii < MAX_LEVEL_IDX; ii++)
        {
            if (mb_per_sec <= MaxMBPS[ii] &&
                    video->PicSizeInMbs <= (uint)MaxFS[ii] &&
                    rateCtrl->bitRate <= (int32)MaxBR[ii]*1000 &&
                    rateCtrl->cpbSize <= (int32)MaxCPB[ii]*1000 &&
                    rateCtrl->mvRange <= MaxVmvR[ii] &&
                    dpb_size <= MaxDPBX2[ii]*512)
            {
                seqParam->level_idc = mapIdx2Lev[ii];
                break;
            }
        }
        if (seqParam->level_idc == 0)
        {
            return AVCENC_LEVEL_NOT_SUPPORTED;
        }
    }

    /* check if this level is supported by this library */
    lev_idx = mapLev2Idx[seqParam->level_idc];
    if (seqParam->level_idc == AVC_LEVEL1_B)
    {
        seqParam->constrained_set3_flag = 1;
    }

    if (lev_idx == 255) /* not defined */
    {
        return AVCENC_LEVEL_NOT_SUPPORTED;
    }

    /* check if the encoding setting complies with the level */
    if (mb_per_sec > MaxMBPS[lev_idx] ||
            video->PicSizeInMbs > (uint)MaxFS[lev_idx] ||
            rateCtrl->bitRate > (int32)MaxBR[lev_idx]*1000 ||
            rateCtrl->cpbSize > (int32)MaxCPB[lev_idx]*1000 ||
            rateCtrl->mvRange > MaxVmvR[lev_idx])
    {
        return AVCENC_LEVEL_FAIL;
    }

    return AVCENC_SUCCESS;
}

/* initialize variables at the beginning of each frame */
/* determine the picture type */
/* encode POC */
/* maybe we should do more stuff here. MotionEstimation+SCD and generate a new SPS and PPS */
/* Per-frame setup: configures FMO maps and the DPB buffer for the new picture,
   decides IDR vs non-IDR, sets frame_num, derives POC via InitPOC(), builds the
   reference list and runs motion estimation. Returns AVCENC_NEW_IDR when the
   frame will be coded as IDR, AVCENC_SUCCESS otherwise, or an error status. */
AVCEnc_Status InitFrame(AVCEncObject *encvid)
{
    AVCStatus ret;
    AVCEnc_Status status;
    AVCCommonObj *video = encvid->common;
    AVCSliceHeader *sliceHdr = video->sliceHdr;

    /* look for the next frame in coding_order and look for available picture
       in the DPB. Note, video->currFS->PicOrderCnt, currFS->FrameNum and
       currPic->PicNum are set to wrong number in this function (right for decoder). */
    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        // call init DPB in here.
        ret = AVCConfigureSequence(encvid->avcHandle, video, TRUE);
        if (ret != AVC_SUCCESS)
        {
            return AVCENC_FAIL;
        }
    }

    /* flexible macroblock ordering (every frame)*/
    /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */
    /* It changes once per each PPS. */
    FMOInit(video);

    ret = DPBInitBuffer(encvid->avcHandle, video); // get new buffer
    if (ret != AVC_SUCCESS)
    {
        return (AVCEnc_Status)ret; // AVCENC_PICTURE_READY, FAIL
    }

    DPBInitPic(video, 0); /* 0 is dummy */

    /************* determine picture type IDR or non-IDR ***********/
    video->currPicType = AVC_FRAME;
    video->slice_data_partitioning = FALSE;
    encvid->currInput->is_reference = 1; /* default to all frames */
    video->nal_ref_idc = 1;  /* need to set this for InitPOC */
    video->currPic->isReference = TRUE;

    /************* set frame_num ********************/
    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        video->prevFrameNum = video->MaxFrameNum;
        video->PrevRefFrameNum = 0;
        sliceHdr->frame_num = 0;
    }
    /* otherwise, it's set to previous reference frame access unit's frame_num in decoding order,
       see the end of PVAVCDecodeSlice()*/
    /* There's also restriction on the frame_num, see page 59 of JVT-I1010.doc. */
    /* Basically, frame_num can't be repeated unless it's opposite fields or non reference fields */
    else
    {
        sliceHdr->frame_num = (video->PrevRefFrameNum + 1) % video->MaxFrameNum;
    }

    video->CurrPicNum = sliceHdr->frame_num;  /* for field_pic_flag = 0 */
    //video->CurrPicNum = 2*sliceHdr->frame_num + 1; /* for field_pic_flag = 1 */

    /* assign pic_order_cnt, video->PicOrderCnt */
    status = InitPOC(encvid);
    if (status != AVCENC_SUCCESS)  /* incorrigable fail */
    {
        return status;
    }

    /* Initialize refListIdx for this picture */
    RefListInit(video);

    /************* motion estimation and scene analysis ************/
    // , to move this to MB-based MV search for comparison
    // use sub-optimal QP for mv search
    AVCMotionEstimation(encvid);  /* AVCENC_SUCCESS or AVCENC_NEW_IDR */

    /* after this point, the picture type will be fixed to either IDR or non-IDR */
    video->currFS->PicOrderCnt = video->PicOrderCnt;
    video->currFS->FrameNum = video->sliceHdr->frame_num;
    video->currPic->PicNum = video->CurrPicNum;
    video->mbNum = 0; /* start from zero MB */
    encvid->currSliceGroup = 0; /* start from slice group #0 */
    encvid->numIntraMB = 0; /* reset this counter */

    if (video->nal_unit_type == AVC_NALTYPE_IDR)
    {
        RCInitGOP(encvid);

        /* calculate picture QP */
        RCInitFrameQP(encvid);

        return AVCENC_NEW_IDR;
    }

    /* calculate picture QP */
    RCInitFrameQP(encvid); /* get QP after MV search */

    return AVCENC_SUCCESS;
}

/* initialize variables for this slice */
/* Per-slice setup: fills the slice header with the defaults this library
   supports (no reordering, no weighted prediction, no SP/SI specifics),
   derives the deblocking-filter offsets, refreshes the FMO map for
   change-cycle map types (3..5), and computes slice_qp_delta from the
   picture QP. Returns AVCENC_SUCCESS or AVCENC_TOOLS_NOT_SUPPORTED. */
AVCEnc_Status InitSlice(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCPicParamSet *currPPS = video->currPicParams;
    AVCSeqParamSet *currSPS = video->currSeqParams;
    int slice_type = video->slice_type;

    sliceHdr->first_mb_in_slice = video->mbNum;
    if (video->mbNum) // not first slice of a frame
    {
        video->sliceHdr->slice_type = (AVCSliceType)slice_type;
    }

    /* sliceHdr->slice_type already set in InitFrame */

    sliceHdr->pic_parameter_set_id = video->currPicParams->pic_parameter_set_id;

    /* sliceHdr->frame_num already set in InitFrame */

    if (!currSPS->frame_mbs_only_flag)  /* we shouldn't need this check */
    {
        sliceHdr->field_pic_flag = sliceHdr->bottom_field_flag = FALSE;
        return AVCENC_TOOLS_NOT_SUPPORTED;
    }

    /* sliceHdr->idr_pic_id already set in PVAVCEncodeNAL

     sliceHdr->pic_order_cnt_lsb already set in InitFrame..InitPOC
     sliceHdr->delta_pic_order_cnt_bottom already set in InitPOC

     sliceHdr->delta_pic_order_cnt[0] already set in InitPOC
     sliceHdr->delta_pic_order_cnt[1] already set in InitPOC
    */

    sliceHdr->redundant_pic_cnt = 0; /* default if(currPPS->redundant_pic_cnt_present_flag), range 0..127 */
    sliceHdr->direct_spatial_mv_pred_flag = 0; // default if(slice_type == AVC_B_SLICE)

    sliceHdr->num_ref_idx_active_override_flag = FALSE; /* default, if(slice_type== P,SP or B)*/
    sliceHdr->num_ref_idx_l0_active_minus1 = 0; /* default, if (num_ref_idx_active_override_flag) */
    sliceHdr->num_ref_idx_l1_active_minus1 = 0; /* default, if above and B_slice */
    /* the above 2 values range from 0..15 for frame picture and 0..31 for field picture */

    /* ref_pic_list_reordering(), currently we don't do anything */
    sliceHdr->ref_pic_list_reordering_flag_l0 = FALSE; /* default */
    sliceHdr->ref_pic_list_reordering_flag_l1 = FALSE; /* default */
    /* if the above are TRUE, some other params must be set */

    if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||
            (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))
    {
        //      pred_weight_table(); // not supported !!
        return AVCENC_TOOLS_NOT_SUPPORTED;
    }

    /* dec_ref_pic_marking(), this will be done later*/
    sliceHdr->no_output_of_prior_pics_flag = FALSE; /* default */
    sliceHdr->long_term_reference_flag = FALSE; /* for IDR frame, do not make it long term */
    sliceHdr->adaptive_ref_pic_marking_mode_flag = FALSE; /* default */
    /* other params are not set here because they are not used */

    sliceHdr->cabac_init_idc = 0; /* default, if entropy_coding_mode_flag && slice_type==I or SI, range 0..2 */
    sliceHdr->slice_qp_delta = 0; /* default for now */
    sliceHdr->sp_for_switch_flag = FALSE; /* default, if slice_type == SP */
    sliceHdr->slice_qs_delta = 0; /* default, if slice_type == SP or SI */

    /* derived variables from encParam */
    /* deblocking filter */
    video->FilterOffsetA = video->FilterOffsetB = 0;
    if (currPPS->deblocking_filter_control_present_flag == TRUE)
    {
        video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;
        video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;
    }

    /* flexible macroblock ordering */
    /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */
    /* We already call it at the end of PVAVCEncInitialize(). It changes once per each PPS. */
    if (video->currPicParams->num_slice_groups_minus1 > 0 && video->currPicParams->slice_group_map_type >= 3
            && video->currPicParams->slice_group_map_type <= 5)
    {
        sliceHdr->slice_group_change_cycle = SLICE_GROUP_CHANGE_CYCLE; /* default, don't understand how to set it!!!*/

        video->MapUnitsInSliceGroup0 =
            AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);

        FMOInit(video);
    }

    /* calculate SliceQPy first */
    /* calculate QSy first */

    sliceHdr->slice_qp_delta = video->QPy - 26 - currPPS->pic_init_qp_minus26;
    //sliceHdr->slice_qs_delta = video->QSy - 26 - currPPS->pic_init_qs_minus26;

    return AVCENC_SUCCESS;
}



================================================
FILE: RtspCamera/jni/avc_h264/enc/src/intra_est.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "oscl_mem.h"
#include "avcenc_lib.h"

#define TH_I4  0    /* threshold biasing toward I16 mode instead of I4 mode */
#define TH_Intra  0 /* threshold biasing toward INTER mode instead of intra mode */

#define FIXED_INTRAPRED_MODE  AVC_I16
#define FIXED_I16_MODE  AVC_I16_DC
#define FIXED_I4_MODE   AVC_I4_Diagonal_Down_Left
#define FIXED_INTRA_CHROMA_MODE AVC_IC_DC

/* Clamp an int result into the 8-bit pixel range [0,255]:
   negative values become 0 (via the sign-extended mask), values > 255 become 255. */
#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \
                x = 0xFF & (~(x>>31));}

/* Average-Boundary-Error intra/inter pre-decision.
   Accumulates the sum of absolute differences (SBE) between the already
   reconstructed neighboring border pixels (top row / left column of the
   current MB, for Y, Cb and Cr) and the corresponding original border
   pixels.  Returns true when intra search is worth running, false when the
   boundary error is small relative to the inter min_cost (scaled compare of
   SBE/64 vs min_cost/384).  MBs on the last MB column/row, or without both
   A (left) and B (top) neighbors, always return true. */
bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch)
{
    AVCCommonObj *video = encvid->common;
    AVCFrameIO *currInput = encvid->currInput;
    int orgPitch = currInput->pitch;
    int x_pos = (video->mb_x) << 4;
    int y_pos = (video->mb_y) << 4;
    uint8 *orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;
    int j;
    uint8 *topL, *leftL, *orgY_2, *orgY_3;
    int temp, SBE, offset;
    OsclFloat ABE;
    bool intra = true;

    if (((x_pos >> 4) != (int)video->PicWidthInMbs - 1) &&
            ((y_pos >> 4) != (int)video->PicHeightInMbs - 1) &&
            video->intraAvailA && video->intraAvailB)
    {
        SBE = 0;
        /* top neighbor */
        topL = curL - picPitch;
        /* left neighbor */
        leftL = curL - 1;
        orgY_2 = orgY - orgPitch;

        /* luma: 16 top-border and 16 left-border pixels */
        for (j = 0; j < 16; j++)
        {
            temp = *topL++ - orgY[j];
            SBE += ((temp >= 0) ? temp : -temp);
            temp = *(leftL += picPitch) - *(orgY_2 += orgPitch);
            SBE += ((temp >= 0) ? temp : -temp);
        }

        /* calculate chroma */
        /* NOTE(review): chroma offset folds the /2 pitch and /2 position into
           (y_pos>>2)*picPitch + (x_pos>>1); relies on x_pos,y_pos being
           multiples of 16 — TODO confirm against the Scb/Scr layout. */
        offset = (y_pos >> 2) * picPitch + (x_pos >> 1);
        topL = video->currPic->Scb + offset;
        orgY_2 = currInput->YCbCr[1] + offset + (y_pos >> 2) * (orgPitch - picPitch);

        leftL = topL - 1;
        topL -= (picPitch >> 1);
        orgY_3 = orgY_2 - (orgPitch >> 1);
        /* Cb: 8 top-border and 8 left-border pixels */
        for (j = 0; j < 8; j++)
        {
            temp = *topL++ - orgY_2[j];
            SBE += ((temp >= 0) ? temp : -temp);
            temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));
            SBE += ((temp >= 0) ? temp : -temp);
        }

        /* Cr: same as Cb */
        topL = video->currPic->Scr + offset;
        orgY_2 = currInput->YCbCr[2] + offset + (y_pos >> 2) * (orgPitch - picPitch);
        leftL = topL - 1;
        topL -= (picPitch >> 1);
        orgY_3 = orgY_2 - (orgPitch >> 1);
        for (j = 0; j < 8; j++)
        {
            temp = *topL++ - orgY_2[j];
            SBE += ((temp >= 0) ? temp : -temp);
            temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));
            SBE += ((temp >= 0) ? temp : -temp);
        }

        /* compare mincost/384 and SBE/64 */
        ABE = SBE / 64.0;
        if (ABE*0.8 >= min_cost / 384.0)
        {
            intra = false; /* boundary matches well enough — skip intra search */
        }
    }

    return intra;
}

/* perform searching for MB mode */
/* assuming that this is done inside the encoding loop,
   no need to call InitNeighborAvailability */
/* Entry point of the per-MB intra mode search: decides whether to run intra
   at all (P slices, via IntraDecisionABE), then evaluates I16 and I4 luma
   modes and — when intra wins — the chroma modes, updating
   encvid->min_cost[mbnum] and the currMB mode fields. */
void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch)
{
    AVCCommonObj *video = encvid->common;
    AVCFrameIO *currInput = encvid->currInput;
    AVCMacroblock *currMB = video->currMB;
    int min_cost;
    uint8 *orgY;
    int x_pos = (video->mb_x) << 4;
    int y_pos = (video->mb_y) << 4;
    uint32 *saved_inter;
    int j;
    int orgPitch = currInput->pitch;
    bool intra = true;

    currMB->CBP = 0;

    /* first do motion vector and variable block size search */
    min_cost = encvid->min_cost[mbnum];

    /* now perform intra prediction search */
    /* need to add the check for encvid->intraSearch[video->mbNum] to skip intra
       if it's not worth checking. */
    if (video->slice_type == AVC_P_SLICE)
    {
        /* Decide whether intra search is necessary or not */
        /* This one, we do it in the encoding loop so the neighboring pixel are the actual
           reconstructed pixels.
        */
        intra = IntraDecisionABE(encvid, min_cost, curL, picPitch);
    }

    if (intra == true || video->slice_type == AVC_I_SLICE)
    {
        orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;

        /* i16 mode search */
        /* generate all the predictions */
        intrapred_luma_16x16(encvid);

        /* evaluate them one by one */
        find_cost_16x16(encvid, orgY, &min_cost);

        if (video->slice_type == AVC_P_SLICE)
        {
            /* save current inter prediction (the reconstructed MB holds the
               inter prediction at this point) before I4 search overwrites it */
            saved_inter = encvid->subpel_pred; /* reuse existing buffer */
            j = 16;
            curL -= 4;
            picPitch -= 16;
            while (j--)
            {
                *saved_inter++ = *((uint32*)(curL += 4));
                *saved_inter++ = *((uint32*)(curL += 4));
                *saved_inter++ = *((uint32*)(curL += 4));
                *saved_inter++ = *((uint32*)(curL += 4));
                curL += picPitch;
            }
        }

        /* i4 mode search */
        mb_intra4x4_search(encvid, &min_cost);

        encvid->min_cost[mbnum] = min_cost; /* update min_cost */
    }

    if (currMB->mb_intra)
    {
        chroma_intra_search(encvid);

        /* need to set this in order for the MBInterPrediction to work!! */
        oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);
        currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] = currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = -1;
    }
    else if (video->slice_type == AVC_P_SLICE && intra == true)
    {
        /* restore current inter prediction (inter won after all) */
        saved_inter = encvid->subpel_pred; /* reuse existing buffer */
        j = 16;
        curL -= ((picPitch + 16) << 4); /* rewind to MB top-left (picPitch was reduced by 16 above) */
        while (j--)
        {
            *((uint32*)(curL += 4)) = *saved_inter++;
            *((uint32*)(curL += 4)) = *saved_inter++;
            *((uint32*)(curL += 4)) = *saved_inter++;
            *((uint32*)(curL += 4)) = *saved_inter++;
            curL += picPitch;
        }
    }

    return ;
}

/* generate all the prediction values */
/* Builds the four 16x16 luma intra prediction buffers
   (encvid->pred_i16[Vertical/Horizontal/DC/Plane]) from the reconstructed
   neighbors of the current MB.  Each buffer is written 4 bytes at a time;
   modes whose neighbors are unavailable are simply not generated (the cost
   search checks availability before using them). */
void intrapred_luma_16x16(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    AVCPictureData *currPic = video->currPic;

    int x_pos = (video->mb_x) << 4;
    int y_pos = (video->mb_y) << 4;
    int pitch = currPic->pitch;

    int offset = y_pos * pitch + x_pos;

    uint8 *pred, *top, *left;
    uint8 *curL = currPic->Sl + offset; /* point to reconstructed frame */
    uint32 word1, word2, word3, word4;
    uint32 sum = 0;

    int a_16, b, c, factor_c;
    uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
    int H = 0, V = 0, tmp, value;
    int i;

    if (video->intraAvailB)
    {
        //get vertical prediction mode: replicate the top neighbor row
        top = curL - pitch;

        pred = encvid->pred_i16[AVC_I16_Vertical] - 16;

        word1 = *((uint32*)(top));      /* read 4 bytes from top */
        word2 = *((uint32*)(top + 4));  /* read 4 bytes from top */
        word3 = *((uint32*)(top + 8));  /* read 4 bytes from top */
        word4 = *((uint32*)(top + 12)); /* read 4 bytes from top */

        for (i = 0; i < 16; i++)
        {
            *((uint32*)(pred += 16)) = word1;
            *((uint32*)(pred + 4)) = word2;
            *((uint32*)(pred + 8)) = word3;
            *((uint32*)(pred + 12)) = word4;
        }

        /* SIMD-in-register sum of the 16 top pixels (pairs packed in 0xFF00FF lanes) */
        sum = word1 & 0xFF00FF;
        word1 = (word1 >> 8) & 0xFF00FF;
        sum += word1;
        word1 = (word2 & 0xFF00FF);
        sum += word1;
        word2 = (word2 >> 8) & 0xFF00FF;
        sum += word2;
        word1 = (word3 & 0xFF00FF);
        sum += word1;
        word3 = (word3 >> 8) & 0xFF00FF;
        sum += word3;
        word1 = (word4 & 0xFF00FF);
        sum += word1;
        word4 = (word4 >> 8) & 0xFF00FF;
        sum += word4;

        sum += (sum >> 16);
        sum &= 0xFFFF;

        if (!video->intraAvailA)
        {
            sum = (sum + 8) >> 4; /* DC from top row only */
        }
    }

    if (video->intraAvailA)
    {
        // get horizontal mode: replicate the left neighbor column
        left = curL - 1 - pitch;

        pred = encvid->pred_i16[AVC_I16_Horizontal] - 16;

        for (i = 0; i < 16; i++)
        {
            word1 = *(left += pitch);
            sum += word1;

            word1 = (word1 << 8) | word1;
            word1 = (word1 << 16) | word1; /* make it 4 */
            *(uint32*)(pred += 16) = word1;
            *(uint32*)(pred + 4) = word1;
            *(uint32*)(pred + 8) = word1;
            *(uint32*)(pred + 12) = word1;
        }

        if (!video->intraAvailB)
        {
            sum = (sum + 8) >> 4;  /* DC from left column only */
        }
        else
        {
            sum = (sum + 16) >> 5; /* DC from both borders */
        }
    }

    // get DC mode
    if (!video->intraAvailA && !video->intraAvailB)
    {
        sum = 0x80808080; /* no neighbors: mid-gray 128 in every byte */
    }
    else
    {
        sum = (sum << 8) | sum;
        sum = (sum << 16) | sum; /* broadcast DC value into all 4 bytes */
    }

    pred = encvid->pred_i16[AVC_I16_DC] - 16;
    for (i = 0; i < 16; i++)
    {
        *((uint32*)(pred += 16)) = sum;
        *((uint32*)(pred + 4)) = sum;
        *((uint32*)(pred + 8)) = sum;
        *((uint32*)(pred + 12)) = sum;
    }

    // get plane mode
    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
    {
        pred = encvid->pred_i16[AVC_I16_Plane] - 16;

        /* H and V gradients from the border pixels */
        comp_ref_x0 = curL - pitch + 8;
        comp_ref_x1 = curL - pitch + 6;
        comp_ref_y0 = curL - 1 + (pitch << 3);
        comp_ref_y1 = curL - 1 + 6 * pitch;

        for (i = 1; i < 8; i++)
        {
            H += i * (*comp_ref_x0++ - *comp_ref_x1--);
            V += i * (*comp_ref_y0 - *comp_ref_y1);
            comp_ref_y0 += pitch;
            comp_ref_y1 -= pitch;
        }

        H += i * (*comp_ref_x0++ - curL[-pitch-1]);
        V += i * (*comp_ref_y0 - *comp_ref_y1);

        /* NOTE(review): stray second ';' below — harmless empty statement */
        a_16 = ((*(curL - pitch + 15) + *(curL - 1 + 15 * pitch)) << 4) + 16;;
        b = (5 * H + 32) >> 6;
        c = (5 * V + 32) >> 6;

        tmp = 0;
        for (i = 0; i < 16; i++)
        {
            /* one row: running plane value, emitted 4 clipped pixels per word */
            factor_c = a_16 + c * (tmp++ - 7);

            factor_c -= 7 * b;

            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = value;
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 8);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 16);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 24);
            *((uint32*)(pred += 16)) = word1;

            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = value;
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 8);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 16);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 24);
            *((uint32*)(pred + 4)) = word1;

            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = value;
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 8);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 16);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 24);
            *((uint32*)(pred + 8)) = word1;

            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = value;
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 8);
            value = factor_c >> 5;
            factor_c += b;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 16);
            value = factor_c >> 5;
            CLIP_RESULT(value)
            word1 = (word1) | (value << 24);
            *((uint32*)(pred + 12)) = word1;
        }
    }

    return ;
}

/* evaluate each prediction mode of I16 */
/* Runs cost_i16 (SATD) against each available I16 prediction buffer and,
   whenever one beats *min_cost, records the winning mode in currMB
   (mbMode=AVC_I16, mb_intra=1, i16Mode) and lowers *min_cost. */
void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost)
{
    AVCCommonObj *video = encvid->common;
    AVCMacroblock *currMB = video->currMB;
    int cost;
    int org_pitch = encvid->currInput->pitch;

    /* evaluate vertical mode */
    if (video->intraAvailB)
    {
        cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Vertical], *min_cost);
        if (cost < *min_cost)
        {
            *min_cost = cost;
            currMB->mbMode = AVC_I16;
            currMB->mb_intra = 1;
            currMB->i16Mode = AVC_I16_Vertical;
        }
    }

    /* evaluate horizontal mode */
    if (video->intraAvailA)
    {
        cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Horizontal], *min_cost);
        if (cost < *min_cost)
        {
            *min_cost = cost;
            currMB->mbMode = AVC_I16;
            currMB->mb_intra = 1;
            currMB->i16Mode = AVC_I16_Horizontal;
        }
    }

    /* evaluate DC mode */
    cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_DC], *min_cost);
    if (cost < *min_cost)
    {
        *min_cost = cost;
        currMB->mbMode = AVC_I16;
        currMB->mb_intra = 1;
        currMB->i16Mode = AVC_I16_DC;
    }

    /* evaluate plane mode */
    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
    {
        cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Plane], *min_cost);
        if (cost < *min_cost)
        {
            *min_cost = cost;
            currMB->mbMode = AVC_I16;
            currMB->mb_intra = 1;
            currMB->i16Mode = AVC_I16_Plane;
        }
    }

    return ;
}

/* SATD cost of one 16x16 prediction: 4x4 Hadamard transforms of the
   residual (DC terms excluded per 4x4), plus a second-level 4x4 Hadamard
   over the 16 DC coefficients.  Returns cost>>1; bails out early with the
   partial cost as soon as it exceeds min_cost. */
int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost)
{
    int cost;
    int j, k;
    int16 res[256], *pres; // residue
    int m0, m1, m2, m3;

    // calculate SATD
    org_pitch -= 16;
    pres = res;
    // horizontal transform: 4-point Hadamard butterflies per row quad
    for (j = 0; j < 16; j++)
    {
        k = 4;
        while (k > 0)
        {
            m0 = org[0] - pred[0];
            m3 = org[3] - pred[3];
            m0 += m3;
            m3 = m0 - (m3 << 1);
            m1 = org[1] - pred[1];
            m2 = org[2] - pred[2];
            m1 += m2;
            m2 = m1 - (m2 << 1);
            pres[0] = m0 + m1;
            pres[2] = m0 - m1;
            pres[1] = m2 + m3;
            pres[3] = m3 - m2;
            org += 4;
            pres += 4;
            pred += 4;
            k--;
        }
        org += org_pitch;
    }
    /* vertical transform */
    cost = 0;
    for (j = 0; j < 4; j++)
    {
        pres = res + (j << 6);
        k = 16;
        while (k > 0)
        {
            m0 = pres[0];
            m3 = pres[3<<4];
            m0 += m3;
            m3 = m0 - (m3 << 1);
            m1 = pres[1<<4];
            m2 = pres[2<<4];
            m1 += m2;
            m2 = m1 - (m2 << 1);
            pres[0] = m0 = m0 + m1; /* keep DC in res[] for the second-level transform */
            if (k&0x3)  // only sum up non DC values.
            {
                cost += ((m0 > 0) ? m0 : -m0);
            }
            m1 = m0 - (m1 << 1);
            cost += ((m1 > 0) ? m1 : -m1);
            m3 = m2 + m3;
            cost += ((m3 > 0) ? m3 : -m3);
            m2 = m3 - (m2 << 1);
            cost += ((m2 > 0) ? m2 : -m2);
            pres++;
            k--;
        }
        if ((cost >> 1) > min_cost) /* early drop out */
        {
            return (cost >> 1);
        }
    }
    /* Hadamard of the DC coefficient */
    pres = res;
    k = 4;
    while (k > 0)
    {
        m0 = pres[0];
        m3 = pres[3<<2];
        m0 >>= 2;
        m0 += (m3 >> 2);
        m3 = m0 - (m3 >> 1);
        m1 = pres[1<<2];
        m2 = pres[2<<2];
        m1 >>= 2;
        m1 += (m2 >> 2);
        m2 = m1 - (m2 >> 1);
        pres[0] = (m0 + m1);
        pres[2<<2] = (m0 - m1);
        pres[1<<2] = (m2 + m3);
        pres[3<<2] = (m3 - m2);
        pres += (4 << 4);
        k--;
    }

    pres = res;
    k = 4;
    while (k > 0)
    {
        m0 = pres[0];
        m3 = pres[3<<6];
        m0 += m3;
        m3 = m0 - (m3 << 1);
        m1 = pres[1<<6];
        m2 = pres[2<<6];
        m1 += m2;
        m2 = m1 - (m2 << 1);
        m0 = m0 + m1;
        cost += ((m0 >= 0) ? m0 : -m0);
        m1 = m0 - (m1 << 1);
        cost += ((m1 >= 0) ? m1 : -m1);
        m3 = m2 + m3;
        cost += ((m3 >= 0) ? m3 : -m3);
        m2 = m3 - (m2 << 1);
        cost += ((m2 >= 0) ?
m2 : -m2); pres += 4; if ((cost >> 1) > min_cost) /* early drop out */ { return (cost >> 1); } k--; } return (cost >> 1); } void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost) { AVCCommonObj *video = encvid->common; AVCMacroblock *currMB = video->currMB; AVCPictureData *currPic = video->currPic; AVCFrameIO *currInput = encvid->currInput; int pitch = currPic->pitch; int org_pitch = currInput->pitch; int offset; uint8 *curL, *comp, *org4, *org8; int y = video->mb_y << 4; int x = video->mb_x << 4; int b8, b4, cost4x4, blkidx; int cost = 0; int numcoef; int dummy = 0; int mb_intra = currMB->mb_intra; // save the original value offset = y * pitch + x; curL = currPic->Sl + offset; org8 = currInput->YCbCr[0] + y * org_pitch + x; video->pred_pitch = 4; cost = (int)(6.0 * encvid->lambda_mode + 0.4999); cost <<= 2; currMB->mb_intra = 1; // temporary set this to one to enable the IDCT // operation inside dct_luma for (b8 = 0; b8 < 4; b8++) { comp = curL; org4 = org8; for (b4 = 0; b4 < 4; b4++) { blkidx = blkIdx2blkXY[b8][b4]; cost4x4 = blk_intra4x4_search(encvid, blkidx, comp, org4); cost += cost4x4; if (cost > *min_cost) { currMB->mb_intra = mb_intra; // restore the value return ; } /* do residue, Xfrm, Q, invQ, invXfrm, recon and save the DCT coefs.*/ video->pred_block = encvid->pred_i4[currMB->i4Mode[blkidx]]; numcoef = dct_luma(encvid, blkidx, comp, org4, &dummy); currMB->nz_coeff[blkidx] = numcoef; if (numcoef) { video->cbp4x4 |= (1 << blkidx); currMB->CBP |= (1 << b8); } if (b4&1) { comp += ((pitch << 2) - 4); org4 += ((org_pitch << 2) - 4); } else { comp += 4; org4 += 4; } } if (b8&1) { curL += ((pitch << 3) - 8); org8 += ((org_pitch << 3) - 8); } else { curL += 8; org8 += 8; } } currMB->mb_intra = mb_intra; // restore the value if (cost < *min_cost) { *min_cost = cost; currMB->mbMode = AVC_I4; currMB->mb_intra = 1; } return ; } /* search for i4 mode for a 4x4 block */ int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org) { 
    AVCCommonObj *video = encvid->common;
    AVCNeighborAvailability availability;
    AVCMacroblock *currMB = video->currMB;
    bool top_left = FALSE;
    int pitch = video->currPic->pitch;
    uint8 mode_avail[AVCNumI4PredMode];  /* flag per prediction mode */
    uint32 temp, DC;
    uint8 *pred;
    int org_pitch = encvid->currInput->pitch;
    uint16 min_cost, cost;

    int P_x, Q_x, R_x, P_y, Q_y, R_y, D, D0, D1;
    int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2;
    /* neighbor samples per H.264 naming: A..D top, E..H top-right,
       I..L left, X top-left */
    uint8 P_A, P_B, P_C, P_D, P_E, P_F, P_G, P_H, P_I, P_J, P_K, P_L, P_X;
    int r0, r1, r2, r3, r4, r5, r6, r7;
    int x0, x1, x2, x3, x4, x5;
    uint32 temp1, temp2;

    int ipmode, mostProbableMode;
    int fixedcost = 4 * encvid->lambda_mode; /* rate penalty for non-MPM modes */
    int min_sad = 0x7FFF;

    availability.left = TRUE;
    availability.top = TRUE;
    if (blkidx <= 3) /* top row block  (!block_y) */
    {
        /* check availability up */
        availability.top = video->intraAvailB ;
    }
    if (!(blkidx&0x3)) /* left column block (!block_x)*/
    {
        /* check availability left */
        availability.left = video->intraAvailA ;
    }
    availability.top_right = BlkTopRight[blkidx];

    if (availability.top_right == 2)      /* depends on MB above */
    {
        availability.top_right = video->intraAvailB;
    }
    else if (availability.top_right == 3) /* depends on MB above-right */
    {
        availability.top_right = video->intraAvailC;
    }

    if (availability.top == TRUE)
    {
        temp = *(uint32*)(cur - pitch);  /* load A..D in one word */
        P_A = temp & 0xFF;
        P_B = (temp >> 8) & 0xFF;
        P_C = (temp >> 16) & 0xFF;
        P_D = (temp >> 24) & 0xFF;
    }
    else
    {
        P_A = P_B = P_C = P_D = 128;  /* unavailable: mid-gray */
    }

    if (availability.top_right == TRUE)
    {
        temp = *(uint32*)(cur - pitch + 4);  /* load E..H */
        P_E = temp & 0xFF;
        P_F = (temp >> 8) & 0xFF;
        P_G = (temp >> 16) & 0xFF;
        P_H = (temp >> 24) & 0xFF;
    }
    else
    {
        P_E = P_F = P_G = P_H = 128;
    }

    if (availability.left == TRUE)
    {
        cur--;
        P_I = *cur;
        P_J = *(cur += pitch);
        P_K = *(cur += pitch);
        P_L = *(cur + pitch);
        cur -= (pitch << 1);  /* restore cur to its original position */
        cur++;
    }
    else
    {
        P_I = P_J = P_K = P_L = 128;
    }

    /* check if top-left pixel is available */
    if (((blkidx > 3) && (blkidx&0x3)) || ((blkidx > 3) && video->intraAvailA)
            || ((blkidx&0x3) && video->intraAvailB)
            || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
    {
        top_left = TRUE;
        P_X = *(cur - pitch - 1);
    }
    else
    {
        P_X = 128;
    }

    //===== INTRA PREDICTION FOR 4x4 BLOCK =====
    /* vertical */
    mode_avail[AVC_I4_Vertical] = 0;
    if (availability.top)
    {
        mode_avail[AVC_I4_Vertical] = 1;
        pred = encvid->pred_i4[AVC_I4_Vertical];

        temp = (P_D << 24) | (P_C << 16) | (P_B << 8) | P_A ;
        *((uint32*)pred) =  temp; /* write 4 at a time */
        *((uint32*)(pred += 4)) = temp;
        *((uint32*)(pred += 4)) = temp;
        *((uint32*)(pred += 4)) = temp;
    }
    /* horizontal */
    mode_avail[AVC_I4_Horizontal] = 0;
    mode_avail[AVC_I4_Horizontal_Up] = 0;
    if (availability.left)
    {
        mode_avail[AVC_I4_Horizontal] = 1;
        pred = encvid->pred_i4[AVC_I4_Horizontal];

        temp = P_I | (P_I << 8);
        temp = temp | (temp << 16);
        *((uint32*)pred) = temp;
        temp = P_J | (P_J << 8);
        temp = temp | (temp << 16);
        *((uint32*)(pred += 4)) = temp;
        temp = P_K | (P_K << 8);
        temp = temp | (temp << 16);
        *((uint32*)(pred += 4)) = temp;
        temp = P_L | (P_L << 8);
        temp = temp | (temp << 16);
        *((uint32*)(pred += 4)) = temp;

        mode_avail[AVC_I4_Horizontal_Up] = 1;
        pred = encvid->pred_i4[AVC_I4_Horizontal_Up];

        Q0 = (P_J + P_K + 1) >> 1;
        Q1 = (P_J + (P_K << 1) + P_L + 2) >> 2;
        P0 = ((P_I + P_J + 1) >> 1);
        P1 = ((P_I + (P_J << 1) + P_K + 2) >> 2);

        temp = P0 | (P1 << 8);      // [P0 P1 Q0 Q1]
        temp |= (Q0 << 16);         // [Q0 Q1 R0 DO]
        temp |= (Q1 << 24);         // [R0 D0 D1 D1]
        *((uint32*)pred) = temp;    // [D1 D1 D1 D1]

        D0 = (P_K + 3 * P_L + 2) >> 2;
        R0 = (P_K + P_L + 1) >> 1;

        temp = Q0 | (Q1 << 8);
        temp |= (R0 << 16);
        temp |= (D0 << 24);
        *((uint32*)(pred += 4)) = temp;

        D1 = P_L;

        temp = R0 | (D0 << 8);
        temp |= (D1 << 16);
        temp |= (D1 << 24);
        *((uint32*)(pred += 4)) = temp;

        temp = D1 | (D1 << 8);
        temp |= (temp << 16);
        *((uint32*)(pred += 4)) = temp;
    }

    /* DC */
    mode_avail[AVC_I4_DC] = 1;  /* DC is always available */
    pred = encvid->pred_i4[AVC_I4_DC];
    if (availability.left)
    {
        DC = P_I + P_J + P_K + P_L;

        if (availability.top)
        {
            DC = (P_A + P_B + P_C + P_D + DC + 4) >> 3;
        }
        else
        {
            DC = (DC + 2) >> 2;
        }
    }
    else if (availability.top)
    {
        DC = (P_A + P_B + P_C + P_D + 2) >> 2;
    }
    else
    {
        DC = 128;
    }

    temp = DC | (DC << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    *((uint32*)(pred += 4)) = temp;
    *((uint32*)(pred += 4)) = temp;
    *((uint32*)(pred += 4)) = temp;

    /* Down-left */
    mode_avail[AVC_I4_Diagonal_Down_Left] = 0;

    if (availability.top)
    {
        mode_avail[AVC_I4_Diagonal_Down_Left] = 1;

        pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Left];

        r0 = P_A;
        r1 = P_B;
        r2 = P_C;
        r3 = P_D;

        r0 += (r1 << 1);
        r0 += r2;
        r0 += 2;
        r0 >>= 2;
        r1 += (r2 << 1);
        r1 += r3;
        r1 += 2;
        r1 >>= 2;

        if (availability.top_right)
        {
            r4 = P_E;
            r5 = P_F;
            r6 = P_G;
            r7 = P_H;

            r2 += (r3 << 1);
            r2 += r4;
            r2 += 2;
            r2 >>= 2;
            r3 += (r4 << 1);
            r3 += r5;
            r3 += 2;
            r3 >>= 2;
            r4 += (r5 << 1);
            r4 += r6;
            r4 += 2;
            r4 >>= 2;
            r5 += (r6 << 1);
            r5 += r7;
            r5 += 2;
            r5 >>= 2;
            r6 += (3 * r7);
            r6 += 2;
            r6 >>= 2;

            /* rows are the previous row shifted one pixel down-left */
            temp = r0 | (r1 << 8);
            temp |= (r2 << 16);
            temp |= (r3 << 24);
            *((uint32*)pred) = temp;
            temp = (temp >> 8) | (r4 << 24);
            *((uint32*)(pred += 4)) = temp;
            temp = (temp >> 8) | (r5 << 24);
            *((uint32*)(pred += 4)) = temp;
            temp = (temp >> 8) | (r6 << 24);
            *((uint32*)(pred += 4)) = temp;
        }
        else
        {
            /* top-right unavailable: extend with P_D */
            r2 += (r3 * 3);
            r2 += 2;
            r2 >>= 2;
            r3 = ((r3 << 2) + 2);
            r3 >>= 2;

            temp = r0 | (r1 << 8);
            temp |= (r2 << 16);
            temp |= (r3 << 24);
            *((uint32*)pred) = temp;
            temp = (temp >> 8) | (r3 << 24);
            *((uint32*)(pred += 4)) = temp;
            temp = (temp >> 8) | (r3 << 24);
            *((uint32*)(pred += 4)) = temp;
            temp = (temp >> 8) | (r3 << 24);
            *((uint32*)(pred += 4)) = temp;
        }
    }

    /* Down Right */
    mode_avail[AVC_I4_Diagonal_Down_Right] = 0;
    /* Diagonal Vertical Right */
    mode_avail[AVC_I4_Vertical_Right] = 0;
    /* Horizontal Down */
    mode_avail[AVC_I4_Horizontal_Down] = 0;

    if (top_left == TRUE)
    {
        /* Down Right */
        mode_avail[AVC_I4_Diagonal_Down_Right] = 1;
        pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Right];

        Q_x = (P_A + 2 * P_B + P_C + 2) >> 2;
        R_x = (P_B + 2 * P_C + P_D + 2) >> 2;
        P_x = (P_X + 2 * P_A + P_B + 2) >> 2;
        D = (P_A + 2 * P_X + P_I + 2) >> 2;
        P_y = (P_X + 2 * P_I + P_J + 2) >> 2;
        Q_y = (P_I + 2 * P_J + P_K + 2) >> 2;
        R_y = (P_J + 2 * P_K + P_L + 2) >> 2;

        /* we can pack these */
        temp = D | (P_x << 8);   //[D   P_x Q_x R_x]
        //[P_y D   P_x Q_x]
        temp |= (Q_x << 16);  //[Q_y P_y D   P_x]
        temp |= (R_x << 24);  //[R_y Q_y P_y D  ]
        *((uint32*)pred) = temp;

        temp = P_y | (D << 8);
        temp |= (P_x << 16);
        temp |= (Q_x << 24);
        *((uint32*)(pred += 4)) = temp;

        temp = Q_y | (P_y << 8);
        temp |= (D << 16);
        temp |= (P_x << 24);
        *((uint32*)(pred += 4)) = temp;

        temp = R_y | (Q_y << 8);
        temp |= (P_y << 16);
        temp |= (D << 24);
        *((uint32*)(pred += 4)) = temp;

        /* Diagonal Vertical Right */
        mode_avail[AVC_I4_Vertical_Right] = 1;
        pred = encvid->pred_i4[AVC_I4_Vertical_Right];

        Q0 = P_A + P_B + 1;
        R0 = P_B + P_C + 1;
        S0 = P_C + P_D + 1;
        P0 = P_X + P_A + 1;
        D = (P_I + 2 * P_X + P_A + 2) >> 2;

        P1 = (P0 + Q0) >> 2;
        Q1 = (Q0 + R0) >> 2;
        R1 = (R0 + S0) >> 2;

        P0 >>= 1;
        Q0 >>= 1;
        R0 >>= 1;
        S0 >>= 1;

        P2 = (P_X + 2 * P_I + P_J + 2) >> 2;
        Q2 = (P_I + 2 * P_J + P_K + 2) >> 2;

        temp = P0 | (Q0 << 8);  //[P0 Q0 R0 S0]
        //[D  P1 Q1 R1]
        temp |= (R0 << 16); //[P2 P0 Q0 R0]
        temp |= (S0 << 24); //[Q2 D  P1 Q1]
        *((uint32*)pred) = temp;

        temp = D | (P1 << 8);
        temp |= (Q1 << 16);
        temp |= (R1 << 24);
        *((uint32*)(pred += 4)) = temp;

        temp = P2 | (P0 << 8);
        temp |= (Q0 << 16);
        temp |= (R0 << 24);
        *((uint32*)(pred += 4)) = temp;

        temp = Q2 | (D << 8);
        temp |= (P1 << 16);
        temp |= (Q1 << 24);
        *((uint32*)(pred += 4)) = temp;

        /* Horizontal Down */
        mode_avail[AVC_I4_Horizontal_Down] = 1;
        pred = encvid->pred_i4[AVC_I4_Horizontal_Down];

        Q2 = (P_A + 2 * P_B + P_C + 2) >> 2;
        P2 = (P_X + 2 * P_A + P_B + 2) >> 2;
        D = (P_I + 2 * P_X + P_A + 2) >> 2;

        P0 = P_X + P_I + 1;
        Q0 = P_I + P_J + 1;
        R0 = P_J + P_K + 1;
        S0 = P_K + P_L + 1;

        P1 = (P0 + Q0) >> 2;
        Q1 = (Q0 + R0) >> 2;
        R1 = (R0 + S0) >> 2;

        P0 >>= 1;
        Q0 >>= 1;
        R0 >>= 1;
        S0 >>= 1;

        /* we can pack these */
        temp = P0 | (D << 8);   //[P0 D  P2 Q2]
        //[Q0 P1 P0 D ]
        temp |= (P2 << 16);  //[R0 Q1 Q0 P1]
        temp |= (Q2 << 24);  //[S0 R1 R0 Q1]
        *((uint32*)pred) = temp;

        temp = Q0 | (P1 << 8);
        temp |= (P0 << 16);
        temp |= (D << 24);
        *((uint32*)(pred += 4)) =
            temp;

        temp = R0 | (Q1 << 8);
        temp |= (Q0 << 16);
        temp |= (P1 << 24);
        *((uint32*)(pred += 4)) = temp;

        temp = S0 | (R1 << 8);
        temp |= (R0 << 16);
        temp |= (Q1 << 24);
        *((uint32*)(pred += 4)) = temp;
    }

    /* vertical left */
    mode_avail[AVC_I4_Vertical_Left] = 0;
    if (availability.top)
    {
        mode_avail[AVC_I4_Vertical_Left] = 1;
        pred = encvid->pred_i4[AVC_I4_Vertical_Left];

        x0 = P_A + P_B + 1;
        x1 = P_B + P_C + 1;
        x2 = P_C + P_D + 1;
        if (availability.top_right)
        {
            x3 = P_D + P_E + 1;
            x4 = P_E + P_F + 1;
            x5 = P_F + P_G + 1;
        }
        else
        {
            x3 = x4 = x5 = (P_D << 1) + 1;  /* extend with P_D */
        }

        temp1 = (x0 >> 1);
        temp1 |= ((x1 >> 1) << 8);
        temp1 |= ((x2 >> 1) << 16);
        temp1 |= ((x3 >> 1) << 24);
        *((uint32*)pred) = temp1;

        temp2 = ((x0 + x1) >> 2);
        temp2 |= (((x1 + x2) >> 2) << 8);
        temp2 |= (((x2 + x3) >> 2) << 16);
        temp2 |= (((x3 + x4) >> 2) << 24);
        *((uint32*)(pred += 4)) = temp2;

        temp1 = (temp1 >> 8) | ((x4 >> 1) << 24);  /* rotate out old value */
        *((uint32*)(pred += 4)) = temp1;

        temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24);  /* rotate out old value */
        *((uint32*)(pred += 4)) = temp2;
    }

    //===== LOOP OVER ALL 4x4 INTRA PREDICTION MODES =====
    // can re-order the search here instead of going in order
    // find most probable mode
    encvid->mostProbableI4Mode[blkidx] = mostProbableMode = FindMostProbableI4Mode(video, blkidx);

    min_cost = 0xFFFF;

    for (ipmode = 0; ipmode < AVCNumI4PredMode; ipmode++)
    {
        if (mode_avail[ipmode] == TRUE)
        {
            /* the most probable mode is free to signal; others pay fixedcost */
            cost  = (ipmode == mostProbableMode) ? 0 : fixedcost;
            pred = encvid->pred_i4[ipmode];

            cost_i4(org, org_pitch, pred, &cost);

            if (cost < min_cost)
            {
                currMB->i4Mode[blkidx] = (AVCIntra4x4PredMode)ipmode;
                min_cost   = cost;
                min_sad = cost - ((ipmode == mostProbableMode) ?
                                  0 : fixedcost);
            }
        }
    }

    if (blkidx == 0)
    {
        encvid->i4_sad = min_sad;   /* first block of the MB: reset accumulator */
    }
    else
    {
        encvid->i4_sad += min_sad;
    }

    return min_cost;
}

/* Derives the most probable 4x4 intra mode for block `blkidx` as the
   minimum of the modes of the left (A) and above (B) neighbor blocks,
   falling back to DC when either neighbor is outside the picture or not
   I4-coded (H.264 MPM derivation). */
int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx)
{
    int dcOnlyPredictionFlag;
    AVCMacroblock *currMB = video->currMB;
    int intra4x4PredModeA, intra4x4PredModeB, predIntra4x4PredMode;

    dcOnlyPredictionFlag = 0;
    if (blkidx&0x3)
    {
        intra4x4PredModeA = currMB->i4Mode[blkidx-1]; // block to the left
    }
    else /* for blk 0, 4, 8, 12 */
    {
        /* left neighbor lives in the MB to the left */
        if (video->intraAvailA)
        {
            if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
            {
                intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[blkidx + 3];
            }
            else
            {
                intra4x4PredModeA = AVC_I4_DC;
            }
        }
        else
        {
            dcOnlyPredictionFlag = 1;
            goto PRED_RESULT_READY;  // skip below
        }
    }

    if (blkidx >> 2)
    {
        intra4x4PredModeB = currMB->i4Mode[blkidx-4]; // block above
    }
    else /* block 0, 1, 2, 3 */
    {
        /* above neighbor lives in the MB above */
        if (video->intraAvailB)
        {
            if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
            {
                intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[blkidx+12];
            }
            else
            {
                intra4x4PredModeB = AVC_I4_DC;
            }
        }
        else
        {
            dcOnlyPredictionFlag = 1;
        }
    }

PRED_RESULT_READY:
    if (dcOnlyPredictionFlag)
    {
        intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
    }

    predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);

    return predIntra4x4PredMode;
}

/* SATD of one 4x4 block: Hadamard transform of the residual (org - pred),
   sum of absolute transform coefficients, rounded half and ADDED into
   *cost (which arrives pre-loaded with the mode-signalling penalty). */
void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost)
{
    int k;
    int16 res[16], *pres;
    int m0, m1, m2, m3, tmp1;
    int satd = 0;

    pres = res;
    // horizontal transform
    k = 4;
    while (k > 0)
    {
        m0 = org[0] - pred[0];
        m3 = org[3] - pred[3];
        m0 += m3;
        m3 = m0 - (m3 << 1);
        m1 = org[1] - pred[1];
        m2 = org[2] - pred[2];
        m1 += m2;
        m2 = m1 - (m2 << 1);
        pres[0] = m0 + m1;
        pres[2] = m0 - m1;
        pres[1] = m2 + m3;
        pres[3] = m3 - m2;
        org += org_pitch;
        pres += 4;
        pred += 4;
        k--;
    }
    /* vertical transform */
    pres = res;
    k = 4;
    while (k > 0)
    {
        m0 = pres[0];
        m3 = pres[12];
        m0 += m3;
        m3 = m0 - (m3 << 1);
        m1 = pres[4];
        m2 = pres[8];
        m1 += m2;
        m2 = m1 - (m2 << 1);
        pres[0] = m0 + m1;
        pres[8] = m0 - m1;
        pres[4] = m2 + m3;
        pres[12] = m3 -
                   m2;
        pres++;
        k--;
    }

    /* sum of absolute transformed coefficients */
    pres = res;
    k = 4;
    while (k > 0)
    {
        tmp1 = *pres++;
        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
        k--;
    }

    satd = (satd + 1) >> 1;
    *cost += satd;

    return ;
}

/* Chroma intra mode search: builds the Cb+Cr prediction buffers for the
   four chroma modes (DC / Horizontal / Vertical / Plane) in
   encvid->pred_ic[], scores each with SATDChroma against the original
   chroma, and records the winner in currMB->intra_chroma_pred_mode.
   Each pred_ic row holds 8 Cb bytes followed by 8 Cr bytes (16 per line). */
void chroma_intra_search(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    AVCPictureData *currPic = video->currPic;

    int x_pos = video->mb_x << 3;
    int y_pos = video->mb_y << 3;
    int pitch = currPic->pitch >> 1;  /* chroma pitch is half of luma pitch */
    int offset = y_pos * pitch + x_pos;

    uint8 *comp_ref_x, *comp_ref_y, *pred;
    int  sum_x0, sum_x1, sum_y0, sum_y1;
    /* four 4x4 DC quadrant values, [0]=Cb [1]=Cr */
    int pred_0[2], pred_1[2], pred_2[2], pred_3[2];
    uint32 pred_a, pred_b, pred_c, pred_d;
    int i, j, component;
    int a_16, b, c, factor_c, topleft;
    int H, V, value;
    uint8 *comp_ref_x0, *comp_ref_x1,  *comp_ref_y0, *comp_ref_y1;

    uint8 *curCb = currPic->Scb + offset;
    uint8 *curCr = currPic->Scr + offset;

    uint8 *orgCb, *orgCr;
    AVCFrameIO *currInput = encvid->currInput;
    AVCMacroblock *currMB = video->currMB;
    int org_pitch;
    int cost, mincost;

    /* evaluate DC mode */
    if (video->intraAvailB & video->intraAvailA)
    {
        comp_ref_x = curCb - pitch;  /* top neighbor row */
        comp_ref_y = curCb - 1;      /* left neighbor column */

        for (i = 0; i < 2; i++)  /* i=0: Cb, i=1: Cr */
        {
            /* packed-lane sum of 4 top pixels (left half) */
            pred_a = *((uint32*)comp_ref_x);
            comp_ref_x += 4;
            pred_b = (pred_a >> 8) & 0xFF00FF;
            pred_a &= 0xFF00FF;
            pred_a += pred_b;
            pred_a += (pred_a >> 16);
            sum_x0 = pred_a & 0xFFFF;

            /* sum of 4 top pixels (right half) */
            pred_a = *((uint32*)comp_ref_x);
            pred_b = (pred_a >> 8) & 0xFF00FF;
            pred_a &= 0xFF00FF;
            pred_a += pred_b;
            pred_a += (pred_a >> 16);
            sum_x1 = pred_a & 0xFFFF;

            pred_1[i] = (sum_x1 + 2) >> 2;

            sum_y0 = *comp_ref_y;
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);

            sum_y1 = *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);

            pred_2[i] = (sum_y1 + 2) >> 2;

            pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;
            pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;

            comp_ref_x = curCr -
                         pitch;
            comp_ref_y = curCr - 1;  /* switch to Cr neighbors for pass 2 */
        }
    }
    else if (video->intraAvailA)  /* only left neighbors */
    {
        comp_ref_y = curCb - 1;
        for (i = 0; i < 2; i++)
        {
            sum_y0 = *comp_ref_y;
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);

            sum_y1 = *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);

            pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;
            pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;
            comp_ref_y = curCr - 1;
        }
    }
    else if (video->intraAvailB)  /* only top neighbors */
    {
        comp_ref_x = curCb - pitch;
        for (i = 0; i < 2; i++)
        {
            pred_a = *((uint32*)comp_ref_x);
            comp_ref_x += 4;
            pred_b = (pred_a >> 8) & 0xFF00FF;
            pred_a &= 0xFF00FF;
            pred_a += pred_b;
            pred_a += (pred_a >> 16);
            sum_x0 = pred_a & 0xFFFF;

            pred_a = *((uint32*)comp_ref_x);
            pred_b = (pred_a >> 8) & 0xFF00FF;
            pred_a &= 0xFF00FF;
            pred_a += pred_b;
            pred_a += (pred_a >> 16);
            sum_x1 = pred_a & 0xFFFF;

            pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;
            pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;
            comp_ref_x = curCr - pitch;
        }
    }
    else  /* no neighbors: mid-gray */
    {
        pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =
            pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 128;
    }

    /* fill the DC prediction buffer: top half (quadrants 0 and 1) */
    pred = encvid->pred_ic[AVC_IC_DC];

    pred_a = pred_0[0];
    pred_b = pred_1[0];
    pred_a |= (pred_a << 8);
    pred_a |= (pred_a << 16);
    pred_b |= (pred_b << 8);
    pred_b |= (pred_b << 16);

    pred_c = pred_0[1];
    pred_d = pred_1[1];
    pred_c |= (pred_c << 8);
    pred_c |= (pred_c << 16);
    pred_d |= (pred_d << 8);
    pred_d |= (pred_d << 16);

    for (j = 0; j < 4; j++) /* 4 lines */
    {
        *((uint32*)pred) = pred_a;
        *((uint32*)(pred + 4)) = pred_b;
        *((uint32*)(pred + 8)) = pred_c;
        *((uint32*)(pred + 12)) = pred_d;
        pred += 16; /* move to the next line */
    }

    /* bottom half (quadrants 2 and 3) */
    pred_a = pred_2[0];
    pred_b = pred_3[0];
    pred_a |= (pred_a << 8);
    pred_a |= (pred_a << 16);
    pred_b |= (pred_b << 8);
    pred_b |= (pred_b << 16);

    pred_c = pred_2[1];
    pred_d = pred_3[1];
    pred_c |= (pred_c << 8);
    pred_c |= (pred_c << 16);
    pred_d |= (pred_d << 8);
    pred_d |= (pred_d << 16);

    for (j = 0; j < 4; j++) /* 4 lines */
    {
        *((uint32*)pred) = pred_a;
        *((uint32*)(pred + 4)) = pred_b;
        *((uint32*)(pred + 8)) = pred_c;
        *((uint32*)(pred + 12)) = pred_d;
        pred += 16; /* move to the next line */
    }

    /* predict horizontal mode */
    if (video->intraAvailA)
    {
        comp_ref_y = curCb - 1;
        comp_ref_x = curCr - 1;
        pred = encvid->pred_ic[AVC_IC_Horizontal];

        for (i = 4; i < 6; i++)
        {
            for (j = 0; j < 4; j++)
            {
                pred_a = *comp_ref_y;
                comp_ref_y += pitch;
                pred_a |= (pred_a << 8);
                pred_a |= (pred_a << 16);
                *((uint32*)pred) = pred_a;
                *((uint32*)(pred + 4)) = pred_a;

                pred_a = *comp_ref_x;
                comp_ref_x += pitch;
                pred_a |= (pred_a << 8);
                pred_a |= (pred_a << 16);
                *((uint32*)(pred + 8)) = pred_a;
                *((uint32*)(pred + 12)) = pred_a;

                pred += 16;
            }
        }
    }

    /* vertical mode */
    if (video->intraAvailB)
    {
        comp_ref_x = curCb - pitch;
        comp_ref_y = curCr - pitch;
        pred = encvid->pred_ic[AVC_IC_Vertical];

        pred_a = *((uint32*)comp_ref_x);
        pred_b = *((uint32*)(comp_ref_x + 4));
        pred_c = *((uint32*)comp_ref_y);
        pred_d = *((uint32*)(comp_ref_y + 4));

        for (j = 0; j < 8; j++)
        {
            *((uint32*)pred) = pred_a;
            *((uint32*)(pred + 4)) = pred_b;
            *((uint32*)(pred + 8)) = pred_c;
            *((uint32*)(pred + 12)) = pred_d;
            pred += 16;
        }
    }

    /* Intra_Chroma_Plane */
    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
    {
        comp_ref_x = curCb - pitch;
        comp_ref_y = curCb - 1;
        topleft = curCb[-pitch-1];

        pred = encvid->pred_ic[AVC_IC_Plane];
        for (component = 0; component < 2; component++)  /* Cb then Cr */
        {
            /* H and V gradients from the 8-pixel borders */
            H = V = 0;
            comp_ref_x0 = comp_ref_x + 4;
            comp_ref_x1 = comp_ref_x + 2;
            comp_ref_y0 = comp_ref_y + (pitch << 2);
            comp_ref_y1 = comp_ref_y + (pitch << 1);
            for (i = 1; i < 4; i++)
            {
                H += i * (*comp_ref_x0++ - *comp_ref_x1--);
                V += i * (*comp_ref_y0 - *comp_ref_y1);
                comp_ref_y0 += pitch;
                comp_ref_y1 -= pitch;
            }
            H += i * (*comp_ref_x0++ - topleft);
            V += i * (*comp_ref_y0 - *comp_ref_y1);

            a_16 = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;
            b = (17 * H + 16) >> 5;
            c = (17 * V + 16) >> 5;

            pred_a = 0;  /* row counter for the plane equation */
            for (i = 4; i < 6; i++)
            {
                for (j = 0; j < 4; j++)
                {
                    factor_c = a_16 + c * (pred_a++ - 3);

                    factor_c -= 3 * b;

                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b = value;
                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b |= (value << 8);
                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b |= (value << 16);
                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b |= (value << 24);
                    *((uint32*)pred) = pred_b;

                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b = value;
                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b |= (value << 8);
                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b |= (value << 16);
                    value = factor_c >> 5;
                    factor_c += b;
                    CLIP_RESULT(value)
                    pred_b |= (value << 24);
                    *((uint32*)(pred + 4)) = pred_b;
                    pred += 16;
                }
            }

            pred -= 120; /* point to cr */
            comp_ref_x = curCr - pitch;
            comp_ref_y = curCr - 1;
            topleft = curCr[-pitch-1];
        }
    }

    /* now evaluate it */
    org_pitch = (currInput->pitch) >> 1;
    offset = x_pos + y_pos * org_pitch;

    orgCb = currInput->YCbCr[1] + offset;
    orgCr = currInput->YCbCr[2] + offset;

    mincost = 0x7fffffff;
    cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_DC], mincost);
    if (cost < mincost)
    {
        mincost = cost;
        currMB->intra_chroma_pred_mode = AVC_IC_DC;
    }

    if (video->intraAvailA)
    {
        cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Horizontal], mincost);
        if (cost < mincost)
        {
            mincost = cost;
            currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;
        }
    }

    if (video->intraAvailB)
    {
        cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Vertical], mincost);
        if (cost < mincost)
        {
            mincost = cost;
            currMB->intra_chroma_pred_mode = AVC_IC_Vertical;
        }
    }

    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
    {
        cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Plane], mincost);
        if (cost < mincost)
        {
            mincost = cost;
            currMB->intra_chroma_pred_mode = AVC_IC_Plane;
        }
    }

    return ;
}

int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int min_cost)
{
    int cost;
    /*
first take difference between orgCb, orgCr and pred */
    int16 res[128], *pres; // residue
    int m0, m1, m2, m3, tmp1;
    int j, k;

    pres = res;
    org_pitch -= 8;
    // horizontal transform
    /* 4-point Hadamard butterflies on each row of the 8x8 Cb block (first two
       4-wide groups) then the matching row of the Cr block */
    for (j = 0; j < 8; j++)
    {
        k = 2;
        while (k > 0)
        {
            m0 = orgCb[0] - pred[0];
            m3 = orgCb[3] - pred[3];
            m0 += m3;
            m3 = m0 - (m3 << 1);
            m1 = orgCb[1] - pred[1];
            m2 = orgCb[2] - pred[2];
            m1 += m2;
            m2 = m1 - (m2 << 1);
            pres[0] = m0 + m1;
            pres[2] = m0 - m1;
            pres[1] = m2 + m3;
            pres[3] = m3 - m2;
            orgCb += 4;
            pres += 4;
            pred += 4;
            k--;
        }
        orgCb += org_pitch;
        k = 2;
        while (k > 0)
        {
            m0 = orgCr[0] - pred[0];
            m3 = orgCr[3] - pred[3];
            m0 += m3;
            m3 = m0 - (m3 << 1);
            m1 = orgCr[1] - pred[1];
            m2 = orgCr[2] - pred[2];
            m1 += m2;
            m2 = m1 - (m2 << 1);
            pres[0] = m0 + m1;
            pres[2] = m0 - m1;
            pres[1] = m2 + m3;
            pres[3] = m3 - m2;
            orgCr += 4;
            pres += 4;
            pred += 4;
            k--;
        }
        orgCr += org_pitch;
    }

    /* vertical transform */
    /* columns are 16 apart in res[]; two 64-entry halves (rows 0-3 and 4-7) */
    for (j = 0; j < 2; j++)
    {
        pres = res + (j << 6);
        k = 16;
        while (k > 0)
        {
            m0 = pres[0];
            m3 = pres[3<<4];
            m0 += m3;
            m3 = m0 - (m3 << 1);
            m1 = pres[1<<4];
            m2 = pres[2<<4];
            m1 += m2;
            m2 = m1 - (m2 << 1);
            pres[0] = m0 + m1;
            pres[2<<4] = m0 - m1;
            pres[1<<4] = m2 + m3;
            pres[3<<4] = m3 - m2;
            pres++;
            k--;
        }
    }

    /* now sum of absolute value */
    pres = res;
    cost = 0;
    k = 128;
    while (k > 0)
    {
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        tmp1 = *pres++;
        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
        k -= 8;
        if (cost > min_cost) /* early drop out */
        {
            return cost;
        }
    }

    return cost;
}

///////////////////////////////// old code, unused

/* find the best intra mode based on original (unencoded) frame */
/* output is currMB->mb_intra, currMB->mbMode, currMB->i16Mode (if currMB->mbMode == AVC_I16)
   currMB->i4Mode[..] (if currMB->mbMode == AVC_I4) */
#ifdef FIXED_INTRAPRED_MODE
/* Debug/fixed-mode variant: forces the intra mode to the compile-time constants
   FIXED_INTRAPRED_MODE / FIXED_I16_MODE / FIXED_I4_MODE / FIXED_INTRA_CHROMA_MODE,
   falling back to DC whenever the fixed mode's required neighbors are missing. */
void MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)
{
    (void)(mbNum);

    AVCCommonObj *video = encvid->common;
    int indx, block_x, block_y;

    video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;

    if (!video->currPicParams->constrained_intra_pred_flag)
    {
        video->intraAvailA = video->mbAvailA;
        video->intraAvailB = video->mbAvailB;
        video->intraAvailC = video->mbAvailC;
        video->intraAvailD = video->mbAvailD;
    }
    else
    {
        /* constrained intra: a neighbor only counts if it is itself intra-coded */
        if (video->mbAvailA)
        {
            video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
        }
        if (video->mbAvailB)
        {
            video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
        }
        if (video->mbAvailC)
        {
            video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
        }
        if (video->mbAvailD)
        {
            video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
        }
    }

    currMB->mb_intra = TRUE;
    currMB->mbMode = FIXED_INTRAPRED_MODE;

    if (currMB->mbMode == AVC_I16)
    {
        currMB->i16Mode = FIXED_I16_MODE;
        if (FIXED_I16_MODE == AVC_I16_Vertical && !video->intraAvailB)
        {
            currMB->i16Mode = AVC_I16_DC;
        }
        if (FIXED_I16_MODE == AVC_I16_Horizontal && !video->intraAvailA)
        {
            currMB->i16Mode = AVC_I16_DC;
        }
        if (FIXED_I16_MODE == AVC_I16_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))
        {
            currMB->i16Mode = AVC_I16_DC;
        }
    }
    else //if(currMB->mbMode == AVC_I4)
    {
        /* per-4x4-block fallback to DC when the fixed mode's neighbors are absent */
        for (indx = 0; indx < 16; indx++)
        {
            block_x = blkIdx2blkX[indx];
            block_y = blkIdx2blkY[indx];

            currMB->i4Mode[(block_y<<2)+block_x] = FIXED_I4_MODE;

            if (FIXED_I4_MODE == AVC_I4_Vertical && !(block_y > 0 || video->intraAvailB))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Horizontal && !(block_x || video->intraAvailA))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Left && (block_y == 0 && !video->intraAvailB))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Right &&
                    !((block_y && block_x)
                      || (block_y && video->intraAvailA)
                      || (block_x && video->intraAvailB)
                      || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Vertical_Right &&
                    !((block_y && block_x)
                      || (block_y && video->intraAvailA)
                      || (block_x && video->intraAvailB)
                      || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Horizontal_Down &&
                    !((block_y && block_x)
                      || (block_y && video->intraAvailA)
                      || (block_x && video->intraAvailB)
                      || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Vertical_Left && (block_y == 0 && !video->intraAvailB))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
            if (FIXED_I4_MODE == AVC_I4_Horizontal_Up && !(block_x || video->intraAvailA))
            {
                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
            }
        }
    }

    currMB->intra_chroma_pred_mode = FIXED_INTRA_CHROMA_MODE;
    if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Horizontal && !(video->intraAvailA))
    {
        currMB->intra_chroma_pred_mode = AVC_IC_DC;
    }
    if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Vertical && !(video->intraAvailB))
    {
        currMB->intra_chroma_pred_mode = AVC_IC_DC;
    }
    if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))
    {
        currMB->intra_chroma_pred_mode = AVC_IC_DC;
    }

    /* also reset the motion vectors */
    /* set MV and Ref_Idx codes of Intra blocks in P-slices */
    oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);
    currMB->ref_idx_L0[0] = -1;
    currMB->ref_idx_L0[1] =
-1;
    currMB->ref_idx_L0[2] = -1;
    currMB->ref_idx_L0[3] = -1;

    // output from this function, currMB->mbMode should be set to either
    // AVC_I4, AVC_I16, or else in AVCMBMode enum, mbType, mb_intra, intra_chroma_pred_mode */

    return ;
}
#else // faster combined prediction+SAD calculation
/* Normal variant: exhaustively evaluates the I16 luma modes (and, when intra
   wins, the chroma modes) by SAD against the original frame, updating
   encvid->min_cost and the mode fields of currMB. */
void MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)
{
    AVCCommonObj *video = encvid->common;
    AVCFrameIO *currInput = encvid->currInput;
    uint8 *curL, *curCb, *curCr;
    uint8 *comp, *pred_block;
    int block_x, block_y, offset;
    uint sad, sad4, sadI4, sadI16;
    int component, SubBlock_indx, temp;
    int pitch = video->currPic->pitch;

    /* calculate the cost of each intra prediction mode and compare to the inter mode */
    /* full search for all intra prediction */
    offset = (video->mb_y << 4) * pitch + (video->mb_x << 4);
    curL = currInput->YCbCr[0] + offset;
    pred_block = video->pred_block + 84;

    /* Assuming that InitNeighborAvailability has been called prior to this function */
    video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;

    if (!video->currPicParams->constrained_intra_pred_flag)
    {
        video->intraAvailA = video->mbAvailA;
        video->intraAvailB = video->mbAvailB;
        video->intraAvailC = video->mbAvailC;
        video->intraAvailD = video->mbAvailD;
    }
    else
    {
        /* constrained intra: neighbor counts only if it is intra-coded itself */
        if (video->mbAvailA)
        {
            video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
        }
        if (video->mbAvailB)
        {
            video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
        }
        if (video->mbAvailC)
        {
            video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
        }
        if (video->mbAvailD)
        {
            video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
        }
    }

    /* currently we're doing exhaustive search. Smart search will be used later */

    /* I16 modes */
    curL = currInput->YCbCr[0] + offset;
    video->pintra_pred_top = curL - pitch;
    video->pintra_pred_left = curL - 1;
    if (video->mb_y)
    {
        video->intra_pred_topleft = *(curL - pitch - 1);
    }

    /* Intra_16x16_Vertical */
    sadI16 = 65536;
    /* check availability of top */
    if (video->intraAvailB)
    {
        sad = SAD_I16_Vert(video, curL, sadI16);
        if (sad < sadI16)
        {
            sadI16 = sad;
            currMB->i16Mode = AVC_I16_Vertical;
        }
    }
    /* Intra_16x16_Horizontal */
    /* check availability of left */
    if (video->intraAvailA)
    {
        sad = SAD_I16_HorzDC(video, curL, AVC_I16_Horizontal, sadI16);
        if (sad < sadI16)
        {
            sadI16 = sad;
            currMB->i16Mode = AVC_I16_Horizontal;
        }
    }

    /* Intra_16x16_DC, default mode */
    sad = SAD_I16_HorzDC(video, curL, AVC_I16_DC, sadI16);
    if (sad < sadI16)
    {
        sadI16 = sad;
        currMB->i16Mode = AVC_I16_DC;
    }

    /* Intra_16x16_Plane */
    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
    {
        sad = SAD_I16_Plane(video, curL, sadI16);
        if (sad < sadI16)
        {
            sadI16 = sad;
            currMB->i16Mode = AVC_I16_Plane;
        }
    }

    sadI16 >>= 1;  /* before comparison */

    /* selection between intra4, intra16 or inter mode */
    if (sadI16 < encvid->min_cost)
    {
        currMB->mb_intra = TRUE;
        currMB->mbMode = AVC_I16;
        encvid->min_cost = sadI16;
    }

    if (currMB->mb_intra) /* only do the chrominance search when intra is decided */
    {
        /* Note that we might be able to guess the type of prediction from
           the luma prediction type */

        /* now search for the best chroma intra prediction */
        offset = (offset >> 2) + (video->mb_x << 2);
        curCb = currInput->YCbCr[1] + offset;
        curCr = currInput->YCbCr[2] + offset;

        pitch >>= 1;
        video->pintra_pred_top_cb = curCb - pitch;
        video->pintra_pred_left_cb = curCb - 1;
        video->pintra_pred_top_cr = curCr - pitch;
        video->pintra_pred_left_cr = curCr - 1;

        if (video->mb_y)
        {
            video->intra_pred_topleft_cb = *(curCb - pitch - 1);
            video->intra_pred_topleft_cr = *(curCr - pitch - 1);
        }

        /* Intra_Chroma_DC */
        sad4 = SAD_Chroma_DC(video, curCb, curCr, 65536);
        currMB->intra_chroma_pred_mode = AVC_IC_DC;

        /* Intra_Chroma_Horizontal */
        if (video->intraAvailA)
        {
            /* check availability of left */
            sad = SAD_Chroma_Horz(video, curCb, curCr, sad4);
            if (sad < sad4)
            {
                sad4 = sad;
                currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;
            }
        }

        /* Intra_Chroma_Vertical */
        if (video->intraAvailB)
        {
            /* check availability of top */
            sad = SAD_Chroma_Vert(video, curCb, curCr, sad4);
            if (sad < sad4)
            {
                sad4 = sad;
                currMB->intra_chroma_pred_mode = AVC_IC_Vertical;
            }
        }

        /* Intra_Chroma_Plane */
        if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
        {
            /* check availability of top and left */
            Intra_Chroma_Plane(video, pitch);
            sad = SADChroma(pred_block + 452, curCb, curCr, pitch);
            if (sad < sad4)
            {
                sad4 = sad;
                currMB->intra_chroma_pred_mode = AVC_IC_Plane;
            }
        }

        /* also reset the motion vectors */
        /* set MV and Ref_Idx codes of Intra blocks in P-slices */
        oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);
        oscl_memset(currMB->ref_idx_L0, -1, sizeof(int16)*4);
    }

    // output from this function, currMB->mbMode should be set to either
    // AVC_I4, AVC_I16, or else in AVCMBMode enum, mbType, mb_intra, intra_chroma_pred_mode */

    return ;
}
#endif


================================================
FILE: RtspCamera/jni/avc_h264/enc/src/motion_comp.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #include "avcenc_lib.h" #include "avcenc_int.h" #include "oscl_mem.h" #define CLIP_RESULT(x) if((uint)x > 0xFF){ \ x = 0xFF & (~(x>>31));} /* (blkwidth << 2) + (dy << 1) + dx */ static void (*const eChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) = { &eChromaFullMC_SIMD, &eChromaHorizontalMC_SIMD, &eChromaVerticalMC_SIMD, &eChromaDiagonalMC_SIMD, &eChromaFullMC_SIMD, &eChromaHorizontalMC2_SIMD, &eChromaVerticalMC2_SIMD, &eChromaDiagonalMC2_SIMD }; /* Perform motion prediction and compensation with residue if exist. */ void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video) { (void)(encvid); AVCMacroblock *currMB = video->currMB; AVCPictureData *currPic = video->currPic; int mbPartIdx, subMbPartIdx; int ref_idx; int offset_MbPart_indx = 0; int16 *mv; uint32 x_pos, y_pos; uint8 *curL, *curCb, *curCr; uint8 *ref_l, *ref_Cb, *ref_Cr; uint8 *predBlock, *predCb, *predCr; int block_x, block_y, offset_x, offset_y, offsetP, offset; int x_position = (video->mb_x << 4); int y_position = (video->mb_y << 4); int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx; int picWidth = currPic->width; int picPitch = currPic->pitch; int picHeight = currPic->height; uint32 tmp_word; tmp_word = y_position * picPitch; curL = currPic->Sl + tmp_word + x_position; offset = (tmp_word >> 2) + (x_position >> 1); curCb = currPic->Scb + offset; curCr = currPic->Scr + offset; predBlock = curL; predCb = curCb; predCr = curCr; GetMotionVectorPredictor(video, 1); for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++) { MbHeight = currMB->SubMbPartHeight[mbPartIdx]; MbWidth = currMB->SubMbPartWidth[mbPartIdx]; mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1); mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1; ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X]; offset_indx = 0; ref_l = video->RefPicList0[ref_idx]->Sl; ref_Cb = video->RefPicList0[ref_idx]->Scb; ref_Cr = 
video->RefPicList0[ref_idx]->Scr; for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++) { block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1); block_y = (mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1); mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2)); offset_x = x_position + (block_x << 2); offset_y = y_position + (block_y << 2); x_pos = (offset_x << 2) + *mv++; /*quarter pel */ y_pos = (offset_y << 2) + *mv; /*quarter pel */ //offset = offset_y * currPic->width; //offsetC = (offset >> 2) + (offset_x >> 1); offsetP = (block_y << 2) * picPitch + (block_x << 2); eLumaMotionComp(ref_l, picPitch, picHeight, x_pos, y_pos, /*comp_Sl + offset + offset_x,*/ predBlock + offsetP, picPitch, MbWidth, MbHeight); offsetP = (block_y * picWidth) + (block_x << 1); eChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos, /*comp_Scb + offsetC,*/ predCb + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1); eChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos, /*comp_Scr + offsetC,*/ predCr + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1); offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3; } offset_MbPart_indx = currMB->MbPartWidth >> 4; } return ; } /* preform the actual motion comp here */ void eLumaMotionComp(uint8 *ref, int picpitch, int picheight, int x_pos, int y_pos, uint8 *pred, int pred_pitch, int blkwidth, int blkheight) { (void)(picheight); int dx, dy; int temp2[21][21]; /* for intermediate results */ uint8 *ref2; dx = x_pos & 3; dy = y_pos & 3; x_pos = x_pos >> 2; /* round it to full-pel resolution */ y_pos = y_pos >> 2; /* perform actual motion compensation */ if (dx == 0 && dy == 0) { /* fullpel position *//* G */ ref += y_pos * picpitch + x_pos; eFullPelMC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight); } /* other positions */ else if (dy == 0) { /* no vertical interpolation *//* a,b,c*/ ref += y_pos * picpitch + x_pos; eHorzInterp1MC(ref, picpitch, pred, 
pred_pitch, blkwidth, blkheight, dx); } else if (dx == 0) { /*no horizontal interpolation *//* d,h,n */ ref += y_pos * picpitch + x_pos; eVertInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dy); } else if (dy == 2) { /* horizontal cross *//* i, j, k */ ref += y_pos * picpitch + x_pos - 2; /* move to the left 2 pixels */ eVertInterp2MC(ref, picpitch, &temp2[0][0], 21, blkwidth + 5, blkheight); eHorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx); } else if (dx == 2) { /* vertical cross */ /* f,q */ ref += (y_pos - 2) * picpitch + x_pos; /* move to up 2 lines */ eHorzInterp3MC(ref, picpitch, &temp2[0][0], 21, blkwidth, blkheight + 5); eVertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy); } else { /* diagonal *//* e,g,p,r */ ref2 = ref + (y_pos + (dy / 2)) * picpitch + x_pos; ref += (y_pos * picpitch) + x_pos + (dx / 2); eDiagonalInterpMC(ref2, ref, picpitch, pred, pred_pitch, blkwidth, blkheight); } return ; } void eCreateAlign(uint8 *ref, int picpitch, int y_pos, uint8 *out, int blkwidth, int blkheight) { int i, j; int offset, out_offset; uint32 prev_pix, result, pix1, pix2, pix4; ref += y_pos * picpitch;// + x_pos; out_offset = 24 - blkwidth; //switch(x_pos&0x3){ switch (((uint32)ref)&0x3) { case 1: offset = picpitch - blkwidth - 3; for (j = 0; j < blkheight; j++) { pix1 = *ref++; pix2 = *((uint16*)ref); ref += 2; result = (pix2 << 8) | pix1; for (i = 3; i < blkwidth; i += 4) { pix4 = *((uint32*)ref); ref += 4; prev_pix = (pix4 << 24) & 0xFF000000; /* mask out byte belong to previous word */ result |= prev_pix; *((uint32*)out) = result; /* write 4 bytes */ out += 4; result = pix4 >> 8; /* for the next loop */ } ref += offset; out += out_offset; } break; case 2: offset = picpitch - blkwidth - 2; for (j = 0; j < blkheight; j++) { result = *((uint16*)ref); ref += 2; for (i = 2; i < blkwidth; i += 4) { pix4 = *((uint32*)ref); ref += 4; prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out byte belong to previous 
word */ result |= prev_pix; *((uint32*)out) = result; /* write 4 bytes */ out += 4; result = pix4 >> 16; /* for the next loop */ } ref += offset; out += out_offset; } break; case 3: offset = picpitch - blkwidth - 1; for (j = 0; j < blkheight; j++) { result = *ref++; for (i = 1; i < blkwidth; i += 4) { pix4 = *((uint32*)ref); ref += 4; prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out byte belong to previous word */ result |= prev_pix; *((uint32*)out) = result; /* write 4 bytes */ out += 4; result = pix4 >> 24; /* for the next loop */ } ref += offset; out += out_offset; } break; } } void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dx) { uint8 *p_ref; uint32 *p_cur; uint32 tmp, pkres; int result, curr_offset, ref_offset; int j; int32 r0, r1, r2, r3, r4, r5; int32 r13, r6; p_cur = (uint32*)out; /* assume it's word aligned */ curr_offset = (outpitch - blkwidth) >> 2; p_ref = in; ref_offset = inpitch - blkwidth; if (dx&1) { dx = ((dx >> 1) ? 
-3 : -4); /* use in 3/4 pel */
        p_ref -= 2;
        r13 = 0;
        for (j = blkheight; j > 0; j--)
        {
            tmp = (uint32)(p_ref + blkwidth);
            r0 = p_ref[0];
            r1 = p_ref[2];
            r0 |= (r1 << 16);           /* 0,c,0,a */
            r1 = p_ref[1];
            r2 = p_ref[3];
            r1 |= (r2 << 16);           /* 0,d,0,b */
            while ((uint32)p_ref < tmp)
            {
                r2 = *(p_ref += 4); /* move pointer to e */
                r3 = p_ref[2];
                r2 |= (r3 << 16);           /* 0,g,0,e */
                r3 = p_ref[1];
                r4 = p_ref[3];
                r3 |= (r4 << 16);           /* 0,h,0,f */

                r4 = r0 + r3;       /* c+h, a+f */
                r5 = r0 + r1;   /* c+d, a+b */
                r6 = r2 + r3;   /* g+h, e+f */
                r5 >>= 16;
                r5 |= (r6 << 16);   /* e+f, c+d */
                r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */
                r4 += 0x100010; /* +16, +16 */
                r5 = r1 + r2;       /* d+g, b+e */
                r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
                r4 >>= 5;
                r13 |= r4;      /* check clipping */

                /* quarter-pel: average half-pel result with nearest full pel */
                r5 = p_ref[dx+2];
                r6 = p_ref[dx+4];
                r5 |= (r6 << 16);
                r4 += r5;
                r4 += 0x10001;
                r4 = (r4 >> 1) & 0xFF00FF;

                r5 = p_ref[4]; /* i */
                r6 = (r5 << 16);
                r5 = r6 | (r2 >> 16);/* 0,i,0,g */
                r5 += r1;       /* d+i, b+g */ /* r5 not free */
                r1 >>= 16;
                r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
                r1 += r2;       /* f+g, d+e */
                r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */
                r0 >>= 16;
                r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
                r0 += r3;       /* e+h, c+f */
                r5 += 0x100010; /* 16,16 */
                r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
                r5 >>= 5;
                r13 |= r5;      /* check clipping */

                r0 = p_ref[dx+3];
                r1 = p_ref[dx+5];
                r0 |= (r1 << 16);
                r5 += r0;
                r5 += 0x10001;
                r5 = (r5 >> 1) & 0xFF00FF;

                r4 |= (r5 << 8);    /* pack them together */
                *p_cur++ = r4;
                r1 = r3;
                r0 = r2;
            }
            p_cur += curr_offset; /* move to the next line */
            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */

            if (r13&0xFF000700) /* need clipping */
            {
                /* redo the whole row with scalar, clipped arithmetic */
                /* move back to the beginning of the line */
                p_ref -= (ref_offset + blkwidth);   /* input */
                p_cur -= (outpitch >> 2);

                tmp = (uint32)(p_ref + blkwidth);
                for (; (uint32)p_ref < tmp;)
                {

                    r0 = *p_ref++;
                    r1 = *p_ref++;
                    r2 = *p_ref++;
                    r3 = *p_ref++;
                    r4 = *p_ref++;
                    /* first pixel */
                    r5 = *p_ref++;
                    result = (r0 + r5);
                    r0 = (r1 + r4);
                    result -= (r0 * 5);//result -= r0; result -= (r0<<2);
                    r0 = (r2 + r3);
                    result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    /* 3/4 pel,  no need to clip */
                    result = (result + p_ref[dx] + 1);
                    pkres = (result >> 1) ;
                    /* second pixel */
                    r0 = *p_ref++;
                    result = (r1 + r0);
                    r1 = (r2 + r5);
                    result -= (r1 * 5);//result -= r1; result -= (r1<<2);
                    r1 = (r3 + r4);
                    result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    /* 3/4 pel,  no need to clip */
                    result = (result + p_ref[dx] + 1);
                    result = (result >> 1);
                    pkres |= (result << 8);
                    /* third pixel */
                    r1 = *p_ref++;
                    result = (r2 + r1);
                    r2 = (r3 + r0);
                    result -= (r2 * 5);//result -= r2; result -= (r2<<2);
                    r2 = (r4 + r5);
                    result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    /* 3/4 pel,  no need to clip */
                    result = (result + p_ref[dx] + 1);
                    result = (result >> 1);
                    pkres |= (result << 16);
                    /* fourth pixel */
                    r2 = *p_ref++;
                    result = (r3 + r2);
                    r3 = (r4 + r1);
                    result -= (r3 * 5);//result -= r3; result -= (r3<<2);
                    r3 = (r5 + r0);
                    result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    /* 3/4 pel,  no need to clip */
                    result = (result + p_ref[dx] + 1);
                    result = (result >> 1);
                    pkres |= (result << 24);
                    *p_cur++ = pkres; /* write 4 pixels */
                    p_ref -= 5;  /* offset back to the middle of filter */
                }
                p_cur += curr_offset;  /* move to the next line */
                p_ref += ref_offset;    /* move to the next line */
            }
        }
    }
    else /* half-pel only: no averaging step */
    {
        p_ref -= 2;
        r13 = 0;
        for (j = blkheight; j > 0; j--)
        {
            tmp = (uint32)(p_ref + blkwidth);
            r0 = p_ref[0];
            r1 = p_ref[2];
            r0 |= (r1 << 16);           /* 0,c,0,a */
            r1 = p_ref[1];
            r2 = p_ref[3];
            r1 |= (r2 << 16);           /* 0,d,0,b */
            while ((uint32)p_ref < tmp)
            {
                r2 = *(p_ref += 4); /* move pointer to e */
                r3 = p_ref[2];
                r2 |= (r3 << 16);           /* 0,g,0,e */
                r3 = p_ref[1];
                r4 = p_ref[3];
                r3 |= (r4 << 16);           /* 0,h,0,f */

                r4 = r0 + r3;       /* c+h, a+f */
                r5 = r0 + r1;   /* c+d, a+b */
                r6 = r2 + r3;   /* g+h, e+f */
                r5 >>= 16;
                r5 |= (r6 << 16);   /* e+f, c+d */
                r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */
                r4 += 0x100010; /* +16, +16 */
                r5 = r1 + r2;       /* d+g, b+e */
                r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
                r4 >>= 5;
                r13 |= r4;      /* check clipping */
                r4 &= 0xFF00FF; /* mask */

                r5 = p_ref[4]; /* i */
                r6 = (r5 << 16);
                r5 = r6 | (r2 >> 16);/* 0,i,0,g */
                r5 += r1;       /* d+i, b+g */ /* r5 not free */
                r1 >>= 16;
                r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
                r1 += r2;       /* f+g, d+e */
                r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */
                r0 >>= 16;
                r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
                r0 += r3;       /* e+h, c+f */
                r5 += 0x100010; /* 16,16 */
                r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
                r5 >>= 5;
                r13 |= r5;      /* check clipping */
                r5 &= 0xFF00FF; /* mask */

                r4 |= (r5 << 8);    /* pack them together */
                *p_cur++ = r4;
                r1 = r3;
                r0 = r2;
            }
            p_cur += curr_offset; /* move to the next line */
            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */

            if (r13&0xFF000700) /* need clipping */
            {
                /* move back to the beginning of the line */
                p_ref -= (ref_offset + blkwidth);   /* input */
                p_cur -= (outpitch >> 2);

                tmp = (uint32)(p_ref + blkwidth);
                for (; (uint32)p_ref < tmp;)
                {

                    r0 = *p_ref++;
                    r1 = *p_ref++;
                    r2 = *p_ref++;
                    r3 = *p_ref++;
                    r4 = *p_ref++;
                    /* first pixel */
                    r5 = *p_ref++;
                    result = (r0 + r5);
                    r0 = (r1 + r4);
                    result -= (r0 * 5);//result -= r0; result -= (r0<<2);
                    r0 = (r2 + r3);
                    result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    pkres  = result;
                    /* second pixel */
                    r0 = *p_ref++;
                    result = (r1 + r0);
                    r1 = (r2 + r5);
                    result -= (r1 * 5);//result -= r1; result -= (r1<<2);
                    r1 = (r3 + r4);
                    result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    pkres |= (result << 8);
                    /* third pixel */
                    r1 = *p_ref++;
                    result = (r2 + r1);
                    r2 = (r3 + r0);
                    result -= (r2 * 5);//result -= r2; result -= (r2<<2);
                    r2 = (r4 + r5);
                    result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    pkres |= (result << 16);
                    /* fourth pixel */
                    r2 = *p_ref++;
                    result = (r3 + r2);
                    r3 = (r4 + r1);
                    result -= (r3 * 5);//result -= r3; result -= (r3<<2);
                    r3 = (r5 + r0);
                    result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
                    result = (result + 16) >> 5;
                    CLIP_RESULT(result)
                    pkres |= (result << 24);
                    *p_cur++ = pkres;   /* write 4 pixels */
                    p_ref -= 5;
                }
                p_cur += curr_offset; /* move to the next line */
                p_ref += ref_offset;
            }
        }
    }

    return ;
}

/* Horizontal 6-tap filter over 32-bit intermediate values produced by a prior
   vertical pass (sub-pel positions i,j,k); output is rounded by 512 and shifted
   10 (both passes combined).  dx&1 adds quarter-pel averaging against the
   singly-filtered column at p_ref[dx]. */
void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
                    int blkwidth, int blkheight, int dx)
{
    int *p_ref;
    uint32 *p_cur;
    uint32 tmp, pkres;
    int result, result2, curr_offset, ref_offset;
    int j, r0, r1, r2, r3, r4, r5;

    p_cur = (uint32*)out; /* assume it's word aligned */
    curr_offset = (outpitch - blkwidth) >> 2;
    p_ref = in;
    ref_offset = inpitch - blkwidth;

    if (dx&1)
    {
        dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */

        for (j = blkheight; j > 0 ; j--)
        {
            tmp = (uint32)(p_ref + blkwidth);
            for (; (uint32)p_ref < tmp;)
            {

                r0 = p_ref[-2];
                r1 = p_ref[-1];
                r2 = *p_ref++;
                r3 = *p_ref++;
                r4 = *p_ref++;
                /* first pixel */
                r5 = *p_ref++;
                result = (r0 + r5);
                r0 = (r1 + r4);
                result -= (r0 * 5);//result -= r0; result -= (r0<<2);
                r0 = (r2 + r3);
                result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                result2 = ((p_ref[dx] + 16) >> 5);
                CLIP_RESULT(result2)
                /* 3/4 pel,  no need to clip */
                result = (result + result2 + 1);
                pkres = (result >> 1);
                /* second pixel */
                r0 = *p_ref++;
                result = (r1 + r0);
                r1 = (r2 + r5);
                result -= (r1 * 5);//result -= r1; result -= (r1<<2);
                r1 = (r3 + r4);
                result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                result2 = ((p_ref[dx] + 16) >> 5);
                CLIP_RESULT(result2)
                /* 3/4 pel,  no need to clip */
                result = (result + result2 + 1);
                result = (result >> 1);
                pkres |= (result << 8);
                /* third pixel */
                r1 =
*p_ref++;
                result = (r2 + r1);
                r2 = (r3 + r0);
                result -= (r2 * 5);//result -= r2; result -= (r2<<2);
                r2 = (r4 + r5);
                result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                result2 = ((p_ref[dx] + 16) >> 5);
                CLIP_RESULT(result2)
                /* 3/4 pel,  no need to clip */
                result = (result + result2 + 1);
                result = (result >> 1);
                pkres |= (result << 16);
                /* fourth pixel */
                r2 = *p_ref++;
                result = (r3 + r2);
                r3 = (r4 + r1);
                result -= (r3 * 5);//result -= r3; result -= (r3<<2);
                r3 = (r5 + r0);
                result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                result2 = ((p_ref[dx] + 16) >> 5);
                CLIP_RESULT(result2)
                /* 3/4 pel,  no need to clip */
                result = (result + result2 + 1);
                result = (result >> 1);
                pkres |= (result << 24);
                *p_cur++ = pkres; /* write 4 pixels */
                p_ref -= 3;  /* offset back to the middle of filter */
            }
            p_cur += curr_offset; /* move to the next line */
            p_ref += ref_offset;    /* move to the next line */
        }
    }
    else /* half-pel only: no quarter-pel averaging */
    {
        for (j = blkheight; j > 0 ; j--)
        {
            tmp = (uint32)(p_ref + blkwidth);
            for (; (uint32)p_ref < tmp;)
            {

                r0 = p_ref[-2];
                r1 = p_ref[-1];
                r2 = *p_ref++;
                r3 = *p_ref++;
                r4 = *p_ref++;
                /* first pixel */
                r5 = *p_ref++;
                result = (r0 + r5);
                r0 = (r1 + r4);
                result -= (r0 * 5);//result -= r0; result -= (r0<<2);
                r0 = (r2 + r3);
                result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                pkres = result;
                /* second pixel */
                r0 = *p_ref++;
                result = (r1 + r0);
                r1 = (r2 + r5);
                result -= (r1 * 5);//result -= r1; result -= (r1<<2);
                r1 = (r3 + r4);
                result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                pkres |= (result << 8);
                /* third pixel */
                r1 = *p_ref++;
                result = (r2 + r1);
                r2 = (r3 + r0);
                result -= (r2 * 5);//result -= r2; result -= (r2<<2);
                r2 = (r4 + r5);
                result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                pkres |= (result << 16);
                /* fourth pixel */
                r2 = *p_ref++;
                result = (r3 + r2);
                r3 = (r4 + r1);
                result -= (r3 * 5);//result -= r3; result -= (r3<<2);
                r3 = (r5 + r0);
                result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
                result = (result + 512) >> 10;
                CLIP_RESULT(result)
                pkres |= (result << 24);
                *p_cur++ = pkres; /* write 4 pixels */
                p_ref -= 3;  /* offset back to the middle of filter */
            }
            p_cur += curr_offset; /* move to the next line */
            p_ref += ref_offset;    /* move to the next line */
        }
    }

    return ;
}

/* First (horizontal) 6-tap pass for the "vertical cross" positions: filters
   bytes into unrounded 32-bit intermediates for a subsequent vertical pass
   (eVertInterp3MC applies the combined rounding/shift). */
void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
                    int blkwidth, int blkheight)
{
    uint8 *p_ref;
    int   *p_cur;
    uint32 tmp;
    int result, curr_offset, ref_offset;
    int j, r0, r1, r2, r3, r4, r5;

    p_cur = out;
    curr_offset = (outpitch - blkwidth);
    p_ref = in;
    ref_offset = inpitch - blkwidth;

    for (j = blkheight; j > 0 ; j--)
    {
        tmp = (uint32)(p_ref + blkwidth);
        for (; (uint32)p_ref < tmp;)
        {

            r0 = p_ref[-2];
            r1 = p_ref[-1];
            r2 = *p_ref++;
            r3 = *p_ref++;
            r4 = *p_ref++;
            /* first pixel */
            r5 = *p_ref++;
            result = (r0 + r5);
            r0 = (r1 + r4);
            result -= (r0 * 5);//result -= r0; result -= (r0<<2);
            r0 = (r2 + r3);
            result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
            *p_cur++ = result;
            /* second pixel */
            r0 = *p_ref++;
            result = (r1 + r0);
            r1 = (r2 + r5);
            result -= (r1 * 5);//result -= r1; result -= (r1<<2);
            r1 = (r3 + r4);
            result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
            *p_cur++ = result;
            /* third pixel */
            r1 = *p_ref++;
            result = (r2 + r1);
            r2 = (r3 + r0);
            result -= (r2 * 5);//result -= r2; result -= (r2<<2);
            r2 = (r4 + r5);
            result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
            *p_cur++ = result;
            /* fourth pixel */
            r2 = *p_ref++;
            result = (r3 + r2);
            r3 = (r4 + r1);
            result -= (r3 * 5);//result -= r3; result -= (r3<<2);
            r3 = (r5 + r0);
            result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
            *p_cur++ = result;
            p_ref -= 3; /* move back to the middle of the filter */
        }
        p_cur += curr_offset; /* move to the next line */
        p_ref += ref_offset;
    }

    return ;
}
void
eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy) { uint8 *p_cur, *p_ref; uint32 tmp; int result, curr_offset, ref_offset; int j, i; int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13; uint8 tmp_in[24][24]; /* not word-aligned */ if (((uint32)in)&0x3) { eCreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5); in = &tmp_in[2][0]; inpitch = 24; } p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */ ref_offset = blkheight * inpitch; /* for limit */ curr_offset += 3; if (dy&1) { dy = (dy >> 1) ? 0 : -inpitch; for (j = 0; j < blkwidth; j += 4, in += 4) { r13 = 0; p_ref = in; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */ p_ref += inpitch; r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */ r0 &= 0xFF00FF; r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r7; r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 += 20 * r1; r6 += 20 * r7; r0 += 0x100010; r6 += 0x100010; r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 -= 5 * r1; r6 -= 5 * r7; r0 >>= 5; r6 >>= 5; /* clip */ r13 |= r6; r13 |= r0; //CLIPPACK(r6,result) r1 = *((uint32*)(p_ref + dy)); r2 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r2; r0 += 0x10001; r6 += 0x10001; r0 = (r0 >> 1) & 0xFF00FF; r6 = (r6 >> 1) & 0xFF00FF; r0 |= (r6 << 8); /* pack it back */ *((uint32*)(p_cur += outpitch)) = r0; } p_cur 
+= curr_offset; /* offset to the next pixel */ if (r13 & 0xFF000700) /* this column need clipping */ { p_cur -= 4; for (i = 0; i < 4; i++) { p_ref = in + i; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) /* 3/4 pel, no need to clip */ result = (result + p_ref[dy-(inpitch<<1)] + 1); result = 
(result >> 1); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += (curr_offset - 3); } } } } else { for (j = 0; j < blkwidth; j += 4, in += 4) { r13 = 0; p_ref = in; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */ p_ref += inpitch; r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */ r0 &= 0xFF00FF; r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r7; r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 += 20 * r1; r6 += 20 * r7; r0 += 0x100010; r6 += 0x100010; r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 -= 5 * r1; r6 -= 5 * r7; r0 >>= 5; r6 >>= 5; /* clip */ r13 |= r6; r13 |= r0; //CLIPPACK(r6,result) r0 &= 0xFF00FF; r6 &= 0xFF00FF; r0 |= (r6 << 8); /* pack it back */ *((uint32*)(p_cur += outpitch)) = r0; } p_cur += curr_offset; /* offset to the next pixel */ if (r13 & 0xFF000700) /* this column need clipping */ { p_cur -= 4; for (i = 0; i < 4; i++) { p_ref = in + i; p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 
= (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += (curr_offset - 3); } } } } return ; } void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch, int blkwidth, int blkheight) { int *p_cur; uint8 *p_ref; uint32 tmp; int result, curr_offset, ref_offset; int j, r0, r1, r2, r3, r4, r5; p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */ ref_offset = blkheight * inpitch; /* for limit */ for (j = 0; j < blkwidth; j++) { p_cur -= outpitch; /* compensate for the first offset */ p_ref = in++; tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; 
result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += curr_offset; } return ; } void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight, int dy) { uint8 *p_cur; int *p_ref; uint32 tmp; int result, result2, curr_offset, ref_offset; int j, r0, r1, r2, r3, r4, r5; p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */ ref_offset = blkheight * inpitch; /* for limit */ if (dy&1) { dy = (dy >> 1) ? 
-(inpitch << 1) : -(inpitch << 1) - inpitch; for (j = 0; j < blkwidth; j++) { p_cur -= outpitch; /* compensate for the first offset */ p_ref = in++; tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] + 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 512) >> 10; CLIP_RESULT(result) result2 = ((p_ref[dy] 
+ 16) >> 5); CLIP_RESULT(result2) /* 3/4 pel, no need to clip */ result = (result + result2 + 1); result = (result >> 1); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += curr_offset; } } else { for (j = 0; j < blkwidth; j++) { p_cur -= outpitch; /* compensate for the first offset */ p_ref = in++; tmp = (uint32)(p_ref + ref_offset); /* limit */ while ((uint32)p_ref < tmp) { /* loop un-rolled */ r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 512) >> 10; CLIP_RESULT(result) *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += curr_offset; } } return ; } void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch, uint8 *out, int 
outpitch, int blkwidth, int blkheight) { int j, i; int result; uint8 *p_cur, *p_ref, *p_tmp8; int curr_offset, ref_offset; uint8 tmp_res[24][24], tmp_in[24][24]; uint32 *p_tmp; uint32 tmp, pkres, tmp_result; int32 r0, r1, r2, r3, r4, r5; int32 r6, r7, r8, r9, r10, r13; void *tmp_void; ref_offset = inpitch - blkwidth; p_ref = in1 - 2; /* perform horizontal interpolation */ /* not word-aligned */ /* It is faster to read 1 byte at time to avoid calling CreateAlign */ /* if(((uint32)p_ref)&0x3) { CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight); p_ref = &tmp_in[0][0]; ref_offset = 24-blkwidth; }*/ tmp_void = (void*) & (tmp_res[0][0]); p_tmp = (uint32*) tmp_void; for (j = blkheight; j > 0; j--) { r13 = 0; tmp = (uint32)(p_ref + blkwidth); //r0 = *((uint32*)p_ref); /* d,c,b,a */ //r1 = (r0>>8)&0xFF00FF; /* 0,d,0,b */ //r0 &= 0xFF00FF; /* 0,c,0,a */ /* It is faster to read 1 byte at a time */ r0 = p_ref[0]; r1 = p_ref[2]; r0 |= (r1 << 16); /* 0,c,0,a */ r1 = p_ref[1]; r2 = p_ref[3]; r1 |= (r2 << 16); /* 0,d,0,b */ while ((uint32)p_ref < tmp) { //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */ //r3 = (r2>>8)&0xFF00FF; /* 0,h,0,f */ //r2 &= 0xFF00FF; /* 0,g,0,e */ /* It is faster to read 1 byte at a time */ r2 = *(p_ref += 4); r3 = p_ref[2]; r2 |= (r3 << 16); /* 0,g,0,e */ r3 = p_ref[1]; r4 = p_ref[3]; r3 |= (r4 << 16); /* 0,h,0,f */ r4 = r0 + r3; /* c+h, a+f */ r5 = r0 + r1; /* c+d, a+b */ r6 = r2 + r3; /* g+h, e+f */ r5 >>= 16; r5 |= (r6 << 16); /* e+f, c+d */ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */ r4 += 0x100010; /* +16, +16 */ r5 = r1 + r2; /* d+g, b+e */ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */ r4 >>= 5; r13 |= r4; /* check clipping */ r4 &= 0xFF00FF; /* mask */ r5 = p_ref[4]; /* i */ r6 = (r5 << 16); r5 = r6 | (r2 >> 16);/* 0,i,0,g */ r5 += r1; /* d+i, b+g */ /* r5 not free */ r1 >>= 16; r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */ r1 += r2; /* f+g, d+e */ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */ r0 >>= 16; r0 
|= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */ r0 += r3; /* e+h, c+f */ r5 += 0x100010; /* 16,16 */ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */ r5 >>= 5; r13 |= r5; /* check clipping */ r5 &= 0xFF00FF; /* mask */ r4 |= (r5 << 8); /* pack them together */ *p_tmp++ = r4; r1 = r3; r0 = r2; } p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */ if (r13&0xFF000700) /* need clipping */ { /* move back to the beginning of the line */ p_ref -= (ref_offset + blkwidth); /* input */ p_tmp -= 6; /* intermediate output */ tmp = (uint32)(p_ref + blkwidth); while ((uint32)p_ref < tmp) { r0 = *p_ref++; r1 = *p_ref++; r2 = *p_ref++; r3 = *p_ref++; r4 = *p_ref++; /* first pixel */ r5 = *p_ref++; result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres = result; /* second pixel */ r0 = *p_ref++; result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += (r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 8); /* third pixel */ r1 = *p_ref++; result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 16); /* fourth pixel */ r2 = *p_ref++; result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) pkres |= (result << 24); *p_tmp++ = pkres; /* write 4 pixel */ p_ref -= 5; } p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */ } } /* 
perform vertical interpolation */ /* not word-aligned */ if (((uint32)in2)&0x3) { eCreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5); in2 = &tmp_in[2][0]; inpitch = 24; } p_cur = out; curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */ pkres = blkheight * inpitch; /* reuse it for limit */ curr_offset += 3; for (j = 0; j < blkwidth; j += 4, in2 += 4) { r13 = 0; p_ref = in2; p_tmp8 = &(tmp_res[0][j]); /* intermediate result */ p_tmp8 -= 24; /* compensate for the first offset */ p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + pkres); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { /* Read 1 byte at a time is too slow, too many read and pack ops, need to call CreateAlign */ /*p_ref8 = p_ref-(inpitch<<1); r0 = p_ref8[0]; r1 = p_ref8[2]; r0 |= (r1<<16); r6 = p_ref8[1]; r1 = p_ref8[3]; r6 |= (r1<<16); p_ref+=inpitch; */ r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */ p_ref += inpitch; r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */ r0 &= 0xFF00FF; /*p_ref8 = p_ref+(inpitch<<1); r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16); r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/ r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r0 += r1; r6 += r7; /*r2 = p_ref[0]; r8 = p_ref[2]; r2 |= (r8<<16); r8 = p_ref[1]; r1 = p_ref[3]; r8 |= (r1<<16);*/ r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; /*p_ref8 = p_ref-inpitch; r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/ r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 += 20 * r1; r6 += 20 * r7; r0 += 0x100010; r6 += 0x100010; /*p_ref8 = p_ref-(inpitch<<1); r2 = p_ref8[0]; r8 = p_ref8[2]; r2 |= (r8<<16); r8 = p_ref8[1]; r1 = p_ref8[3]; r8 |= (r1<<16);*/ r2 = *((uint32*)(p_ref - 
(inpitch << 1))); /* r2, r8, ref[-1] */ r8 = (r2 >> 8) & 0xFF00FF; r2 &= 0xFF00FF; /*p_ref8 = p_ref+inpitch; r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/ r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */ r7 = (r1 >> 8) & 0xFF00FF; r1 &= 0xFF00FF; r1 += r2; r7 += r8; r0 -= 5 * r1; r6 -= 5 * r7; r0 >>= 5; r6 >>= 5; /* clip */ r13 |= r6; r13 |= r0; //CLIPPACK(r6,result) /* add with horizontal results */ r10 = *((uint32*)(p_tmp8 += 24)); r9 = (r10 >> 8) & 0xFF00FF; r10 &= 0xFF00FF; r0 += r10; r0 += 0x10001; r0 = (r0 >> 1) & 0xFF00FF; /* mask to 8 bytes */ r6 += r9; r6 += 0x10001; r6 = (r6 >> 1) & 0xFF00FF; /* mask to 8 bytes */ r0 |= (r6 << 8); /* pack it back */ *((uint32*)(p_cur += outpitch)) = r0; } p_cur += curr_offset; /* offset to the next pixel */ if (r13 & 0xFF000700) /* this column need clipping */ { p_cur -= 4; for (i = 0; i < 4; i++) { p_ref = in2 + i; p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */ p_tmp8 -= 24; /* compensate for the first offset */ p_cur -= outpitch; /* compensate for the first offset */ tmp = (uint32)(p_ref + pkres); /* limit */ while ((uint32)p_ref < tmp) /* the loop un-rolled */ { r0 = *(p_ref - (inpitch << 1)); r1 = *(p_ref - inpitch); r2 = *p_ref; r3 = *(p_ref += inpitch); /* modify pointer before loading */ r4 = *(p_ref += inpitch); /* first pixel */ r5 = *(p_ref += inpitch); result = (r0 + r5); r0 = (r1 + r4); result -= (r0 * 5);//result -= r0; result -= (r0<<2); r0 = (r2 + r3); result += (r0 * 20);//result += (r0<<4); result += (r0<<2); result = (result + 16) >> 5; CLIP_RESULT(result) tmp_result = *(p_tmp8 += 24); /* modify pointer before loading */ result = (result + tmp_result + 1); /* no clip */ result = (result >> 1); *(p_cur += outpitch) = result; /* second pixel */ r0 = *(p_ref += inpitch); result = (r1 + r0); r1 = (r2 + r5); result -= (r1 * 5);//result -= r1; result -= (r1<<2); r1 = (r3 + r4); result += (r1 * 20);//result += (r1<<4); result += 
(r1<<2); result = (result + 16) >> 5; CLIP_RESULT(result) tmp_result = *(p_tmp8 += 24); /* intermediate result */ result = (result + tmp_result + 1); /* no clip */ result = (result >> 1); *(p_cur += outpitch) = result; /* third pixel */ r1 = *(p_ref += inpitch); result = (r2 + r1); r2 = (r3 + r0); result -= (r2 * 5);//result -= r2; result -= (r2<<2); r2 = (r4 + r5); result += (r2 * 20);//result += (r2<<4); result += (r2<<2); result = (result + 16) >> 5; CLIP_RESULT(result) tmp_result = *(p_tmp8 += 24); /* intermediate result */ result = (result + tmp_result + 1); /* no clip */ result = (result >> 1); *(p_cur += outpitch) = result; /* fourth pixel */ r2 = *(p_ref += inpitch); result = (r3 + r2); r3 = (r4 + r1); result -= (r3 * 5);//result -= r3; result -= (r3<<2); r3 = (r5 + r0); result += (r3 * 20);//result += (r3<<4); result += (r3<<2); result = (result + 16) >> 5; CLIP_RESULT(result) tmp_result = *(p_tmp8 += 24); /* intermediate result */ result = (result + tmp_result + 1); /* no clip */ result = (result >> 1); *(p_cur += outpitch) = result; p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */ } p_cur += (curr_offset - 3); } } } return ; } /* position G */ void eFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch, int blkwidth, int blkheight) { int i, j; int offset_in = inpitch - blkwidth; int offset_out = outpitch - blkwidth; uint32 temp; uint8 byte; if (((uint32)in)&3) { for (j = blkheight; j > 0; j--) { for (i = blkwidth; i > 0; i -= 4) { temp = *in++; byte = *in++; temp |= (byte << 8); byte = *in++; temp |= (byte << 16); byte = *in++; temp |= (byte << 24); *((uint32*)out) = temp; /* write 4 bytes */ out += 4; } out += offset_out; in += offset_in; } } else { for (j = blkheight; j > 0; j--) { for (i = blkwidth; i > 0; i -= 4) { temp = *((uint32*)in); *((uint32*)out) = temp; in += 4; out += 4; } out += offset_out; in += offset_in; } } return ; } void ePadChroma(uint8 *ref, int picwidth, int picheight, int picpitch, int x_pos, 
int y_pos) { int pad_height; int pad_width; uint8 *start; uint32 word1, word2, word3; int offset, j; pad_height = 8 + ((y_pos & 7) ? 1 : 0); pad_width = 8 + ((x_pos & 7) ? 1 : 0); y_pos >>= 3; x_pos >>= 3; // pad vertical first if (y_pos < 0) // need to pad up { if (x_pos < -8) start = ref - 8; else if (x_pos + pad_width > picwidth + 7) start = ref + picwidth + 7 - pad_width; else start = ref + x_pos; /* word-align start */ offset = (uint32)start & 0x3; if (offset) start -= offset; word1 = *((uint32*)start); word2 = *((uint32*)(start + 4)); word3 = *((uint32*)(start + 8)); /* pad up N rows */ j = -y_pos; if (j > 8) j = 8; while (j--) { *((uint32*)(start -= picpitch)) = word1; *((uint32*)(start + 4)) = word2; *((uint32*)(start + 8)) = word3; } } else if (y_pos + pad_height >= picheight) /* pad down */ { if (x_pos < -8) start = ref + picpitch * (picheight - 1) - 8; else if (x_pos + pad_width > picwidth + 7) start = ref + picpitch * (picheight - 1) + picwidth + 7 - pad_width; else start = ref + picpitch * (picheight - 1) + x_pos; /* word-align start */ offset = (uint32)start & 0x3; if (offset) start -= offset; word1 = *((uint32*)start); word2 = *((uint32*)(start + 4)); word3 = *((uint32*)(start + 8)); /* pad down N rows */ j = y_pos + pad_height - picheight; if (j > 8) j = 8; while (j--) { *((uint32*)(start += picpitch)) = word1; *((uint32*)(start + 4)) = word2; *((uint32*)(start + 8)) = word3; } } /* now pad horizontal */ if (x_pos < 0) // pad left { if (y_pos < -8) start = ref - (picpitch << 3); else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch; else start = ref + y_pos * picpitch; // now pad left 8 pixels for pad_height rows */ j = pad_height; start -= picpitch; while (j--) { word1 = *(start += picpitch); word1 |= (word1 << 8); word1 |= (word1 << 16); *((uint32*)(start - 8)) = word1; *((uint32*)(start - 4)) = word1; } } else if (x_pos + pad_width >= picwidth) /* pad right */ { if (y_pos < -8) start = ref - (picpitch 
<< 3) + picwidth - 1; else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch + picwidth - 1; else start = ref + y_pos * picpitch + picwidth - 1; // now pad right 8 pixels for pad_height rows */ j = pad_height; start -= picpitch; while (j--) { word1 = *(start += picpitch); word1 |= (word1 << 8); word1 |= (word1 << 16); *((uint32*)(start + 1)) = word1; *((uint32*)(start + 5)) = word1; } } return ; } void eChromaMotionComp(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos, uint8 *pred, int picpitch, int blkwidth, int blkheight) { int dx, dy; int offset_dx, offset_dy; int index; ePadChroma(ref, picwidth, picheight, picpitch, x_pos, y_pos); dx = x_pos & 7; dy = y_pos & 7; offset_dx = (dx + 7) >> 3; offset_dy = (dy + 7) >> 3; x_pos = x_pos >> 3; /* round it to full-pel resolution */ y_pos = y_pos >> 3; ref += y_pos * picpitch + x_pos; index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7); (*(eChromaMC_SIMD[index]))(ref, picpitch , dx, dy, pred, picpitch, blkwidth, blkheight); return ; } /* SIMD routines, unroll the loops in vertical direction, decreasing loops (things to be done) */ void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight) { int32 r0, r1, r2, r3, result0, result1; uint8 temp[288]; uint8 *ref, *out; int i, j; int dx_8 = 8 - dx; int dy_8 = 8 - dy; /* horizontal first */ out = temp; for (i = 0; i < blkheight + 1; i++) { ref = pRef; r0 = ref[0]; for (j = 0; j < blkwidth; j += 4) { r0 |= (ref[2] << 16); result0 = dx_8 * r0; r1 = ref[1] | (ref[3] << 16); result0 += dx * r1; *(int32 *)out = result0; result0 = dx_8 * r1; r2 = ref[4]; r0 = r0 >> 16; r1 = r0 | (r2 << 16); result0 += dx * r1; *(int32 *)(out + 16) = result0; ref += 4; out += 4; r0 = r2; } pRef += srcPitch; out += (32 - blkwidth); } // pRef -= srcPitch*(blkheight+1); ref = temp; for (j = 0; j < blkwidth; j += 4) { r0 = *(int32 *)ref; r1 = *(int32 *)(ref + 16); ref += 
32; out = pOut; for (i = 0; i < (blkheight >> 1); i++) { result0 = dy_8 * r0 + 0x00200020; r2 = *(int32 *)ref; result0 += dy * r2; result0 >>= 6; result0 &= 0x00FF00FF; r0 = r2; result1 = dy_8 * r1 + 0x00200020; r3 = *(int32 *)(ref + 16); result1 += dy * r3; result1 >>= 6; result1 &= 0x00FF00FF; r1 = r3; *(int32 *)out = result0 | (result1 << 8); out += predPitch; ref += 32; result0 = dy_8 * r0 + 0x00200020; r2 = *(int32 *)ref; result0 += dy * r2; result0 >>= 6; result0 &= 0x00FF00FF; r0 = r2; result1 = dy_8 * r1 + 0x00200020; r3 = *(int32 *)(ref + 16); result1 += dy * r3; result1 >>= 6; result1 &= 0x00FF00FF; r1 = r3; *(int32 *)out = result0 | (result1 << 8); out += predPitch; ref += 32; } pOut += 4; ref = temp + 4; /* since it can only iterate twice max */ } return; } void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight) { (void)(dy); int32 r0, r1, r2, result0, result1; uint8 *ref, *out; int i, j; int dx_8 = 8 - dx; /* horizontal first */ for (i = 0; i < blkheight; i++) { ref = pRef; out = pOut; r0 = ref[0]; for (j = 0; j < blkwidth; j += 4) { r0 |= (ref[2] << 16); result0 = dx_8 * r0 + 0x00040004; r1 = ref[1] | (ref[3] << 16); result0 += dx * r1; result0 >>= 3; result0 &= 0x00FF00FF; result1 = dx_8 * r1 + 0x00040004; r2 = ref[4]; r0 = r0 >> 16; r1 = r0 | (r2 << 16); result1 += dx * r1; result1 >>= 3; result1 &= 0x00FF00FF; *(int32 *)out = result0 | (result1 << 8); ref += 4; out += 4; r0 = r2; } pRef += srcPitch; pOut += predPitch; } return; } void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy, uint8 *pOut, int predPitch, int blkwidth, int blkheight) { (void)(dx); int32 r0, r1, r2, r3, result0, result1; int i, j; uint8 *ref, *out; int dy_8 = 8 - dy; /* vertical first */ for (i = 0; i < blkwidth; i += 4) { ref = pRef; out = pOut; r0 = ref[0] | (ref[2] << 16); r1 = ref[1] | (ref[3] << 16); ref += srcPitch; for (j = 0; j < blkheight; j++) { result0 = dy_8 * r0 + 0x00040004; 
            /* (continued) inner loop: blend two vertically adjacent rows of
               packed chroma pairs (two pixels live in one 32-bit word). */
            r2 = ref[0] | (ref[2] << 16);
            result0 += dy * r2;
            result0 >>= 3;
            result0 &= 0x00FF00FF;
            r0 = r2;
            result1 = dy_8 * r1 + 0x00040004;
            r3 = ref[1] | (ref[3] << 16);
            result1 += dy * r3;
            result1 >>= 3;
            result1 &= 0x00FF00FF;
            r1 = r3;
            /* re-interleave the even/odd filtered pairs into 4 output bytes */
            *(int32 *)out = result0 | (result1 << 8);
            ref += srcPitch;
            out += predPitch;
        }
        pOut += 4;
        pRef += 4;
    }
    return;
}

/* 2-pixel-wide diagonal chroma MC (dx and dy both nonzero):
   horizontal eighth-pel filter into temp[] first, then vertical blend.
   blkwidth is unused (width is fixed at 2 pixels). */
void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                             uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    (void)(blkwidth);

    int32 r0, r1, temp0, temp1, result;
    int32 temp[9];
    int32 *out;
    int i, r_temp;
    int dy_8 = 8 - dy;

    /* horizontal first: one extra row (blkheight+1) is filtered because the
       vertical pass below needs a row pair for every output row */
    out = temp;
    for (i = 0; i < blkheight + 1; i++)
    {
        r_temp = pRef[1];
        temp0 = (pRef[0] << 3) + dx * (r_temp - pRef[0]);
        temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);
        r0 = temp0 | (temp1 << 16);  /* pack the two horizontal results */
        *out++ = r0;
        pRef += srcPitch;
    }
    pRef -= srcPitch * (blkheight + 1);  /* rewind to the block origin */

    /* vertical blend of consecutive filtered rows; +0x00200020 rounds the
       combined 1/64 weighting before the >>6 */
    out = temp;
    r0 = *out++;
    for (i = 0; i < blkheight; i++)
    {
        result = dy_8 * r0 + 0x00200020;
        r1 = *out++;
        result += dy * r1;
        result >>= 6;
        result &= 0x00FF00FF;
        /* collapse the packed pair into two adjacent output bytes */
        *(int16 *)pOut = (result >> 8) | (result & 0xFF);
        r0 = r1;
        pOut += predPitch;
    }
    return;
}

/* 2-pixel-wide horizontal-only chroma MC (dy == 0); dy/blkwidth unused. */
void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                               uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    (void)(dy);
    (void)(blkwidth);

    int i, temp, temp0, temp1;

    /* horizontal first: eighth-pel blend of each pixel with its right
       neighbor, +4 rounds before the >>3 */
    for (i = 0; i < blkheight; i++)
    {
        temp = pRef[1];
        temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;
        temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;
        *(int16 *)pOut = temp0 | (temp1 << 8);  /* write the 2-pixel row */
        pRef += srcPitch;
        pOut += predPitch;
    }
    return;
}

/* 2-pixel-wide vertical-only chroma MC (dx == 0); dx/blkwidth unused. */
void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                             uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    (void)(dx);
    (void)(blkwidth);

    int32 r0, r1, result;
    int i;
    int dy_8 = 8 - dy;

    /* pack the two columns of the 2-wide block into one word and blend
       each row with the row below (eighth-pel weights, +4 rounding) */
    r0 = pRef[0] | (pRef[1] << 16);
    pRef += srcPitch;
    for (i = 0; i < blkheight; i++)
    {
        result = dy_8 * r0 + 0x00040004;
        r1 = pRef[0] | (pRef[1] << 16);
        result += dy * r1;
        result >>= 3;
        result &= 0x00FF00FF;
        *(int16 *)pOut = (result >> 8) | (result & 0xFF);
        r0 = r1;
        pRef += srcPitch;
        pOut += predPitch;
    }
    return;
}

/* Full-pel chroma MC: straight 2-bytes-at-a-time copy from reference to
   prediction. dx/dy are unused (both are zero at full-pel positions).
   The odd-address branch assembles each 16-bit store byte-by-byte so the
   aligned 16-bit write to pOut stays valid. */
void eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
                        uint8 *pOut, int predPitch, int blkwidth, int blkheight)
{
    (void)(dx);
    (void)(dy);

    int i, j;
    int offset_in = srcPitch - blkwidth;
    int offset_out = predPitch - blkwidth;
    uint16 temp;
    uint8 byte;

    if (((uint32)pRef)&1)  /* misaligned source: read bytes, write halfwords */
    {
        for (j = blkheight; j > 0; j--)
        {
            for (i = blkwidth; i > 0; i -= 2)
            {
                temp = *pRef++;
                byte = *pRef++;
                temp |= (byte << 8);
                *((uint16*)pOut) = temp; /* write 2 bytes */
                pOut += 2;
            }
            pOut += offset_out;
            pRef += offset_in;
        }
    }
    else  /* aligned source: copy halfword at a time */
    {
        for (j = blkheight; j > 0; j--)
        {
            for (i = blkwidth; i > 0; i -= 2)
            {
                temp = *((uint16*)pRef);
                *((uint16*)pOut) = temp;
                pRef += 2;
                pOut += 2;
            }
            pOut += offset_out;
            pRef += offset_in;
        }
    }
    return ;
}


================================================
FILE: RtspCamera/jni/avc_h264/enc/src/motion_est.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "oscl_mem.h"
#include "avcenc_lib.h"

#define MIN_GOP 1 /* minimum size of GOP, 1/23/01, need to be tested */

#define DEFAULT_REF_IDX 0 /* always from the first frame in the reflist */

#define ALL_CAND_EQUAL 10 /* any number greater than 5 will work */

/* from TMN 3.2 */
#define PREF_NULL_VEC 129 /* zero vector bias */
#define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/
#define PREF_INTRA 3024//512 /* bias for INTRA coding */

/* Spiral-refinement pruning table: tab_exclude[last_loc][curr_loc] is 1 when
   the candidate at curr_loc was already evaluated in the previous step. */
const static int tab_exclude[9][9] = // [last_loc][curr_loc]
{
    {0, 0, 0, 0, 0, 0, 0, 0, 0},
    {0, 0, 0, 0, 1, 1, 1, 0, 0},
    {0, 0, 0, 0, 1, 1, 1, 1, 1},
    {0, 0, 0, 0, 0, 0, 1, 1, 1},
    {0, 1, 1, 0, 0, 0, 1, 1, 1},
    {0, 1, 1, 0, 0, 0, 0, 0, 1},
    {0, 1, 1, 1, 1, 0, 0, 0, 1},
    {0, 0, 1, 1, 1, 0, 0, 0, 0},
    {0, 0, 1, 1, 1, 1, 1, 0, 0}
}; //to decide whether to continue or compute

/* (x,y) increments that walk the 8 neighbors of the refinement spiral. */
const static int refine_next[8][2] =    /* [curr_k][increment] */
{
    {0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}
};

#ifdef _SAD_STAT
uint32 num_MB = 0;
uint32 num_cand = 0;
#endif

/************************************************************************/
#define TH_INTER_2 100  /* temporary for now */

//#define FIXED_INTERPRED_MODE  AVC_P16
#define FIXED_REF_IDX   0
#define FIXED_MVX 0
#define FIXED_MVY 0
// only use when AVC_P8 or AVC_P8ref0
#define FIXED_SUBMB_MODE AVC_4x4
/*************************************************************************/

/* Initialize arrays necessary for motion search:
   - mvbits: lookup table of signed-Exp-Golomb code lengths for MV deltas,
     sized from the configured search range and indexed negatively/positively
     around its midpoint;
   - hpel_cand / bilin_base: precomputed pointers into the sub-pel prediction
     scratch buffer for half-pel and quarter-pel refinement.
   Returns AVCENC_MEMORY_FAIL if the mvbits table cannot be allocated. */
AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle)
{
    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
    AVCRateControl *rateCtrl = encvid->rateCtrl;
    int search_range = rateCtrl->mvRange;
    int number_of_subpel_positions = 4 * (2 * search_range + 3);
    int max_mv_bits, max_mvd;
    int temp_bits = 0;
    uint8 *mvbits;
    int bits, imax, imin, i;
    uint8* subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions

    /* temp_bits = ceil(log2(number_of_subpel_positions)) */
    while (number_of_subpel_positions > 0)
    {
        temp_bits++;
number_of_subpel_positions >>= 1; } max_mv_bits = 3 + 2 * temp_bits; max_mvd = (1 << (max_mv_bits >> 1)) - 1; encvid->mvbits_array = (uint8*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, sizeof(uint8) * (2 * max_mvd + 1), DEFAULT_ATTR); if (encvid->mvbits_array == NULL) { return AVCENC_MEMORY_FAIL; } mvbits = encvid->mvbits = encvid->mvbits_array + max_mvd; mvbits[0] = 1; for (bits = 3; bits <= max_mv_bits; bits += 2) { imax = 1 << (bits >> 1); imin = imax >> 1; for (i = imin; i < imax; i++) mvbits[-i] = mvbits[i] = bits; } /* initialize half-pel search */ encvid->hpel_cand[0] = subpel_pred + REF_CENTER; encvid->hpel_cand[1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1 ; encvid->hpel_cand[2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->hpel_cand[3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->hpel_cand[4] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->hpel_cand[5] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->hpel_cand[6] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->hpel_cand[7] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->hpel_cand[8] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* For quarter-pel interpolation around best half-pel result */ encvid->bilin_base[0][0] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[0][1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[0][2] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[0][3] = subpel_pred + REF_CENTER; encvid->bilin_base[1][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[1][1] = subpel_pred + REF_CENTER - 24; encvid->bilin_base[1][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[1][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[2][0] = subpel_pred + REF_CENTER - 24; encvid->bilin_base[2][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[2][2] = 
subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[2][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[3][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[3][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1; encvid->bilin_base[3][2] = subpel_pred + REF_CENTER; encvid->bilin_base[3][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->bilin_base[4][0] = subpel_pred + REF_CENTER; encvid->bilin_base[4][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->bilin_base[4][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->bilin_base[4][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->bilin_base[5][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[5][1] = subpel_pred + REF_CENTER; encvid->bilin_base[5][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[5][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25; encvid->bilin_base[6][0] = subpel_pred + REF_CENTER - 1; encvid->bilin_base[6][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[6][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[6][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[7][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[7][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[7][2] = subpel_pred + REF_CENTER - 1; encvid->bilin_base[7][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24; encvid->bilin_base[8][0] = subpel_pred + REF_CENTER - 25; encvid->bilin_base[8][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[8][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE; encvid->bilin_base[8][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; return AVCENC_SUCCESS; } /* Clean-up memory */ void CleanMotionSearchModule(AVCHandle *avcHandle) { AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject; if 
    (encvid->mvbits_array)
    {
        /* NOTE(review): the pointer is cast to int before being handed to the
           free callback -- this truncates on 64-bit platforms; also only the
           mvbits alias is cleared here while mvbits_array keeps the stale
           pointer. TODO confirm against the CBAVC_Free callback signature. */
        avcHandle->CBAVC_Free(avcHandle->userData, (int)(encvid->mvbits_array));
        encvid->mvbits = NULL;
    }
    return ;
}

/* Decide whether a macroblock is worth an intra-mode search by comparing the
   inter min_cost against the Sum of Boundary Error (SBE) along the top and
   left neighbor edges. Returns true when intra should still be considered;
   in that case *min_cost is updated from SBE (averaged with the old value
   when ave == true). Returns false when inter is clearly sufficient. */
bool IntraDecisionABE(int *min_cost, uint8 *cur, int pitch, bool ave)
{
    int j;
    uint8 *out;
    int temp, SBE;
    OsclFloat ABE;
    bool intra = true;

    SBE = 0;
    /* top neighbor */
    out = cur - pitch;
    for (j = 0; j < 16; j++)
    {
        temp = out[j] - cur[j];
        SBE += ((temp >= 0) ? temp : -temp);
    }

    /* left neighbor */
    out = cur - 1;
    out -= pitch;
    cur -= pitch;
    for (j = 0; j < 16; j++)
    {
        temp = *(out += pitch) - *(cur += pitch);
        SBE += ((temp >= 0) ? temp : -temp);
    }

    /* compare mincost/384 and SBE/64 */
    ABE = SBE / 32.0; //ABE = SBE/64.0;
    if (ABE >= *min_cost / 256.0) //if( ABE*0.8 >= min_cost/384.0)
    {
        intra = false; // no possibility of intra, just use inter
    }
    else
    {
        if (ave == true)
        {
            *min_cost = (*min_cost + (int)(SBE * 8)) >> 1;   // possibility of intra, averaging the cost
        }
        else
        {
            *min_cost = (int)(SBE * 8);
        }
    }

    return intra;
}

/******* main function for macroblock prediction for the entire frame ***/
/* if turns out to be IDR frame, set video->nal_unit_type to AVC_NALTYPE_IDR */
void AVCMotionEstimation(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    int slice_type = video->slice_type;
    AVCFrameIO *currInput = encvid->currInput;
    AVCPictureData *refPic = video->RefPicList0[0];
    int i, j, k;
    int mbwidth = video->PicWidthInMbs;
    int mbheight = video->PicHeightInMbs;
    int totalMB = video->PicSizeInMbs;
    int pitch = currInput->pitch;
    AVCMacroblock *currMB, *mblock = video->mblock;
    AVCMV *mot_mb_16x16, *mot16x16 = encvid->mot16x16;
    // AVCMV *mot_mb_16x8, *mot_mb_8x16, *mot_mb_8x8, etc;
    AVCRateControl *rateCtrl = encvid->rateCtrl;
    uint8 *intraSearch = encvid->intraSearch;
    uint FS_en = encvid->fullsearch_enable;
    int NumIntraSearch, start_i, numLoop, incr_i;
    int mbnum, offset;
    uint8 *cur, *best_cand[5];
    int totalSAD = 0;    /* average SAD for rate control */
    int type_pred;
    int abe_cost;

#ifdef HTFM
    /***** HYPOTHESIS TESTING ********/  /* 2/28/01 */
int collect = 0; HTFM_Stat htfm_stat; double newvar[16]; double exp_lamda[15]; /*********************************/ #endif int hp_guess = 0; uint32 mv_uint32; offset = 0; if (slice_type == AVC_I_SLICE) // need to calculate rateCtrl->totalSAD for RC to take action!! { /* cannot do I16 prediction here because it needs full decoding. */ { /* no RC for I-slice */ i = totalMB - 1; while (i >= 0) { encvid->min_cost[i--] = 0x7FFFFFFF; /* max value for int */ } } /* reset intra MB pattern */ oscl_memset(intraSearch, 1, sizeof(uint8)*totalMB); encvid->firstIntraRefreshMBIndx = 0; /* reset this */ return ; } else // P_SLICE { for (i = 0; i < totalMB; i++) { mblock[i].mb_intra = 0; } oscl_memset(intraSearch, 1, sizeof(uint8)*totalMB); } if (refPic->padded == 0) { AVCPaddingEdge(refPic); refPic->padded = 1; } /* Random INTRA update */ if (rateCtrl->intraMBRate) { AVCRasterIntraUpdate(encvid, mblock, totalMB, rateCtrl->intraMBRate); } encvid->sad_extra_info = NULL; #ifdef HTFM /***** HYPOTHESIS TESTING ********/ InitHTFM(video, &htfm_stat, newvar, &collect); /*********************************/ #endif if ((rateCtrl->scdEnable == 1) && ((rateCtrl->frame_rate < 5.0) || (video->sliceHdr->frame_num > MIN_GOP))) /* do not try to detect a new scene if low frame rate and too close to previous I-frame */ { incr_i = 2; numLoop = 2; start_i = 1; type_pred = 0; /* for initial candidate selection */ } else { incr_i = 1; numLoop = 1; start_i = 0; type_pred = 2; } /* First pass, loop thru half the macroblock */ /* determine scene change */ /* Second pass, for the rest of macroblocks */ NumIntraSearch = 0; // to be intra searched in the encoding loop. while (numLoop--) { for (j = 0; j < mbheight; j++) { if (incr_i > 1) start_i = (start_i == 0 ? 
1 : 0) ; /* toggle 0 and 1 */ offset = pitch * (j << 4) + (start_i << 4); mbnum = j * mbwidth + start_i; for (i = start_i; i < mbwidth; i += incr_i) { video->mbNum = mbnum; video->currMB = currMB = mblock + mbnum; mot_mb_16x16 = mot16x16 + mbnum; cur = currInput->YCbCr[0] + offset; if (currMB->mb_intra == 0) /* for INTER mode */ { #if defined(HTFM) HTFMPrepareCurMB_AVC(encvid, &htfm_stat, cur, pitch); #else AVCPrepareCurMB(encvid, cur, pitch); #endif /************************************************************/ /******** full-pel 1MV search **********************/ AVCMBMotionSearch(encvid, cur, best_cand, i << 4, j << 4, type_pred, FS_en, &hp_guess); abe_cost = encvid->min_cost[mbnum] = mot_mb_16x16->sad; /* set mbMode and MVs */ currMB->mbMode = AVC_P16; currMB->MBPartPredMode[0][0] = AVC_Pred_L0; mv_uint32 = ((mot_mb_16x16->y) << 16) | ((mot_mb_16x16->x) & 0xffff); for (k = 0; k < 32; k += 2) { currMB->mvL0[k>>1] = mv_uint32; } /* make a decision whether it should be tested for intra or not */ if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0) { if (false == IntraDecisionABE(&abe_cost, cur, pitch, true)) { intraSearch[mbnum] = 0; } else { NumIntraSearch++; rateCtrl->MADofMB[mbnum] = abe_cost; } } else // boundary MBs, always do intra search { NumIntraSearch++; } totalSAD += (int) rateCtrl->MADofMB[mbnum];//mot_mb_16x16->sad; } else /* INTRA update, use for prediction */ { mot_mb_16x16[0].x = mot_mb_16x16[0].y = 0; /* reset all other MVs to zero */ /* mot_mb_16x8, mot_mb_8x16, mot_mb_8x8, etc. */ abe_cost = encvid->min_cost[mbnum] = 0x7FFFFFFF; /* max value for int */ if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0) { IntraDecisionABE(&abe_cost, cur, pitch, false); rateCtrl->MADofMB[mbnum] = abe_cost; totalSAD += abe_cost; } NumIntraSearch++ ; /* cannot do I16 prediction here because it needs full decoding. 
*/ // intraSearch[mbnum] = 1; } mbnum += incr_i; offset += (incr_i << 4); } /* for i */ } /* for j */ /* since we cannot do intra/inter decision here, the SCD has to be based on other criteria such as motion vectors coherency or the SAD */ if (incr_i > 1 && numLoop) /* scene change on and first loop */ { //if(NumIntraSearch > ((totalMB>>3)<<1) + (totalMB>>3)) /* 75% of 50%MBs */ if (NumIntraSearch*99 > (48*totalMB)) /* 20% of 50%MBs */ /* need to do more investigation about this threshold since the NumIntraSearch only show potential intra MBs, not the actual one */ { /* we can choose to just encode I_SLICE without IDR */ //video->nal_unit_type = AVC_NALTYPE_IDR; video->nal_unit_type = AVC_NALTYPE_SLICE; video->sliceHdr->slice_type = AVC_I_ALL_SLICE; video->slice_type = AVC_I_SLICE; oscl_memset(intraSearch, 1, sizeof(uint8)*totalMB); i = totalMB; while (i--) { mblock[i].mb_intra = 1; encvid->min_cost[i] = 0x7FFFFFFF; /* max value for int */ } rateCtrl->totalSAD = totalSAD * 2; /* SAD */ return ; } } /******** no scene change, continue motion search **********************/ start_i = 0; type_pred++; /* second pass */ } rateCtrl->totalSAD = totalSAD; /* SAD */ #ifdef HTFM /***** HYPOTHESIS TESTING ********/ if (collect) { collect = 0; UpdateHTFM(encvid, newvar, exp_lamda, &htfm_stat); } /*********************************/ #endif return ; } /*===================================================================== Function: PaddingEdge Date: 09/16/2000 Purpose: Pad edge of a Vop =====================================================================*/ void AVCPaddingEdge(AVCPictureData *refPic) { uint8 *src, *dst; int i; int pitch, width, height; uint32 temp1, temp2; width = refPic->width; height = refPic->height; pitch = refPic->pitch; /* pad top */ src = refPic->Sl; temp1 = *src; /* top-left corner */ temp2 = src[width-1]; /* top-right corner */ temp1 |= (temp1 << 8); temp1 |= (temp1 << 16); temp2 |= (temp2 << 8); temp2 |= (temp2 << 16); dst = src - (pitch << 4); 
    /* (continued) pad top: replicate the corner pixels 16 bytes to each side
       of the copied top row, then duplicate that padded row upward 15 times */
    *((uint32*)(dst - 16)) = temp1;
    *((uint32*)(dst - 12)) = temp1;
    *((uint32*)(dst - 8)) = temp1;
    *((uint32*)(dst - 4)) = temp1;
    oscl_memcpy(dst, src, width);
    *((uint32*)(dst += width)) = temp2;
    *((uint32*)(dst + 4)) = temp2;
    *((uint32*)(dst + 8)) = temp2;
    *((uint32*)(dst + 12)) = temp2;
    dst = dst - width - 16;
    i = 15;
    while (i--)
    {
        oscl_memcpy(dst + pitch, dst, pitch);
        dst += pitch;
    }

    /* pad sides: extend each row's first/last pixel 16 bytes left/right */
    dst += (pitch + 16);
    src = dst;
    i = height;
    while (i--)
    {
        temp1 = *src;
        temp2 = src[width-1];
        temp1 |= (temp1 << 8);
        temp1 |= (temp1 << 16);
        temp2 |= (temp2 << 8);
        temp2 |= (temp2 << 16);
        *((uint32*)(dst - 16)) = temp1;
        *((uint32*)(dst - 12)) = temp1;
        *((uint32*)(dst - 8)) = temp1;
        *((uint32*)(dst - 4)) = temp1;
        *((uint32*)(dst += width)) = temp2;
        *((uint32*)(dst + 4)) = temp2;
        *((uint32*)(dst + 8)) = temp2;
        *((uint32*)(dst + 12)) = temp2;
        src += pitch;
        dst = src;
    }

    /* pad bottom: replicate the last padded row downward 16 times */
    dst -= 16;
    i = 16;
    while (i--)
    {
        oscl_memcpy(dst, dst - pitch, pitch);
        dst += pitch;
    }

    return ;
}

/*===========================================================================
    Function:   AVCRasterIntraUpdate
    Date:       2/26/01
    Purpose:    To raster-scan assign INTRA-update .
                N macroblocks are updated (also was programmable).
===========================================================================*/
void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh)
{
    int indx, i;

    /* mark the next numRefresh MBs (raster order) for forced intra coding */
    indx = encvid->firstIntraRefreshMBIndx;
    for (i = 0; i < numRefresh && indx < totalMB; i++)
    {
        (mblock + indx)->mb_intra = 1;
        encvid->intraSearch[indx++] = 1;
    }

    /* if read the end of frame, reset and loop around */
    if (indx >= totalMB - 1)
    {
        indx = 0;
        while (i < numRefresh && indx < totalMB)
        {
            (mblock + indx)->mb_intra = 1;
            encvid->intraSearch[indx++] = 1;
            i++;
        }
    }

    encvid->firstIntraRefreshMBIndx = indx; /* update with a new value */

    return ;
}

#ifdef HTFM
/* Set up Hypothesis-Testing Fast Matching: every 30th frame the SAD function
   pointers are switched to the *_Collect variants that gather statistics;
   otherwise the thresholded HTFM variants are installed. offset/offset2 are
   filled with the 16 interleaved sub-sampling offsets for the current frame
   (lx strides) and the reference frame (rx strides) respectively. */
void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect)
{
    AVCCommonObj *video = encvid->common;
    int i;
    int lx = video->currPic->width;  // padding
    int lx2 = lx << 1;
    int lx3 = lx2 + lx;
    int rx = video->currPic->pitch;
    int rx2 = rx << 1;
    int rx3 = rx2 + rx;
    int *offset, *offset2;

    /* 4/11/01, collect data every 30 frames, doesn't have to be base layer */
    if (((int)video->sliceHdr->frame_num) % 30 == 1)
    {
        *collect = 1;

        htfm_stat->countbreak = 0;
        htfm_stat->abs_dif_mad_avg = 0;

        for (i = 0; i < 16; i++)
        {
            newvar[i] = 0.0;
        }
//      encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM_Collect;
        encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM_Collect;
        encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
        encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFM_Collectxh;
        encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFM_Collectyh;
        encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFM_Collectxhyh;
        encvid->sad_extra_info = (void*)(htfm_stat);
        offset = htfm_stat->offsetArray;
        offset2 = htfm_stat->offsetRef;
    }
    else
    {
//      encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM;
        encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM;
        encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
        encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFMxh;
        encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFMyh;
        encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFMxhyh;
        encvid->sad_extra_info = (void*)(encvid->nrmlz_th);
        offset = encvid->nrmlz_th + 16;
        offset2 = encvid->nrmlz_th + 32;
    }

    /* 4x4 interleaved sampling pattern over the 16x16 MB, current frame */
    offset[0] = 0;
    offset[1] = lx2 + 2;
    offset[2] = 2;
    offset[3] = lx2;
    offset[4] = lx + 1;
    offset[5] = lx3 + 3;
    offset[6] = lx + 3;
    offset[7] = lx3 + 1;
    offset[8] = lx;
    offset[9] = lx3 + 2;
    offset[10] = lx3 ;
    offset[11] = lx + 2 ;
    offset[12] = 1;
    offset[13] = lx2 + 3;
    offset[14] = lx2 + 1;
    offset[15] = 3;

    /* same pattern with reference-frame (pitch) strides */
    offset2[0] = 0;
    offset2[1] = rx2 + 2;
    offset2[2] = 2;
    offset2[3] = rx2;
    offset2[4] = rx + 1;
    offset2[5] = rx3 + 3;
    offset2[6] = rx + 3;
    offset2[7] = rx3 + 1;
    offset2[8] = rx;
    offset2[9] = rx3 + 2;
    offset2[10] = rx3 ;
    offset2[11] = rx + 2 ;
    offset2[12] = 1;
    offset2[13] = rx2 + 3;
    offset2[14] = rx2 + 1;
    offset2[15] = 3;

    return ;
}

/* Recompute the HTFM early-termination thresholds from the statistics
   gathered during a collect frame (exponential model with fixed ratios). */
void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat)
{
    if (htfm_stat->countbreak == 0)
        htfm_stat->countbreak = 1;

    newvar[0] = (double)(htfm_stat->abs_dif_mad_avg) / (htfm_stat->countbreak * 16.);

    if (newvar[0] < 0.001)
    {
        newvar[0] = 0.001; /* to prevent floating overflow */
    }
    exp_lamda[0] = 1 / (newvar[0] * 1.4142136);
    exp_lamda[1] = exp_lamda[0] * 1.5825;
    exp_lamda[2] = exp_lamda[0] * 2.1750;
    exp_lamda[3] = exp_lamda[0] * 3.5065;
    exp_lamda[4] = exp_lamda[0] * 3.1436;
    exp_lamda[5] = exp_lamda[0] * 3.5315;
    exp_lamda[6] = exp_lamda[0] * 3.7449;
    exp_lamda[7] = exp_lamda[0] * 4.5854;
    exp_lamda[8] = exp_lamda[0] * 4.6191;
    exp_lamda[9] = exp_lamda[0] * 5.4041;
    exp_lamda[10] = exp_lamda[0] * 6.5974;
    exp_lamda[11] = exp_lamda[0] * 10.5341;
    exp_lamda[12] = exp_lamda[0] * 10.0719;
    exp_lamda[13] = exp_lamda[0] * 12.0516;
    exp_lamda[14] = exp_lamda[0] * 15.4552;

    CalcThreshold(HTFM_Pf, exp_lamda, encvid->nrmlz_th);
    return ;
}

/* Convert the per-stage lambda parameters into integer pixel-count
   thresholds nrmlz_th[0..14] for the target false-alarm probability pf. */
void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[])
{
    int i;
    double temp[15];
    //  printf("\nLamda: ");

    /* parametric PREMODELling */
    for (i = 0; i < 15; i++)
    {
        //      printf("%g ",exp_lamda[i]);
        if (pf < 0.5) temp[i] = 1 / exp_lamda[i] * M4VENC_LOG(2 * pf);
        else temp[i] = -1 / exp_lamda[i] * M4VENC_LOG(2 * (1 - pf));
    }

    nrmlz_th[15] = 0;
    for (i = 0; i < 15; i++)        /* scale upto no.pixels */
        nrmlz_th[i] = (int)(temp[i] * ((i + 1) << 4) + 0.5);

    return ;
}

/* Copy the current 16x16 luma MB into the compact currYMB buffer, but in the
   HTFM interleaved order (offset[] pattern) so the subsampled SAD stages read
   it sequentially. */
void    HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch)
{
    AVCCommonObj *video = encvid->common;
    uint32 *htfmMB = (uint32*)(encvid->currYMB);
    uint8 *ptr, byte;
    int *offset;
    int i;
    uint32 word;

    if (((int)video->sliceHdr->frame_num) % 30 == 1)
    {
        offset = htfm_stat->offsetArray;
    }
    else
    {
        offset = encvid->nrmlz_th + 16;
    }

    for (i = 0; i < 16; i++)
    {
        /* gather 4 pixels spaced 4 columns apart into one word, then step
           down 4 rows and repeat, 4 words per offset */
        ptr = cur + offset[i];

        word = ptr[0];
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;

        word = *(ptr += (pitch << 2));
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;

        word = *(ptr += (pitch << 2));
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;

        word = *(ptr += (pitch << 2));
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;
    }

    return ;
}
#endif // HTFM

/* Copy the current 16x16 luma MB row-by-row (word reads) into the compact
   currYMB scratch buffer used by the SAD routines.
   NOTE(review): assumes cur is 4-byte aligned -- TODO confirm for all
   callers/platforms, the reads are done through uint32*. */
void    AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch)
{
    void* tmp = (void*)(encvid->currYMB);
    uint32 *currYMB = (uint32*) tmp;
    int i;

    cur -= pitch;

    for (i = 0; i < 16; i++)
    {
        *currYMB++ = *((uint32*)(cur += pitch));
        *currYMB++ = *((uint32*)(cur + 4));
        *currYMB++ = *((uint32*)(cur + 8));
        *currYMB++ = *((uint32*)(cur + 12));
    }

    return ;
}

#ifdef FIXED_INTERPRED_MODE

/* due to the complexity of the predicted motion vector, we may not decide to skip
a macroblock here just yet. */

/* We will find the best motion vector and the best intra prediction mode for each block.
*/ /* output are currMB->NumMbPart, currMB->MbPartWidth, currMB->MbPartHeight, currMB->NumSubMbPart[], currMB->SubMbPartWidth[], currMB->SubMbPartHeight, currMB->MBPartPredMode[][] (L0 or L1 or BiPred) currMB->RefIdx[], currMB->ref_idx_L0[], currMB->mvL0[], currMB->mvL1[] */ AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum, int num_pass) { AVCCommonObj *video = encvid->common; int mbPartIdx, subMbPartIdx; int16 *mv; int i; int SubMbPartHeight, SubMbPartWidth, NumSubMbPart; /* assign value to currMB->MBPartPredMode[][x],subMbMode[],NumSubMbPart[],SubMbPartWidth[],SubMbPartHeight[] */ currMB->mbMode = FIXED_INTERPRED_MODE; currMB->mb_intra = 0; if (currMB->mbMode == AVC_P16) { currMB->NumMbPart = 1; currMB->MbPartWidth = 16; currMB->MbPartHeight = 16; currMB->SubMbPartHeight[0] = 16; currMB->SubMbPartWidth[0] = 16; currMB->NumSubMbPart[0] = 1; } else if (currMB->mbMode == AVC_P16x8) { currMB->NumMbPart = 2; currMB->MbPartWidth = 16; currMB->MbPartHeight = 8; for (i = 0; i < 2; i++) { currMB->SubMbPartWidth[i] = 16; currMB->SubMbPartHeight[i] = 8; currMB->NumSubMbPart[i] = 1; } } else if (currMB->mbMode == AVC_P8x16) { currMB->NumMbPart = 2; currMB->MbPartWidth = 8; currMB->MbPartHeight = 16; for (i = 0; i < 2; i++) { currMB->SubMbPartWidth[i] = 8; currMB->SubMbPartHeight[i] = 16; currMB->NumSubMbPart[i] = 1; } } else if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0) { currMB->NumMbPart = 4; currMB->MbPartWidth = 8; currMB->MbPartHeight = 8; if (FIXED_SUBMB_MODE == AVC_8x8) { SubMbPartHeight = 8; SubMbPartWidth = 8; NumSubMbPart = 1; } else if (FIXED_SUBMB_MODE == AVC_8x4) { SubMbPartHeight = 4; SubMbPartWidth = 8; NumSubMbPart = 2; } else if (FIXED_SUBMB_MODE == AVC_4x8) { SubMbPartHeight = 8; SubMbPartWidth = 4; NumSubMbPart = 2; } else if (FIXED_SUBMB_MODE == AVC_4x4) { SubMbPartHeight = 4; SubMbPartWidth = 4; NumSubMbPart = 4; } for (i = 0; i < 4; i++) { currMB->subMbMode[i] = FIXED_SUBMB_MODE; 
currMB->SubMbPartHeight[i] = SubMbPartHeight; currMB->SubMbPartWidth[i] = SubMbPartWidth; currMB->NumSubMbPart[i] = NumSubMbPart; } } else /* it's probably intra mode */ { return AVCENC_SUCCESS; } for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++) { currMB->MBPartPredMode[mbPartIdx][0] = AVC_Pred_L0; currMB->ref_idx_L0[mbPartIdx] = FIXED_REF_IDX; currMB->RefIdx[mbPartIdx] = video->RefPicList0[FIXED_REF_IDX]->RefIdx; for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++) { mv = (int16*)(currMB->mvL0 + (mbPartIdx << 2) + subMbPartIdx); *mv++ = FIXED_MVX; *mv = FIXED_MVY; } } encvid->min_cost = 0; return AVCENC_SUCCESS; } #else /* perform the search */ /* This option #1 search is very similar to PV's MPEG4 motion search algorithm. The search is done in hierarchical manner from 16x16 MB down to smaller and smaller partition. At each level, a decision can be made to stop the search if the expected prediction gain is not worth the computation. The decision can also be made at the finest level for more fullsearch-like behavior with the price of heavier computation. 
*/ void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[], int i0, int j0, int type_pred, int FS_en, int *hp_guess) { AVCCommonObj *video = encvid->common; AVCPictureData *currPic = video->currPic; AVCSeqParamSet *currSPS = video->currSeqParams; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCMacroblock *currMB = video->currMB; uint8 *ref, *cand, *ncand; void *extra_info = encvid->sad_extra_info; int mbnum = video->mbNum; int width = currPic->width; /* 6/12/01, must be multiple of 16 */ int height = currPic->height; AVCMV *mot16x16 = encvid->mot16x16; int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock; int range = rateCtrl->mvRange; int lx = currPic->pitch; /* padding */ int i, j, imin, jmin, ilow, ihigh, jlow, jhigh; int d, dmin, dn[9]; int k; int mvx[5], mvy[5]; int num_can, center_again; int last_loc, new_loc = 0; int step, max_step = range >> 1; int next; int cmvx, cmvy; /* estimated predicted MV */ int lev_idx; int lambda_motion = encvid->lambda_motion; uint8 *mvbits = encvid->mvbits; int mvshift = 2; int mvcost; int min_sad = 65535; ref = video->RefPicList0[DEFAULT_REF_IDX]->Sl; /* origin of actual frame */ /* have to initialize these params, necessary for interprediction part */ currMB->NumMbPart = 1; currMB->SubMbPartHeight[0] = 16; currMB->SubMbPartWidth[0] = 16; currMB->NumSubMbPart[0] = 1; currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] = currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = DEFAULT_REF_IDX; currMB->ref_idx_L1[0] = currMB->ref_idx_L1[1] = currMB->ref_idx_L1[2] = currMB->ref_idx_L1[3] = DEFAULT_REF_IDX; currMB->RefIdx[0] = currMB->RefIdx[1] = currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[DEFAULT_REF_IDX]->RefIdx; cur = encvid->currYMB; /* use smaller memory space for current MB */ /* find limit of the search (adjusting search range)*/ lev_idx = mapLev2Idx[currSPS->level_idc]; /* we can make this part dynamic based on previous statistics */ ilow = i0 - range; if (i0 - ilow 
> 2047) /* clip to conform with the standard */ { ilow = i0 - 2047; } if (ilow < -13) // change it from -15 to -13 because of 6-tap filter needs extra 2 lines. { ilow = -13; } ihigh = i0 + range - 1; if (ihigh - i0 > 2047) /* clip to conform with the standard */ { ihigh = i0 + 2047; } if (ihigh > width - 3) { ihigh = width - 3; // change from width-1 to width-3 for the same reason as above } jlow = j0 - range; if (j0 - jlow > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */ { jlow = j0 - MaxVmvR[lev_idx] + 1; } if (jlow < -13) // same reason as above { jlow = -13; } jhigh = j0 + range - 1; if (jhigh - j0 > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */ { jhigh = j0 + MaxVmvR[lev_idx] - 1; } if (jhigh > height - 3) // same reason as above { jhigh = height - 3; } /* find initial motion vector & predicted MV*/ AVCCandidateSelection(mvx, mvy, &num_can, i0 >> 4, j0 >> 4, encvid, type_pred, &cmvx, &cmvy); imin = i0; jmin = j0; /* needed for fullsearch */ ncand = ref + i0 + j0 * lx; /* for first row of MB, fullsearch can be used */ if (FS_en) { *hp_guess = 0; /* no guess for fast half-pel */ dmin = AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy); ncand = ref + imin + jmin * lx; } else { /* fullsearch the top row to only upto (0,3) MB */ /* upto 30% complexity saving with the same complexity */ if (video->PrevRefFrameNum == 0 && j0 == 0 && i0 <= 64 && type_pred != 1) { *hp_guess = 0; /* no guess for fast half-pel */ dmin = AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy); ncand = ref + imin + jmin * lx; } else { /************** initialize candidate **************************/ dmin = 65535; /* check if all are equal */ if (num_can == ALL_CAND_EQUAL) { i = i0 + mvx[0]; j = j0 + mvy[0]; if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh) { cand = ref + i + j * lx; d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info); mvcost = MV_COST(lambda_motion, mvshift, i - 
i0, j - j0, cmvx, cmvy); d += mvcost; if (d < dmin) { dmin = d; imin = i; jmin = j; ncand = cand; min_sad = d - mvcost; // for rate control } } } else { /************** evaluate unique candidates **********************/ for (k = 0; k < num_can; k++) { i = i0 + mvx[k]; j = j0 + mvy[k]; if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh) { cand = ref + i + j * lx; d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info); mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy); d += mvcost; if (d < dmin) { dmin = d; imin = i; jmin = j; ncand = cand; min_sad = d - mvcost; // for rate control } } } } /******************* local refinement ***************************/ center_again = 0; last_loc = new_loc = 0; // ncand = ref + jmin*lx + imin; /* center of the search */ step = 0; dn[0] = dmin; while (!center_again && step <= max_step) { AVCMoveNeighborSAD(dn, last_loc); center_again = 1; i = imin; j = jmin - 1; cand = ref + i + j * lx; /* starting from [0,-1] */ /* spiral check one step at a time*/ for (k = 2; k <= 8; k += 2) { if (!tab_exclude[last_loc][k]) /* exclude last step computation */ { /* not already computed */ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh) { d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info); mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy); d += mvcost; dn[k] = d; /* keep it for half pel use */ if (d < dmin) { ncand = cand; dmin = d; imin = i; jmin = j; center_again = 0; new_loc = k; min_sad = d - mvcost; // for rate control } } } if (k == 8) /* end side search*/ { if (!center_again) { k = -1; /* start diagonal search */ cand -= lx; j--; } } else { next = refine_next[k][0]; i += next; cand += next; next = refine_next[k][1]; j += next; cand += lx * next; } } last_loc = new_loc; step ++; } if (!center_again) AVCMoveNeighborSAD(dn, last_loc); *hp_guess = AVCFindMin(dn); encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0; } } mot16x16[mbnum].sad = dmin; mot16x16[mbnum].x = (imin - 
i0) << 2; mot16x16[mbnum].y = (jmin - j0) << 2; best_cand[0] = ncand; if (rateCtrl->subPelEnable) // always enable half-pel search { /* find half-pel resolution motion vector */ min_sad = AVCFindHalfPelMB(encvid, cur, mot16x16 + mbnum, best_cand[0], i0, j0, *hp_guess, cmvx, cmvy); encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0; if (encvid->best_qpel_pos == -1) { ncand = encvid->hpel_cand[encvid->best_hpel_pos]; } else { ncand = encvid->qpel_cand[encvid->best_qpel_pos]; } } else { encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0; } /** do motion comp here for now */ ref = currPic->Sl + i0 + j0 * lx; /* copy from the best result to current Picture */ for (j = 0; j < 16; j++) { for (i = 0; i < 16; i++) { *ref++ = *ncand++; } ref += (lx - 16); ncand += 8; } return ; } #endif /*=============================================================================== Function: AVCFullSearch Date: 09/16/2000 Purpose: Perform full-search motion estimation over the range of search region in a spiral-outward manner. Input/Output: VideoEncData, current Vol, previou Vop, pointer to the left corner of current VOP, current coord (also output), boundaries. 
===============================================================================*/
/* Exhaustive integer-pel motion search for one 16x16 macroblock.
   Evaluates every position in [ilow,ihigh]x[jlow,jhigh] by spiralling outward,
   ring by ring, from the starting position (*imin,*jmin), up to a radius of
   rateCtrl->mvRange.  The cost minimized is SAD + lambda-weighted MV bit cost
   relative to the predicted MV (cmvx,cmvy), both in quarter-pel units.

   In/out:  *imin,*jmin  - in: search center; out: best integer-pel position.
   Returns: minimum combined cost (SAD + MV cost).
   Side effect: stores the per-MB mean absolute difference (pure SAD / 256)
   into rateCtrl->MADofMB[] for the rate controller. */
int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur, int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh, int cmvx, int cmvy)
{
    int range = encvid->rateCtrl->mvRange;
    AVCPictureData *currPic = encvid->common->currPic;
    uint8 *cand;
    int i, j, k, l;
    int d, dmin;
    int i0 = *imin; /* current position */
    int j0 = *jmin;
    int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;
    void *extra_info = encvid->sad_extra_info;
    int lx = currPic->pitch; /* with padding */
    int offset = i0 + j0 * lx;
    int lambda_motion = encvid->lambda_motion;
    uint8 *mvbits = encvid->mvbits; /* NOTE: referenced implicitly by the MV_COST macro below -- do not remove as "unused" */
    int mvshift = 2; /* quarter-pel: MV components are compared at 1<<2 scale */
    int mvcost;
    int min_sad;

    /* Seed with the cost at the search center.  The high 16 bits of the third
       argument carry a distortion bound (here 65535 = no bound), the low 16
       bits the pitch -- presumably used by SAD_Macroblock for early
       termination; TODO confirm against the SAD implementations. */
    cand = prev + offset;

    dmin = (*SAD_Macroblock)(cand, cur, (65535 << 16) | lx, (void*)extra_info);
    mvcost = MV_COST(lambda_motion, mvshift, 0, 0, cmvx, cmvy);
    min_sad = dmin;
    dmin += mvcost;

    /* perform spiral search */
    for (k = 1; k <= range; k++)
    {
        /* ring k starts at its top-left corner and contains 8k positions */
        i = i0 - k;
        j = j0 - k;
        cand = prev + i + j * lx;

        for (l = 0; l < 8*k; l++)
        {
            /* no need for boundary checking again */
            if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
            {
                /* current best cost passed in the high half as a bound */
                d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, (void*)extra_info);
                mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
                d += mvcost;

                if (d < dmin)
                {
                    dmin = d;
                    *imin = i;
                    *jmin = j;
                    min_sad = d - mvcost; /* pure SAD without MV cost -- kept for rate control */
                }
            }

            /* advance along the ring: 2k steps right, 2k down, 2k left, 2k up */
            if (l < (k << 1))
            {
                i++;
                cand++;
            }
            else if (l < (k << 2))
            {
                j++;
                cand += lx;
            }
            else if (l < ((k << 2) + (k << 1)))
            {
                i--;
                cand--;
            }
            else
            {
                j--;
                cand -= lx;
            }
        }
    }

    encvid->rateCtrl->MADofMB[encvid->common->mbNum] = (min_sad / 256.0); // for rate control; 256 = pixels per 16x16 MB

    return dmin;
}

/*===============================================================================
    Function:   AVCCandidateSelection
    Date:       09/16/2000
    Purpose:    Fill up the list of candidate using spatio-temporal correlation
                among neighboring blocks.
Input/Output: type_pred = 0: first pass, 1: second pass, or no SCD Modified: , 09/23/01, get rid of redundant candidates before passing back. , 09/11/07, added return for modified predicted MV, this will be needed for both fast search and fullsearch. ===============================================================================*/ void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb, AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy) { AVCCommonObj *video = encvid->common; AVCMV *mot16x16 = encvid->mot16x16; AVCMV *pmot; int mbnum = video->mbNum; int mbwidth = video->PicWidthInMbs; int mbheight = video->PicHeightInMbs; int i, j, same, num1; /* this part is for predicted MV */ int pmvA_x = 0, pmvA_y = 0, pmvB_x = 0, pmvB_y = 0, pmvC_x = 0, pmvC_y = 0; int availA = 0, availB = 0, availC = 0; *num_can = 0; if (video->PrevRefFrameNum != 0) // previous frame is an IDR frame { /* Spatio-Temporal Candidate (five candidates) */ if (type_pred == 0) /* first pass */ { pmot = &mot16x16[mbnum]; /* same coordinate previous frame */ mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; if (imb >= (mbwidth >> 1) && imb > 0) /*left neighbor previous frame */ { pmot = &mot16x16[mbnum-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } else if (imb + 1 < mbwidth) /*right neighbor previous frame */ { pmot = &mot16x16[mbnum+1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (jmb < mbheight - 1) /*bottom neighbor previous frame */ { pmot = &mot16x16[mbnum+mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } else if (jmb > 0) /*upper neighbor previous frame */ { pmot = &mot16x16[mbnum-mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (imb > 0 && jmb > 0) /* upper-left neighbor current frame*/ { pmot = &mot16x16[mbnum-mbwidth-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (jmb > 0 && imb < 
mbheight - 1) /* upper right neighbor current frame*/ { pmot = &mot16x16[mbnum-mbwidth+1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } } else /* second pass */ /* original ST1 algorithm */ { pmot = &mot16x16[mbnum]; /* same coordinate previous frame */ mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; if (imb > 0) /*left neighbor current frame */ { pmot = &mot16x16[mbnum-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (jmb > 0) /*upper neighbor current frame */ { pmot = &mot16x16[mbnum-mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (imb < mbwidth - 1) /*right neighbor previous frame */ { pmot = &mot16x16[mbnum+1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (jmb < mbheight - 1) /*bottom neighbor previous frame */ { pmot = &mot16x16[mbnum+mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } } /* get predicted MV */ if (imb > 0) /* get MV from left (A) neighbor either on current or previous frame */ { availA = 1; pmot = &mot16x16[mbnum-1]; pmvA_x = pmot->x; pmvA_y = pmot->y; } if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */ { availB = 1; pmot = &mot16x16[mbnum-mbwidth]; pmvB_x = pmot->x; pmvB_y = pmot->y; availC = 1; if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */ { pmot = &mot16x16[mbnum-mbwidth+1]; } else /* get MV from top-left (D) neighbor of current frame */ { pmot = &mot16x16[mbnum-mbwidth-1]; } pmvC_x = pmot->x; pmvC_y = pmot->y; } } else /* only Spatial Candidate (four candidates)*/ { if (type_pred == 0) /*first pass*/ { if (imb > 1) /* neighbor two blocks away to the left */ { pmot = &mot16x16[mbnum-2]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (imb > 0 && jmb > 0) /* upper-left neighbor */ { pmot = &mot16x16[mbnum-mbwidth-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = 
(pmot->y) >> 2; } if (jmb > 0 && imb < mbheight - 1) /* upper right neighbor */ { pmot = &mot16x16[mbnum-mbwidth+1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } /* get predicted MV */ if (imb > 1) /* get MV from 2nd left (A) neighbor either of current frame */ { availA = 1; pmot = &mot16x16[mbnum-2]; pmvA_x = pmot->x; pmvA_y = pmot->y; } if (jmb > 0 && imb > 0) /* get MV from top-left (B) neighbor of current frame */ { availB = 1; pmot = &mot16x16[mbnum-mbwidth-1]; pmvB_x = pmot->x; pmvB_y = pmot->y; } if (jmb > 0 && imb < mbwidth - 1) { availC = 1; pmot = &mot16x16[mbnum-mbwidth+1]; pmvC_x = pmot->x; pmvC_y = pmot->y; } } //#ifdef SCENE_CHANGE_DETECTION /* second pass (ST2 algorithm)*/ else { if (type_pred == 1) /* 4/7/01 */ { if (imb > 0) /*left neighbor current frame */ { pmot = &mot16x16[mbnum-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (jmb > 0) /*upper neighbor current frame */ { pmot = &mot16x16[mbnum-mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (imb < mbwidth - 1) /*right neighbor current frame */ { pmot = &mot16x16[mbnum+1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } if (jmb < mbheight - 1) /*bottom neighbor current frame */ { pmot = &mot16x16[mbnum+mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } } //#else else /* original ST1 algorithm */ { if (imb > 0) /*left neighbor current frame */ { pmot = &mot16x16[mbnum-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; if (jmb > 0) /*upper-left neighbor current frame */ { pmot = &mot16x16[mbnum-mbwidth-1]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } } if (jmb > 0) /*upper neighbor current frame */ { pmot = &mot16x16[mbnum-mbwidth]; mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; if (imb < mbheight - 1) /*upper-right neighbor current frame */ { pmot = &mot16x16[mbnum-mbwidth+1]; 
mvx[(*num_can)] = (pmot->x) >> 2; mvy[(*num_can)++] = (pmot->y) >> 2; } } } /* get predicted MV */ if (imb > 0) /* get MV from left (A) neighbor either on current or previous frame */ { availA = 1; pmot = &mot16x16[mbnum-1]; pmvA_x = pmot->x; pmvA_y = pmot->y; } if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */ { availB = 1; pmot = &mot16x16[mbnum-mbwidth]; pmvB_x = pmot->x; pmvB_y = pmot->y; availC = 1; if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */ { pmot = &mot16x16[mbnum-mbwidth+1]; } else /* get MV from top-left (D) neighbor of current frame */ { pmot = &mot16x16[mbnum-mbwidth-1]; } pmvC_x = pmot->x; pmvC_y = pmot->y; } } //#endif } /* 3/23/01, remove redundant candidate (possible k-mean) */ num1 = *num_can; *num_can = 1; for (i = 1; i < num1; i++) { same = 0; j = 0; while (!same && j < *num_can) { #if (CANDIDATE_DISTANCE==0) if (mvx[i] == mvx[j] && mvy[i] == mvy[j]) #else // modified k-mean, 3/24/01, shouldn't be greater than 3 if (AVC_ABS(mvx[i] - mvx[j]) + AVC_ABS(mvy[i] - mvy[j]) < CANDIDATE_DISTANCE) #endif same = 1; j++; } if (!same) { mvx[*num_can] = mvx[i]; mvy[*num_can] = mvy[i]; (*num_can)++; } } if (num1 == 5 && *num_can == 1) *num_can = ALL_CAND_EQUAL; /* all are equal */ /* calculate predicted MV */ if (availA && !(availB || availC)) { *cmvx = pmvA_x; *cmvy = pmvA_y; } else { *cmvx = AVC_MEDIAN(pmvA_x, pmvB_x, pmvC_x); *cmvy = AVC_MEDIAN(pmvA_y, pmvB_y, pmvC_y); } return ; } /************************************************************* Function: AVCMoveNeighborSAD Date: 3/27/01 Purpose: Move neighboring SAD around when center has shifted *************************************************************/ void AVCMoveNeighborSAD(int dn[], int new_loc) { int tmp[9]; tmp[0] = dn[0]; tmp[1] = dn[1]; tmp[2] = dn[2]; tmp[3] = dn[3]; tmp[4] = dn[4]; tmp[5] = dn[5]; tmp[6] = dn[6]; tmp[7] = dn[7]; tmp[8] = dn[8]; dn[0] = dn[1] = dn[2] = dn[3] = dn[4] = dn[5] = dn[6] = dn[7] = dn[8] = 65536; 
    /* Re-map the eight neighbor SADs after the search center moved to
       position new_loc (1..8 on the 3x3 spiral; 0 = no move).  Entries with
       no counterpart around the new center stay at the 65536 sentinel set
       above, so they will never win a comparison. */
    switch (new_loc)
    {
        case 0:
            break;
        case 1:
            dn[4] = tmp[2];
            dn[5] = tmp[0];
            dn[6] = tmp[8];
            break;
        case 2:
            dn[4] = tmp[3];
            dn[5] = tmp[4];
            dn[6] = tmp[0];
            dn[7] = tmp[8];
            dn[8] = tmp[1];
            break;
        case 3:
            dn[6] = tmp[4];
            dn[7] = tmp[0];
            dn[8] = tmp[2];
            break;
        case 4:
            dn[1] = tmp[2];
            dn[2] = tmp[3];
            dn[6] = tmp[5];
            dn[7] = tmp[6];
            dn[8] = tmp[0];
            break;
        case 5:
            dn[1] = tmp[0];
            dn[2] = tmp[4];
            dn[8] = tmp[6];
            break;
        case 6:
            dn[1] = tmp[8];
            dn[2] = tmp[0];
            dn[3] = tmp[4];
            dn[4] = tmp[5];
            dn[8] = tmp[7];
            break;
        case 7:
            dn[2] = tmp[8];
            dn[3] = tmp[0];
            dn[4] = tmp[6];
            break;
        case 8:
            dn[2] = tmp[1];
            dn[3] = tmp[2];
            dn[4] = tmp[0];
            dn[5] = tmp[6];
            dn[6] = tmp[7];
            break;
    }
    /* the SAD of the old position becomes the new center SAD */
    dn[0] = tmp[new_loc];

    return ;
}

/* 3/28/01, find minimal of dn[9] */
/* Returns the index (1..8) of the smallest neighbor SAD.  dn[0] (the center)
   is deliberately not considered -- the caller uses the result as a
   half-pel search hint around the integer-pel winner. */
int AVCFindMin(int dn[])
{
    int min, i;
    int dmin;

    dmin = dn[1];
    min = 1;
    for (i = 2; i < 9; i++)
    {
        if (dn[i] < dmin)
        {
            dmin = dn[i];
            min = i;
        }
    }

    return min;
}



================================================ FILE: RtspCamera/jni/avc_h264/enc/src/pvavcencoder.cpp ================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "pvavcencoder.h"
#include "oscl_mem.h"

// xxx pa
#define LOG_TAG "pvaencoder"
#include "android/log.h"

/* global static functions: C-style callbacks handed to the AVC core encoder
   through the AVCHandle structure (memory, DPB and logging hooks). */

/* Logging callback -- intentionally a no-op stub. */
void CbAvcEncDebugLog(uint32 *userData, AVCLogType type, char *string1, int val1, int val2)
{
    OSCL_UNUSED_ARG(userData);
    OSCL_UNUSED_ARG(type);
    OSCL_UNUSED_ARG(string1);
    OSCL_UNUSED_ARG(val1);
    OSCL_UNUSED_ARG(val2);

    return ;
}

/* Allocation callback for the core encoder. */
int CbAvcEncMalloc(void *userData, int32 size, int attribute)
{
    OSCL_UNUSED_ARG(userData);
    OSCL_UNUSED_ARG(attribute);

    uint8 *mem;

    mem = (uint8*) oscl_malloc(size);

    /* NOTE(review): returning a pointer squeezed into an int truncates it on
       LP64 / 64-bit builds.  The return type is fixed by the AVCHandle
       callback ABI, so this cannot be changed here -- flagging only.
       Safe solely on 32-bit targets. */
    return (int)mem;
}

/* Matching free callback; 'mem' is the int-encoded pointer from CbAvcEncMalloc. */
void CbAvcEncFree(void *userData, int mem)
{
    OSCL_UNUSED_ARG(userData);

    oscl_free((void*)mem);

    return ;
}

/* DPB allocation callback -- forwards to the owning PVAVCEncoder instance. */
int CbAvcEncDPBAlloc(void *userData, uint frame_size_in_mbs, uint num_buffers)
{
    PVAVCEncoder *pAvcEnc = (PVAVCEncoder*) userData;

    return pAvcEnc->AVC_DPBAlloc(frame_size_in_mbs, num_buffers);
}

/* Release a DPB frame slot -- forwards to the owning instance. */
void CbAvcEncFrameUnbind(void *userData, int indx)
{
    PVAVCEncoder *pAvcEnc = (PVAVCEncoder*) userData;

    pAvcEnc->AVC_FrameUnbind(indx);

    return ;
}

/* Claim a DPB frame slot and return its YUV buffer -- forwards to the owning instance. */
int CbAvcEncFrameBind(void *userData, int indx, uint8 **yuv)
{
    PVAVCEncoder *pAvcEnc = (PVAVCEncoder*) userData;

    return pAvcEnc->AVC_FrameBind(indx, yuv);
}

/* ///////////////////////////////////////////////////////////////////////// */
PVAVCEncoder::PVAVCEncoder()
{
    //iEncoderControl
}

/* ///////////////////////////////////////////////////////////////////////// */
OSCL_EXPORT_REF PVAVCEncoder::~PVAVCEncoder()
{
    CleanupEncoder();
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Two-phase construction: returns NULL on allocation/Construct failure. */
OSCL_EXPORT_REF PVAVCEncoder* PVAVCEncoder::New()
{
    PVAVCEncoder* self = new PVAVCEncoder;
    if (self && self->Construct())
        return self;
    if (self)
        delete self;
    return NULL;
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Wire up the callback table and reset all owned buffers/state. */
bool PVAVCEncoder::Construct()
{
    oscl_memset((void *)&iAvcHandle, 0, sizeof(AVCHandle));

    iAvcHandle.CBAVC_DPBAlloc = &CbAvcEncDPBAlloc;
iAvcHandle.CBAVC_FrameBind = &CbAvcEncFrameBind; iAvcHandle.CBAVC_FrameUnbind = &CbAvcEncFrameUnbind; iAvcHandle.CBAVC_Free = &CbAvcEncFree; iAvcHandle.CBAVC_Malloc = &CbAvcEncMalloc; iAvcHandle.CBAVC_DebugLog = &CbAvcEncDebugLog; iAvcHandle.userData = this; iYUVIn = NULL; iState = ECreated; iFramePtr = NULL; iDPB = NULL; iFrameUsed = NULL; return true; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::Initialize(TAVCEIInputFormat *aVidInFormat, TAVCEIEncodeParam *aEncParam) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Initialize"); AVCEncParams aEncOption; /* encoding options */ iOverrunBuffer = NULL; iOBSize = 0; if (EAVCEI_SUCCESS != Init(aVidInFormat, aEncParam, aEncOption)) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Initialize (EAVCEI_SUCCESS != Init(aVidInFormat, aEncParam, aEncOption)) -> return EAVCEI_FAIL"); return EAVCEI_FAIL; } if (AVCENC_SUCCESS != PVAVCEncInitialize(&iAvcHandle, &aEncOption, NULL, NULL)) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Initialize (AVCENC_SUCCESS != PVAVCEncInitialize(&iAvcHandle, &aEncOption, NULL, NULL)) -> return EAVCEI_FAIL"); return EAVCEI_FAIL; } iIDR = true; iDispOrd = 0; iState = EInitialized; // change state to initialized __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Initialize final return EAVCEI_SUCCESS"); return EAVCEI_SUCCESS; } /* ///////////////////////////////////////////////////////////////////////// */ int32 PVAVCEncoder::GetMaxOutputBufferSize() { int size = 0; PVAVCEncGetMaxOutputBufferSize(&iAvcHandle, &size); return size; } /* ///////////////////////////////////////////////////////////////////////// */ TAVCEI_RETVAL PVAVCEncoder::Init(TAVCEIInputFormat* aVidInFormat, TAVCEIEncodeParam* aEncParam, AVCEncParams& aEncOption) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init"); if (iState == EInitialized || iState == EEncoding) /* clean up before re-initialized */ { PVAVCCleanUpEncoder(&iAvcHandle); if 
(iYUVIn) { oscl_free(iYUVIn); iYUVIn = NULL; } } iState = ECreated; // change state back to created iId = aEncParam->iEncodeID; iSrcWidth = aVidInFormat->iFrameWidth; iSrcHeight = aVidInFormat->iFrameHeight; iSrcFrameRate = aVidInFormat->iFrameRate; iVideoFormat = aVidInFormat->iVideoFormat; iFrameOrientation = aVidInFormat->iFrameOrientation; // allocate iYUVIn if (iVideoFormat == EAVCEI_VDOFMT_YUV420SEMIPLANAR) /* Not multiple of 16 */ { iYUVIn = (uint8*) oscl_malloc((iSrcWidth*iSrcHeight* 3)>>1); if (iYUVIn == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init (iYUVIn == NULL) -> return: EAVCEI_FAIL"); return EAVCEI_FAIL; } } // check the buffer delay according to the clip duration if (aEncParam->iClipDuration > 0 && aEncParam->iRateControlType == EAVCEI_RC_VBR_1) { if (aEncParam->iBufferDelay > (float)(aEncParam->iClipDuration / 10000.0)) //enforce 10% variation of the clip duration as the bound of buffer delay { aEncParam->iBufferDelay = (float)(aEncParam->iClipDuration / 10000.0); } } /* Check color format */ if ( (iVideoFormat != EAVCEI_VDOFMT_YUV420SEMIPLANAR)) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init ( (iVideoFormat != EAVCEI_VDOFMT_YUV420SEMIPLANAR)) -> return: EAVCEI_FAIL"); return EAVCEI_FAIL; } if (aEncParam->iNumLayer > 1) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init (aEncParam->iNumLayer > 1) -> return: EAVCEI_FAIL"); return EAVCEI_FAIL; } aEncOption.width = iEncWidth = aEncParam->iFrameWidth[0]; aEncOption.height = iEncHeight = aEncParam->iFrameHeight[0]; iEncFrameRate = aEncParam->iFrameRate[0]; aEncOption.frame_rate = (uint32)(1000 * iEncFrameRate); __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init aEncParam->iRateControlType = %d", aEncParam->iRateControlType); if (aEncParam->iRateControlType == EAVCEI_RC_CONSTANT_Q) { aEncOption.rate_control = AVC_OFF; aEncOption.bitrate = 64000; // default } else if (aEncParam->iRateControlType == EAVCEI_RC_CBR_1) { aEncOption.rate_control = AVC_ON; } else if 
(aEncParam->iRateControlType == EAVCEI_RC_VBR_1) { aEncOption.rate_control = AVC_ON; } else { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init (Unknown aEncParam->iRateControlType = %d) -> return: EAVCEI_FAIL", aEncParam->iRateControlType); return EAVCEI_FAIL; } // future :: map aEncParam->iEncMode to EncMode inside AVCEncoder iPacketSize = aEncParam->iPacketSize; aEncOption.profile = mapProfile(aEncParam->iProfile); aEncOption.level = mapLevel(aEncParam->iLevel); //aEncOption.src_interval = (int)(1000/aVidInFormat->iFrameRate + 0.5); aEncOption.bitrate = aEncParam->iBitRate[0]; aEncOption.initQP = aEncParam->iIquant[0]; aEncOption.init_CBP_removal_delay = (uint32)(aEncParam->iBufferDelay * 1000); // make it millisecond aEncOption.CPB_size = ((uint32)((uint32)aEncParam->iBufferDelay * (aEncOption.bitrate))); switch (aEncParam->iIFrameInterval) { case -1: aEncOption.idr_period = 0; /* all P-frames */ break; case 0: aEncOption.idr_period = 1; /* all IDR-frames */ break; default: aEncOption.idr_period = (int)(aEncParam->iIFrameInterval * aVidInFormat->iFrameRate); break; } aEncOption.intramb_refresh = aEncParam->iNumIntraMBRefresh; aEncOption.auto_scd = (aEncParam->iSceneDetection == true) ? AVC_ON : AVC_OFF; aEncOption.out_of_band_param_set = (aEncParam->iOutOfBandParamSet == true) ? AVC_ON : AVC_OFF; aEncOption.use_overrun_buffer = AVC_OFF; // hardcode it to off /* default values */ aEncOption.poc_type = 0; aEncOption.num_ref_frame = 1; aEncOption.log2_max_poc_lsb_minus_4 = 12; aEncOption.num_slice_group = 1; aEncOption.fmo_type = 0; /// FMO is disabled for now. 
aEncOption.db_filter = AVC_ON; aEncOption.disable_db_idc = 0; aEncOption.alpha_offset = 0; aEncOption.beta_offset = 0; aEncOption.constrained_intra_pred = AVC_OFF; aEncOption.data_par = AVC_OFF; aEncOption.fullsearch = AVC_OFF; aEncOption.search_range = 16; aEncOption.sub_pel = AVC_ON; aEncOption.submb_pred = AVC_OFF; aEncOption.rdopt_mode = AVC_OFF; aEncOption.bidir_pred = AVC_OFF; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Init final return: EAVCEI_SUCCESS"); return EAVCEI_SUCCESS; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::GetParameterSet(uint8 *paramSet, int32 *size, int *aNALType) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetParameterSet"); uint aSize; AVCEnc_Status avcStatus ; if (iState != EInitialized) {/* has to be initialized first */ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetParameterSet return: EAVCEI_FAIL"); return EAVCEI_FAIL; } aSize = *size; if (paramSet == NULL || size == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetParameterSet return: EAVCEI_INPUT_ERROR"); return EAVCEI_INPUT_ERROR; } //=================> avcStatus = PVAVCEncodeNAL(&iAvcHandle, paramSet, &aSize, aNALType); if (avcStatus == AVCENC_WRONG_STATE) { *size = 0; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetParameterSet return: AVCENC_WRONG_STATE-> EAVCEI_FAIL"); return EAVCEI_FAIL; } switch (*aNALType) { case AVC_NALTYPE_SPS: case AVC_NALTYPE_PPS: *size = aSize; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetParameterSet return: SPS/PPS-> EAVCEI_SUCCESS"); return EAVCEI_SUCCESS; default: *size = 0; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetParameterSet return: default-> EAVCEI_FAIL"); return EAVCEI_FAIL; } } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::Encode(TAVCEIInputData *aVidIn) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode"); AVCEnc_Status status; if 
((aVidIn == NULL) || (aVidIn->iSource == NULL)) { return EAVCEI_INPUT_ERROR; } // we need to check the timestamp here. If it's before the proper time, // we need to return EAVCEI_FRAME_DROP here. // also check whether encoder is ready to take a new frame. if (iState == EEncoding) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode (iState == EEncoding) -> return: EAVCEI_NOT_READY"); return EAVCEI_NOT_READY; } else if (iState == ECreated) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode (iState == ECreated) -> return: EAVCEI_FAIL"); return EAVCEI_FAIL; } if (iVideoFormat == EAVCEI_VDOFMT_YUV420SEMIPLANAR) { if (iYUVIn) /* iSrcWidth is not multiple of 4 or iSrcHeight is odd number */ { CopyToYUVIn(aVidIn->iSource,iSrcWidth,iSrcHeight); iVideoIn = iYUVIn; } else /* otherwise, we can just use aVidIn->iSource */ { iVideoIn = aVidIn->iSource; // Sept 14, 2005 */ } } else { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode (iVideoFormat != EAVCEI_VDOFMT_YUV420SEMIPLANAR) -> return: EAVCEI_INPUT_ERROR"); return EAVCEI_INPUT_ERROR; } /* assign with backward-P or B-Vop this timestamp must be re-ordered */ iTimeStamp = aVidIn->iTimeStamp; iVidIn.height = ((iSrcHeight + 15) >> 4) << 4; iVidIn.pitch = ((iSrcWidth + 15) >> 4) << 4; iVidIn.coding_timestamp = iTimeStamp; iVidIn.YCbCr[0] = (uint8*)iVideoIn; iVidIn.YCbCr[1] = (uint8*)(iVideoIn + iVidIn.height * iVidIn.pitch); iVidIn.YCbCr[2] = iVidIn.YCbCr[1] + ((iVidIn.height * iVidIn.pitch) >> 2); iVidIn.disp_order = iDispOrd; //================> status = PVAVCEncSetInput(&iAvcHandle, &iVidIn); switch (status) { case AVCENC_SKIPPED_PICTURE: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode AVCENC_SKIPPED_PICTURE-> return: EAVCEI_FRAME_DROP"); return EAVCEI_FRAME_DROP; case AVCENC_FAIL: // not in the right state __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode AVCENC_FAIL-> return: EAVCEI_NOT_READY"); return EAVCEI_NOT_READY; case AVCENC_SUCCESS: iState = EEncoding; iDispOrd++; 
__android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode AVCENC_SUCCESS-> return: EAVCEI_SUCCESS"); return EAVCEI_SUCCESS; case AVCENC_NEW_IDR: iState = EEncoding; iDispOrd++; iIDR = true; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode AVCENC_NEW_IDR-> return: EAVCEI_SUCCESS"); return EAVCEI_SUCCESS; default: __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encode return: default->EAVCEI_FAIL"); return EAVCEI_FAIL; } } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::GetOutput(TAVCEIOutputData *aVidOut, int *aRemainingBytes) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput"); AVCEnc_Status status; TAVCEI_RETVAL ret; uint Size; int nalType; AVCFrameIO recon; *aRemainingBytes = 0; if (iState != EEncoding) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput return: EAVCEI_NOT_READY"); return EAVCEI_NOT_READY; } if (aVidOut == NULL) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput return: EAVCEI_INPUT_ERROR"); return EAVCEI_INPUT_ERROR; } if (iOverrunBuffer) // more output buffer to be copied out. { aVidOut->iFragment = true; aVidOut->iTimeStamp = iTimeStamp; aVidOut->iKeyFrame = iIDR; aVidOut->iLastNAL = (iEncStatus == AVCENC_PICTURE_READY) ? 
true : false; if (iOBSize > aVidOut->iBitstreamSize) { oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, aVidOut->iBitstreamSize); iOBSize -= aVidOut->iBitstreamSize; iOverrunBuffer += aVidOut->iBitstreamSize; aVidOut->iLastFragment = false; *aRemainingBytes = iOBSize; __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput return: (iLastFragment = false) EAVCEI_MORE_DATA"); return EAVCEI_MORE_DATA; } else { oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, iOBSize); aVidOut->iBitstreamSize = iOBSize; iOverrunBuffer = NULL; iOBSize = 0; aVidOut->iLastFragment = true; *aRemainingBytes = 0; if (iEncStatus == AVCENC_PICTURE_READY) { iState = EInitialized; if (iIDR == true) { iIDR = false; } __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput return: (iLastFragment = true) EAVCEI_SUCCESS"); return EAVCEI_SUCCESS; } else { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput return: (iLastFragment = true) EAVCEI_MORE_DATA"); return EAVCEI_MORE_NAL; } } } // Otherwise, call library to encode another NAL Size = aVidOut->iBitstreamSize; // ==============> iEncStatus = PVAVCEncodeNAL(&iAvcHandle, (uint8*)aVidOut->iBitstream, &Size, &nalType); if (iEncStatus == AVCENC_SUCCESS) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput (iEncStatus == AVCENC_SUCCESS) -> return EAVCEI_MORE_NAL"); aVidOut->iLastNAL = false; aVidOut->iKeyFrame = iIDR; ret = EAVCEI_MORE_NAL; } else if (iEncStatus == AVCENC_PICTURE_READY) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput (iEncStatus == AVCENC_PICTURE_READY) -> return EAVCEI_SUCCESS"); aVidOut->iLastNAL = true; aVidOut->iKeyFrame = iIDR; ret = EAVCEI_SUCCESS; iState = EInitialized; status = PVAVCEncGetRecon(&iAvcHandle, &recon); if (status == AVCENC_SUCCESS) { aVidOut->iFrame = recon.YCbCr[0]; PVAVCEncReleaseRecon(&iAvcHandle, &recon); } } else if (iEncStatus == AVCENC_SKIPPED_PICTURE) { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput iEncStatus == AVCENC_SKIPPED_PICTURE"); aVidOut->iLastFragment = true; 
aVidOut->iFragment = false; aVidOut->iBitstreamSize = 0; aVidOut->iTimeStamp = iTimeStamp; iState = EInitialized; return EAVCEI_FRAME_DROP; } else { __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "GetOuput iEncStatus else"); return EAVCEI_FAIL; } iOverrunBuffer = PVAVCEncGetOverrunBuffer(&iAvcHandle); if (iOverrunBuffer) // OB is used { if (Size < (uint)aVidOut->iBitstreamSize) // encoder decides to use OB even though the buffer is big enough { oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, Size); iOverrunBuffer = NULL; // reset it iOBSize = 0; } else { oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, aVidOut->iBitstreamSize); iOBSize = Size - aVidOut->iBitstreamSize; iOverrunBuffer += aVidOut->iBitstreamSize; if (iOBSize > 0) // there are more data { iState = EEncoding; // still encoding.. aVidOut->iLastFragment = false; aVidOut->iFragment = true; aVidOut->iTimeStamp = iTimeStamp; return EAVCEI_MORE_DATA; // only copy out from iOverrunBuffer next time. } } } aVidOut->iLastFragment = true; /* for now */ aVidOut->iFragment = false; /* for now */ aVidOut->iBitstreamSize = Size; aVidOut->iTimeStamp = iTimeStamp; if (iEncStatus == AVCENC_PICTURE_READY && iIDR == true) { iIDR = false; } return ret; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::FlushInput() { // do nothing for now. 
return EAVCEI_SUCCESS; } /* ///////////////////////////////////////////////////////////////////////// */ TAVCEI_RETVAL PVAVCEncoder::CleanupEncoder() { if (iState == EInitialized || iState == EEncoding) { PVAVCCleanUpEncoder(&iAvcHandle); iState = ECreated; if (iYUVIn) { oscl_free(iYUVIn); iYUVIn = NULL; } } if (iFrameUsed) { oscl_free(iFrameUsed); iFrameUsed = NULL; } if (iDPB) { oscl_free(iDPB); iDPB = NULL; } if (iFramePtr) { oscl_free(iFramePtr); iFramePtr = NULL; } return EAVCEI_SUCCESS; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::UpdateBitRate(int32 *aBitRate) { if (PVAVCEncUpdateBitRate(&iAvcHandle, aBitRate[0]) == AVCENC_SUCCESS) return EAVCEI_SUCCESS; else return EAVCEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::UpdateFrameRate(OsclFloat *aFrameRate) { if (PVAVCEncUpdateFrameRate(&iAvcHandle, (uint32)(1000*aFrameRate[0]), 1000) == AVCENC_SUCCESS) return EAVCEI_SUCCESS; else return EAVCEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::UpdateIDRFrameInterval(int32 aIDRFrameInterval) { if (PVAVCEncUpdateIDRInterval(&iAvcHandle, aIDRFrameInterval) == AVCENC_SUCCESS) return EAVCEI_SUCCESS; else return EAVCEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::IDRRequest() { if (PVAVCEncIDRRequest(&iAvcHandle) == AVCENC_SUCCESS) return EAVCEI_SUCCESS; else return EAVCEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF int32 PVAVCEncoder::GetEncodeWidth(int32 aLayer) { OSCL_UNUSED_ARG(aLayer); return iEncWidth; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF int32 PVAVCEncoder::GetEncodeHeight(int32 aLayer) { 
    OSCL_UNUSED_ARG(aLayer);
    return iEncHeight;
}

/* ///////////////////////////////////////////////////////////////////////// */
OSCL_EXPORT_REF OsclFloat PVAVCEncoder::GetEncodeFrameRate(int32 aLayer)
{
    OSCL_UNUSED_ARG(aLayer);
    return iEncFrameRate;
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Copy from YUV input to YUV frame inside M4VEnc lib */
/* When input is not YUV, the color conv will write it directly to iVideoInOut. */
/* ///////////////////////////////////////////////////////////////////////// */
/* NOTE(review): the body below appears CORRUPTED by the text extraction --
   "for (int i=0;i>2);" is not valid C++ and the declarations of uPos/vPos
   (used further down) are missing; everything between "i<" and ">>2" was
   presumably stripped as if it were an HTML tag.  Recover the original Y-copy
   loop and the uPos/vPos setup from upstream PacketVideo/AOSP sources before
   building.  Bytes are preserved here unchanged. */
void PVAVCEncoder::CopyToYUVIn(uint8 *YUV, int width, int height)
{
    // Save YUV pointer
    uint8* saveiYUVIn = iYUVIn;

    // Convert YUV input to have distinct Y U and V channels
    // Copy Y data
    for (int i=0;i>2);
    uint16 temp = 0;
    uint16* iVideoPtr = (uint16*)YUV;
    /* de-interleave the semi-planar chroma plane: each 16-bit read holds one
       U/V pair (low byte -> V, high byte -> U per the code below) */
    for (int i=0;i<(height>>1);i++){
        for (int j=0;j<(width>>1);j++){
            temp = *iVideoPtr++; // U1V1
            *vPos++= (uint8)(temp & 0xFF);
            *uPos++= (uint8)((temp >> 8) & 0xFF);
        }
    }

    // Restore pointer
    iYUVIn = saveiYUVIn;

    return ;
}

/* Map the wrapper's profile enum onto the core encoder's AVCProfile;
   unknown values fall back to baseline. */
AVCProfile PVAVCEncoder::mapProfile(TAVCEIProfile in)
{
    AVCProfile out;

    switch (in)
    {
        case EAVCEI_PROFILE_DEFAULT:
        case EAVCEI_PROFILE_BASELINE:
            out = AVC_BASELINE;
            break;
        case EAVCEI_PROFILE_MAIN:
            out = AVC_MAIN;
            break;
        case EAVCEI_PROFILE_EXTENDED:
            out = AVC_EXTENDED;
            break;
        case EAVCEI_PROFILE_HIGH:
            out = AVC_HIGH;
            break;
        case EAVCEI_PROFILE_HIGH10:
            out = AVC_HIGH10;
            break;
        case EAVCEI_PROFILE_HIGH422:
            out = AVC_HIGH422;
            break;
        case EAVCEI_PROFILE_HIGH444:
            out = AVC_HIGH444;
            break;
        default:
            out = AVC_BASELINE;
            break;
    }
    return out;
}

/* Map the wrapper's level enum onto the core encoder's AVCLevel. */
AVCLevel PVAVCEncoder::mapLevel(TAVCEILevel in)
{
    AVCLevel out;

    switch (in)
    {
        case EAVCEI_LEVEL_AUTODETECT:
            out = AVC_LEVEL_AUTO;
            break;
        case EAVCEI_LEVEL_1:
            out = AVC_LEVEL1;
            break;
        case EAVCEI_LEVEL_1B:
            out = AVC_LEVEL1_B;
            break;
        case EAVCEI_LEVEL_11:
            out = AVC_LEVEL1_1;
            break;
        case EAVCEI_LEVEL_12:
            out = AVC_LEVEL1_2;
            break;
        case EAVCEI_LEVEL_13:
            out = AVC_LEVEL1_3;
            break;
        case EAVCEI_LEVEL_2:
            out = AVC_LEVEL2;
            break;
        case
EAVCEI_LEVEL_21: out = AVC_LEVEL2_1; break; case EAVCEI_LEVEL_22: out = AVC_LEVEL2_2; break; case EAVCEI_LEVEL_3: out = AVC_LEVEL3; break; case EAVCEI_LEVEL_31: out = AVC_LEVEL3_1; break; case EAVCEI_LEVEL_32: out = AVC_LEVEL3_2; break; case EAVCEI_LEVEL_4: out = AVC_LEVEL4; break; case EAVCEI_LEVEL_41: out = AVC_LEVEL4_1; break; case EAVCEI_LEVEL_42: out = AVC_LEVEL4_2; break; case EAVCEI_LEVEL_5: out = AVC_LEVEL5; break; case EAVCEI_LEVEL_51: out = AVC_LEVEL5_1; break; default: out = AVC_LEVEL5_1; break; } return out; } /* ///////////////////////////////////////////////////////////////////////// */ int PVAVCEncoder::AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers) { int ii; uint frame_size = (frame_size_in_mbs << 8) + (frame_size_in_mbs << 7); if (iDPB) oscl_free(iDPB); // free previous one first iDPB = (uint8*) oscl_malloc(sizeof(uint8) * frame_size * num_buffers); if (iDPB == NULL) { return 0; } iNumFrames = num_buffers; if (iFrameUsed) oscl_free(iFrameUsed); // free previous one iFrameUsed = (bool*) oscl_malloc(sizeof(bool) * num_buffers); if (iFrameUsed == NULL) { return 0; } if (iFramePtr) oscl_free(iFramePtr); // free previous one iFramePtr = (uint8**) oscl_malloc(sizeof(uint8*) * num_buffers); if (iFramePtr == NULL) { return 0; } iFramePtr[0] = iDPB; iFrameUsed[0] = false; for (ii = 1; ii < (int)num_buffers; ii++) { iFrameUsed[ii] = false; iFramePtr[ii] = iFramePtr[ii-1] + frame_size; } return 1; } /* ///////////////////////////////////////////////////////////////////////// */ void PVAVCEncoder::AVC_FrameUnbind(int indx) { if (indx < iNumFrames) { iFrameUsed[indx] = false; } return ; } /* ///////////////////////////////////////////////////////////////////////// */ int PVAVCEncoder::AVC_FrameBind(int indx, uint8** yuv) { if ((iFrameUsed[indx] == true) || (indx >= iNumFrames)) { return 0; // already in used } iFrameUsed[indx] = true; *yuv = iFramePtr[indx]; return 1; } ================================================ FILE: 
RtspCamera/jni/avc_h264/enc/src/pvavcencoder_factory.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** * @file pvavcencoder_factory.cpp * @brief Singleton factory for PVAVCEncoder */ #include "oscl_base.h" #include "pvavcencoder.h" #include "pvavcencoder_factory.h" #include "oscl_error_codes.h" #include "oscl_exception.h" // Use default DLL entry point #include "oscl_dll.h" OSCL_DLL_ENTRY_POINT_DEFAULT() //////////////////////////////////////////////////////////////////////////// OSCL_EXPORT_REF PVAVCEncoderInterface* PVAVCEncoderFactory::CreatePVAVCEncoder() { PVAVCEncoderInterface* videoenc = NULL; videoenc = PVAVCEncoder::New(); if (videoenc == NULL) { OSCL_LEAVE(OsclErrNoMemory); } return videoenc; } //////////////////////////////////////////////////////////////////////////// OSCL_EXPORT_REF bool PVAVCEncoderFactory::DeletePVAVCEncoder(PVAVCEncoderInterface* aVideoEnc) { if (aVideoEnc) { delete aVideoEnc; return true; } return false; } ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/rate_control.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 
(the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avcenc_lib.h"
#include "oscl_base_macros.h"
#include "oscl_math.h"
#include "oscl_string.h"

/* rate control variables */
#define RC_MAX_QUANT 51
#define RC_MIN_QUANT 0   //cap to 10 to prevent rate fluctuation

#define MAD_MIN 1 /* handle the case of division by zero in RC */

/* local functions */

/* QP <-> quantizer step-size conversion helpers */
double QP2Qstep(int QP);
int Qstep2QP(double Qstep);

/* average MAD of the current frame, from the per-MB table */
double ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl);

/* per-frame target bit budget (bit-transfer counters model) */
void targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP);

/* MAD-based QP selection from stored rate-distortion samples */
void calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP);

/* post-frame bookkeeping for skipped frames and VBV underflow */
void updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP);

/* record (QP, bits, MAD) sample for the current frame position */
void AVCSaveRDSamples(MultiPass *pMP, int counter_samples);

/* VBV buffer update and frame-skip decision after encoding a frame */
void updateRateControl(AVCRateControl *rateControl, int nal_type);

/* Return the frame-level quantizer most recently chosen by the rate
   control (current value of rateCtrl->Qc). */
int GetAvgFrameQP(AVCRateControl *rateCtrl)
{
    return rateCtrl->Qc;
}

/* Decide, from the capture timestamp modTime (milliseconds, judging by the
   /1000 scaling below -- TODO confirm with callers), whether this frame is
   encoded or dropped, derive its frame number into *frameNum, and select the
   picture type (IDR for the first frame / IDR-period expiry, P otherwise).
   Returns AVCENC_FAIL when the frame must be skipped (late arrival or
   rate-control-mandated skip). */
AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum)
{
    AVCCommonObj *video = encvid->common;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    uint32 modTimeRef = encvid->modTimeRef;
    int32 currFrameNum ;
    int frameInc;

    /* check with the buffer fullness to make sure that we have enough bits to encode this frame */
    /* we can use a threshold to guarantee minimum picture quality */
    /**********************************/

    /* for now, the default is to encode
every frame, To Be Changed */ if (rateCtrl->first_frame) { encvid->modTimeRef = modTime; encvid->wrapModTime = 0; encvid->prevFrameNum = 0; encvid->prevProcFrameNum = 0; *frameNum = 0; /* set frame type to IDR-frame */ video->nal_unit_type = AVC_NALTYPE_IDR; sliceHdr->slice_type = AVC_I_ALL_SLICE; video->slice_type = AVC_I_SLICE; return AVCENC_SUCCESS; } else { if (modTime < modTimeRef) /* modTime wrapped around */ { encvid->wrapModTime += ((uint32)0xFFFFFFFF - modTimeRef) + 1; encvid->modTimeRef = modTimeRef = 0; } modTime += encvid->wrapModTime; /* wrapModTime is non zero after wrap-around */ /* Calculate frame number based on frame rate starting from modTimeRef */ /* Note, this value is totally independent from sliceHdr->frame_num or video->CurrPicNum */ currFrameNum = (int32)(((modTime - modTimeRef) * rateCtrl->frame_rate + 200) / 1000); /* add small roundings */ if (currFrameNum <= (int32)encvid->prevProcFrameNum) { return AVCENC_FAIL; /* this is a late frame do not encode it */ } frameInc = currFrameNum - encvid->prevProcFrameNum; /* Check how many frames have been skipped since the last processed frame */ if (frameInc < rateCtrl->skip_next_frame + 1) { return AVCENC_FAIL; /* frame skip required to maintain the target bit rate. */ } RCUpdateBuffer(video, rateCtrl, frameInc - rateCtrl->skip_next_frame); /* in case more frames dropped */ /* This part would be similar to DetermineVopType of m4venc */ if ((currFrameNum >= (int32)rateCtrl->idrPeriod && rateCtrl->idrPeriod > 0) /* exceed IDR period */ || (currFrameNum >= (int32)video->MaxFrameNum)) /* this line for all P-frames (idrPeriod=0) */ { /* Re-assign modTimeRef to the new IDR frame */ encvid->modTimeRef += (uint32)(currFrameNum * 1000 / rateCtrl->frame_rate); /* Set frame type to IDR-frame */ video->nal_unit_type = AVC_NALTYPE_IDR; sliceHdr->slice_type = AVC_I_ALL_SLICE; video->slice_type = AVC_I_SLICE; encvid->prevProcFrameNum = *frameNum = 0; // Reset frameNum to zero. 
} else { video->nal_unit_type = AVC_NALTYPE_SLICE; sliceHdr->slice_type = AVC_P_ALL_SLICE; video->slice_type = AVC_P_SLICE; encvid->prevProcFrameNum = currFrameNum; *frameNum = currFrameNum; } } return AVCENC_SUCCESS; } void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc) { int tmp; MultiPass *pMP = rateCtrl->pMP; OSCL_UNUSED_ARG(video); if (rateCtrl->rcEnable == TRUE) { if (frameInc > 1) { tmp = rateCtrl->bitsPerFrame * (frameInc - 1); rateCtrl->VBV_fullness -= tmp; pMP->counter_BTsrc += 10 * (frameInc - 1); /* Check buffer underflow */ if (rateCtrl->VBV_fullness < rateCtrl->low_bound) { rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2; rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound; pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10)); } } } } AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle) { AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject; AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; double L1, L2, L3, bpp; int qp; int i, j; rateCtrl->basicUnit = video->PicSizeInMbs; rateCtrl->MADofMB = (double*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, video->PicSizeInMbs * sizeof(double), DEFAULT_ATTR); if (!rateCtrl->MADofMB) { goto CLEANUP_RC; } if (rateCtrl->rcEnable == TRUE) { rateCtrl->pMP = (MultiPass*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, sizeof(MultiPass), DEFAULT_ATTR); if (!rateCtrl->pMP) { goto CLEANUP_RC; } oscl_memset(rateCtrl->pMP, 0, sizeof(MultiPass)); rateCtrl->pMP->encoded_frames = -1; /* forget about the very first I frame */ /* RDInfo **pRDSamples */ rateCtrl->pMP->pRDSamples = (RDInfo **)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (30 * sizeof(RDInfo *)), DEFAULT_ATTR); if (!rateCtrl->pMP->pRDSamples) { goto CLEANUP_RC; } for (i = 0; i < 30; i++) { rateCtrl->pMP->pRDSamples[i] = (RDInfo 
*)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (32 * sizeof(RDInfo)), DEFAULT_ATTR); if (!rateCtrl->pMP->pRDSamples[i]) { goto CLEANUP_RC; } for (j = 0; j < 32; j++) oscl_memset(&(rateCtrl->pMP->pRDSamples[i][j]), 0, sizeof(RDInfo)); } rateCtrl->pMP->frameRange = (int)(rateCtrl->frame_rate * 1.0); /* 1.0s time frame*/ rateCtrl->pMP->frameRange = AVC_MAX(rateCtrl->pMP->frameRange, 5); rateCtrl->pMP->frameRange = AVC_MIN(rateCtrl->pMP->frameRange, 30); rateCtrl->pMP->framePos = -1; rateCtrl->bitsPerFrame = (int32)(rateCtrl->bitRate / rateCtrl->frame_rate); /* BX rate control */ rateCtrl->skip_next_frame = 0; /* must be initialized */ rateCtrl->Bs = rateCtrl->cpbSize; rateCtrl->TMN_W = 0; rateCtrl->VBV_fullness = (int)(rateCtrl->Bs * 0.5); /* rateCtrl->Bs */ rateCtrl->encoded_frames = 0; rateCtrl->TMN_TH = rateCtrl->bitsPerFrame; rateCtrl->max_BitVariance_num = (int)((OsclFloat)(rateCtrl->Bs - rateCtrl->VBV_fullness) / (rateCtrl->bitsPerFrame / 10.0)) - 5; if (rateCtrl->max_BitVariance_num < 0) rateCtrl->max_BitVariance_num += 5; // Set the initial buffer fullness /* According to the spec, the initial buffer fullness needs to be set to 1/3 */ rateCtrl->VBV_fullness = (int)(rateCtrl->Bs / 3.0 - rateCtrl->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */ rateCtrl->pMP->counter_BTsrc = (int)((rateCtrl->Bs / 2.0 - rateCtrl->Bs / 3.0) / (rateCtrl->bitsPerFrame / 10.0)); rateCtrl->TMN_W = (int)(rateCtrl->VBV_fullness + rateCtrl->pMP->counter_BTsrc * (rateCtrl->bitsPerFrame / 10.0)); rateCtrl->low_bound = -rateCtrl->Bs / 2; rateCtrl->VBV_fullness_offset = 0; /* Setting the bitrate and framerate */ rateCtrl->pMP->bitrate = rateCtrl->bitRate; rateCtrl->pMP->framerate = rateCtrl->frame_rate; rateCtrl->pMP->target_bits_per_frame = rateCtrl->pMP->bitrate / rateCtrl->pMP->framerate; /*compute the initial QP*/ bpp = 1.0 * rateCtrl->bitRate / (rateCtrl->frame_rate * (video->PicSizeInMbs << 8)); if (video->PicWidthInSamplesL == 176) { L1 = 0.1; L2 = 0.3; L3 = 0.6; } else if 
(video->PicWidthInSamplesL == 352) { L1 = 0.2; L2 = 0.6; L3 = 1.2; } else { L1 = 0.6; L2 = 1.4; L3 = 2.4; } if (rateCtrl->initQP == 0) { if (bpp <= L1) qp = 35; else if (bpp <= L2) qp = 25; else if (bpp <= L3) qp = 20; else qp = 15; rateCtrl->initQP = qp; } rateCtrl->Qc = rateCtrl->initQP; } return AVCENC_SUCCESS; CLEANUP_RC: CleanupRateControlModule(avcHandle); return AVCENC_MEMORY_FAIL; } void CleanupRateControlModule(AVCHandle *avcHandle) { AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject; AVCRateControl *rateCtrl = encvid->rateCtrl; int i; if (rateCtrl->MADofMB) { avcHandle->CBAVC_Free(avcHandle->userData, (int)(rateCtrl->MADofMB)); } if (rateCtrl->pMP) { if (rateCtrl->pMP->pRDSamples) { for (i = 0; i < 30; i++) { if (rateCtrl->pMP->pRDSamples[i]) { avcHandle->CBAVC_Free(avcHandle->userData, (int)rateCtrl->pMP->pRDSamples[i]); } } avcHandle->CBAVC_Free(avcHandle->userData, (int)rateCtrl->pMP->pRDSamples); } avcHandle->CBAVC_Free(avcHandle->userData, (int)(rateCtrl->pMP)); } return ; } void RCInitGOP(AVCEncObject *encvid) { /* in BX RC, there's no GOP-level RC */ OSCL_UNUSED_ARG(encvid); return ; } void RCInitFrameQP(AVCEncObject *encvid) { AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCPicParamSet *picParam = video->currPicParams; MultiPass *pMP = rateCtrl->pMP; if (rateCtrl->rcEnable == TRUE) { /* frame layer rate control */ if (rateCtrl->encoded_frames == 0) { video->QPy = rateCtrl->Qc = rateCtrl->initQP; } else { calculateQuantizer_Multipass(encvid, video, rateCtrl, pMP); video->QPy = rateCtrl->Qc; } rateCtrl->NumberofHeaderBits = 0; rateCtrl->NumberofTextureBits = 0; rateCtrl->numFrameBits = 0; // reset /* update pMP->framePos */ if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0; if (rateCtrl->T == 0) { pMP->counter_BTdst = (int)(rateCtrl->frame_rate * 7.5 + 0.5); /* 0.75s time frame */ pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, (int)(rateCtrl->max_BitVariance_num / 2 * 0.40)); /* 0.75s time 
frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */ pMP->counter_BTdst = AVC_MAX(pMP->counter_BTdst, (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.30 / (rateCtrl->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */ pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */ pMP->target_bits = rateCtrl->T = rateCtrl->TMN_TH = (int)(rateCtrl->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1)); pMP->diff_counter = pMP->counter_BTdst; } /* collect the necessary data: target bits, actual bits, mad and QP */ pMP->target_bits = rateCtrl->T; pMP->QP = video->QPy; pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl); if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */ pMP->bitrate = rateCtrl->bitRate; /* calculated in RCVopQPSetting */ pMP->framerate = rateCtrl->frame_rate; /* first pass encoding */ pMP->nRe_Quantized = 0; } // rcEnable else { video->QPy = rateCtrl->initQP; } // printf(" %d ",video->QPy); if (video->CurrPicNum == 0 && encvid->outOfBandParamSet == FALSE) { picParam->pic_init_qs_minus26 = 0; picParam->pic_init_qp_minus26 = video->QPy - 26; } // need this for motion estimation encvid->lambda_mode = QP2QUANT[AVC_MAX(0, video->QPy-SHIFT_QP)]; encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode); return ; } /* Mad based variable bit allocation + QP calculation with a new quadratic method */ void calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP) { int prev_actual_bits = 0, curr_target, /*pos=0,*/i, j; OsclFloat Qstep, prev_QP = 0.625; OsclFloat curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP; /* Mad based variable bit allocation */ targetBitCalculation(encvid, video, rateCtrl, pMP); if (rateCtrl->T <= 0 || rateCtrl->totalSAD == 0) { if (rateCtrl->T < 0) rateCtrl->Qc = RC_MAX_QUANT; return; } /* 
---------------------------------------------------------------------------------------------------*/ /* current frame QP estimation */ curr_target = rateCtrl->T; curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */ curr_RD = (OsclFloat)curr_target / curr_mad; if (rateCtrl->skip_next_frame == -1) // previous was skipped { i = pMP->framePos; prev_mad = pMP->pRDSamples[i][0].mad; prev_QP = pMP->pRDSamples[i][0].QP; prev_actual_bits = pMP->pRDSamples[i][0].actual_bits; } else { /* Another version of search the optimal point */ prev_mad = 0.0; i = 0; while (i < pMP->frameRange && prev_mad < 0.001) /* find first one with nonzero prev_mad */ { prev_mad = pMP->pRDSamples[i][0].mad; i++; } if (i < pMP->frameRange) { prev_actual_bits = pMP->pRDSamples[i-1][0].actual_bits; for (j = 0; i < pMP->frameRange; i++) { if (pMP->pRDSamples[i][0].mad != 0 && AVC_ABS(prev_mad - curr_mad) > AVC_ABS(pMP->pRDSamples[i][0].mad - curr_mad)) { prev_mad = pMP->pRDSamples[i][0].mad; prev_actual_bits = pMP->pRDSamples[i][0].actual_bits; j = i; } } prev_QP = QP2Qstep(pMP->pRDSamples[j][0].QP); for (i = 1; i < pMP->samplesPerFrame[j]; i++) { if (AVC_ABS(prev_actual_bits - curr_target) > AVC_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target)) { prev_actual_bits = pMP->pRDSamples[j][i].actual_bits; prev_QP = QP2Qstep(pMP->pRDSamples[j][i].QP); } } } } // quadratic approximation if (prev_mad > 0.001) // only when prev_mad is greater than 0, otherwise keep using the same QP { prev_RD = (OsclFloat)prev_actual_bits / prev_mad; //rateCtrl->Qc = (Int)(prev_QP * sqrt(prev_actual_bits/curr_target) + 0.4); if (prev_QP == 0.625) // added this to allow getting out of QP = 0 easily { Qstep = (int)(prev_RD / curr_RD + 0.5); } else { // rateCtrl->Qc =(Int)(prev_QP * M4VENC_SQRT(prev_RD/curr_RD) + 0.9); if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0) Qstep = (int)(prev_QP * (oscl_sqrt(prev_RD / curr_RD) + 
prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */ else Qstep = (int)(prev_QP * (oscl_sqrt(prev_RD / curr_RD) + oscl_pow(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9); } // lower bound on Qc should be a function of curr_mad // When mad is already low, lower bound on Qc doesn't have to be small. // Note, this doesn't work well for low complexity clip encoded at high bit rate // it doesn't hit the target bit rate due to this QP lower bound. /// if((curr_mad < 8) && (rateCtrl->Qc < 12)) rateCtrl->Qc = 12; // else if((curr_mad < 128) && (rateCtrl->Qc < 3)) rateCtrl->Qc = 3; rateCtrl->Qc = Qstep2QP(Qstep); if (rateCtrl->Qc < RC_MIN_QUANT) rateCtrl->Qc = RC_MIN_QUANT; if (rateCtrl->Qc > RC_MAX_QUANT) rateCtrl->Qc = RC_MAX_QUANT; } /* active bit resource protection */ aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (OsclFloat)pMP->encoded_frames); average_mad = (pMP->encoded_frames == 0 ? 0 : pMP->sum_mad / (OsclFloat)pMP->encoded_frames); /* this function is called from the scond encoded frame*/ if (pMP->diff_counter == 0 && ((OsclFloat)rateCtrl->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) && pMP->counter_BTsrc <= (pMP->counter_BTdst + (int)(pMP->framerate*1.0 + 0.5))) { rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame / 10.0); rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W; pMP->counter_BTsrc++; pMP->diff_counter--; } } void targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP) { OSCL_UNUSED_ARG(encvid); OsclFloat curr_mad;//, average_mad; int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound; /* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */ /* some stuff about frame dropping remained here to be done because pMP cannot be inserted into updateRateControl()*/ updateRC_PostProc(rateCtrl, pMP); /* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid interger overflow */ if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 
1000) { pMP->counter_BTsrc -= 1000; pMP->counter_BTdst -= 1000; } /* ---------------------------------------------------------------------------------------------------*/ /* target calculation */ curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */ diff_counter_BTsrc = diff_counter_BTdst = 0; pMP->diff_counter = 0; /*1.calculate average mad */ pMP->sum_mad += curr_mad; //average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(OsclFloat)(pMP->encoded_frames+1)); /* this function is called from the scond encoded frame*/ //pMP->aver_mad = average_mad; if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */ pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1); if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0) pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1); /*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */ if (pMP->overlapped_win_size == 0) { /* original verison */ if (curr_mad > pMP->aver_mad*1.1) { if (curr_mad / (pMP->aver_mad + 0.0001) > 2) diff_counter_BTdst = (int)(oscl_sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10; //diff_counter_BTdst = (int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10; else diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10; } else /* curr_mad <= average_mad*1.1 */ //diff_counter_BTsrc = 10 - (int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4); diff_counter_BTsrc = 10 - (int)(oscl_sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5); /* actively fill in the possible gap */ if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 && curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst) diff_counter_BTsrc = 1; 
} else if (pMP->overlapped_win_size > 0) { /* transition time: use previous average mad "pMP->aver_mad_prev" instead of the current average mad "pMP->aver_mad" */ if (curr_mad > pMP->aver_mad_prev*1.1) { if (curr_mad / pMP->aver_mad_prev > 2) diff_counter_BTdst = (int)(oscl_sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10; //diff_counter_BTdst = (int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10; else diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10; } else /* curr_mad <= average_mad*1.1 */ //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4); diff_counter_BTsrc = 10 - (int)(oscl_sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5); /* actively fill in the possible gap */ if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 && curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst) diff_counter_BTsrc = 1; if (--pMP->overlapped_win_size <= 0) pMP->overlapped_win_size = 0; } /* if difference is too much, do clipping */ /* First, set the upper bound for current bit allocation variance: 80% of available buffer */ bound = (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rateCtrl->Bs */ diff_counter_BTsrc = AVC_MIN(diff_counter_BTsrc, bound); diff_counter_BTdst = AVC_MIN(diff_counter_BTdst, bound); /* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */ bound = 50; // if(video->encParams->RC_Type == CBR_LOWDELAY) // not necessary bound = 10; -- For Low delay */ diff_counter_BTsrc = AVC_MIN(diff_counter_BTsrc, bound); diff_counter_BTdst = AVC_MIN(diff_counter_BTdst, bound); /* Third, check the buffer */ prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc; curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc); if (AVC_ABS(prev_counter_diff) >= 
rateCtrl->max_BitVariance_num || AVC_ABS(curr_counter_diff) >= rateCtrl->max_BitVariance_num) { //diff_counter_BTsrc = diff_counter_BTdst = 0; if (curr_counter_diff > rateCtrl->max_BitVariance_num && diff_counter_BTdst) { diff_counter_BTdst = (rateCtrl->max_BitVariance_num - prev_counter_diff) + diff_counter_BTsrc; if (diff_counter_BTdst < 0) diff_counter_BTdst = 0; } else if (curr_counter_diff < -rateCtrl->max_BitVariance_num && diff_counter_BTsrc) { diff_counter_BTsrc = diff_counter_BTdst - (-rateCtrl->max_BitVariance_num - prev_counter_diff); if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0; } } /*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */ rateCtrl->TMN_TH = (int)(pMP->target_bits_per_frame); pMP->diff_counter = 0; if (diff_counter_BTsrc) { rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1); pMP->diff_counter = -diff_counter_BTsrc; } else if (diff_counter_BTdst) { rateCtrl->TMN_TH += (int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1); pMP->diff_counter = diff_counter_BTdst; } /*4.update pMP->counter_BTsrc, pMP->counter_BTdst */ pMP->counter_BTsrc += diff_counter_BTsrc; pMP->counter_BTdst += diff_counter_BTdst; /*5.target bit calculation */ rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W; return ; } void updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP) { if (rateCtrl->skip_next_frame > 0) /* skip next frame */ { pMP->counter_BTsrc += 10 * rateCtrl->skip_next_frame; } else if (rateCtrl->skip_next_frame == -1) /* skip current frame */ { pMP->counter_BTdst -= pMP->diff_counter; pMP->counter_BTsrc += 10; pMP->sum_mad -= pMP->mad; pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (pMP->encoded_frames - 1 + 0.0001); pMP->sum_QP -= pMP->QP; pMP->encoded_frames --; } /* some stuff in update VBV_fullness remains here */ //if(rateCtrl->VBV_fullness < -rateCtrl->Bs/2) /* rateCtrl->Bs */ if (rateCtrl->VBV_fullness < rateCtrl->low_bound) { rateCtrl->VBV_fullness = rateCtrl->low_bound; // 
-rateCtrl->Bs/2;
        rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;
        /* refill the bit-transfer source counter so the model recovers from the underflow */
        pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
    }
}

/* Derive the chroma QP and the quantization constants from the current
   macroblock's luma QP, and refresh the RD lambdas.
   (QP * 43) >> 8 is a fixed-point approximation of QP / 6 (43/256 ~= 1/6)
   that is exact over the legal QP range 0..51 used here. */
void RCInitChromaQP(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    AVCMacroblock *currMB = video->currMB;
    int q_bits;

    /* we have to do the same thing for AVC_CLIP3(0,51,video->QSy) */
    video->QPy_div_6 = (currMB->QPy * 43) >> 8;
    video->QPy_mod_6 = currMB->QPy - 6 * video->QPy_div_6;
    /* chroma QP: offset luma QP by chroma_qp_index_offset, clip to [0,51],
       then remap through mapQPi2QPc (NOTE(review): presumably the standard
       H.264 chroma QP mapping table -- confirm against its definition) */
    currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, currMB->QPy + video->currPicParams->chroma_qp_index_offset)];
    video->QPc_div_6 = (video->QPc * 43) >> 8;
    video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;

    /* pre-calculate this to save computation */
    /* NOTE(review): 682 ~= 2048/3 and 342 ~= 2048/6, i.e. the usual ~1/3
       (intra) and ~1/6 (inter) quantization rounding offsets in fixed
       point -- verify against the forward-quant code that consumes
       qp_const/qp_const_c. */
    q_bits = 4 + video->QPy_div_6;
    if (video->slice_type == AVC_I_SLICE)
    {
        encvid->qp_const = 682 << q_bits;      // intra
    }
    else
    {
        encvid->qp_const = 342 << q_bits;      // inter
    }

    q_bits = 4 + video->QPc_div_6;
    if (video->slice_type == AVC_I_SLICE)
    {
        encvid->qp_const_c = 682 << q_bits;    // intra
    }
    else
    {
        encvid->qp_const_c = 342 << q_bits;    // inter
    }

    /* lambdas for mode decision and motion search follow the (clipped) luma QP */
    encvid->lambda_mode = QP2QUANT[AVC_MAX(0, currMB->QPy-SHIFT_QP)];
    encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);

    return ;
}

/* Seed the macroblock QP from the picture-level QP, then derive chroma QP
   and quantization constants. */
void RCInitMBQP(AVCEncObject *encvid)
{
    AVCCommonObj *video = encvid->common;
    AVCMacroblock *currMB = video->currMB;

    currMB->QPy = video->QPy; /* set to previous value or picture level */

    RCInitChromaQP(encvid);
}

/* Accumulate per-MB header/texture bit counts into the frame totals. */
void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits)
{
    OSCL_UNUSED_ARG(video);
    rateCtrl->numMBHeaderBits = num_header_bits;
    rateCtrl->numMBTextureBits = num_texture_bits;
    rateCtrl->NumberofHeaderBits += rateCtrl->numMBHeaderBits;
    rateCtrl->NumberofTextureBits += rateCtrl->numMBTextureBits;
}

/* Restore the macroblock QP to the picture-level QP and re-derive the
   chroma QP / quant constants. */
void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid)
{
    currMB->QPy = video->QPy; /* use previous QP */
    RCInitChromaQP(encvid);

    return ;
}

void
RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch) { AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; uint32 dmin_lx; if (rateCtrl->rcEnable == TRUE) { if (currMB->mb_intra) { if (currMB->mbMode == AVC_I16) { dmin_lx = (0xFFFF << 16) | orgPitch; rateCtrl->MADofMB[video->mbNum] = AVCSAD_Macroblock_C(orgL, encvid->pred_i16[currMB->i16Mode], dmin_lx, NULL); } else /* i4 */ { rateCtrl->MADofMB[video->mbNum] = encvid->i4_sad / 256.; } } /* for INTER, we have already saved it with the MV search */ } return ; } AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid) { AVCCommonObj *video = encvid->common; AVCRateControl *rateCtrl = encvid->rateCtrl; AVCEnc_Status status = AVCENC_SUCCESS; MultiPass *pMP = rateCtrl->pMP; int diff_BTCounter; int nal_type = video->nal_unit_type; /* update the complexity weight of I, P, B frame */ if (rateCtrl->rcEnable == TRUE) { pMP->actual_bits = rateCtrl->numFrameBits; pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl); AVCSaveRDSamples(pMP, 0); pMP->encoded_frames++; /* for pMP->samplesPerFrame */ pMP->samplesPerFrame[pMP->framePos] = 0; pMP->sum_QP += pMP->QP; /* update pMP->counter_BTsrc, pMP->counter_BTdst */ /* re-allocate the target bit again and then stop encoding */ diff_BTCounter = (int)((OsclFloat)(rateCtrl->TMN_TH - rateCtrl->TMN_W - pMP->actual_bits) / (pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1); if (diff_BTCounter >= 0) pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */ else pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */ rateCtrl->TMN_TH -= (int)((OsclFloat)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1)); rateCtrl->T = pMP->target_bits = rateCtrl->TMN_TH - rateCtrl->TMN_W; pMP->diff_counter -= diff_BTCounter; rateCtrl->Rc = rateCtrl->numFrameBits; /* Total Bits for current frame */ rateCtrl->Hc = rateCtrl->NumberofHeaderBits; /* Total Bits 
in Header and Motion Vector */ /* BX_RC */ updateRateControl(rateCtrl, nal_type); if (rateCtrl->skip_next_frame == -1) // skip current frame { status = AVCENC_SKIPPED_PICTURE; } } rateCtrl->first_frame = 0; // reset here after we encode the first frame. return status; } void AVCSaveRDSamples(MultiPass *pMP, int counter_samples) { /* for pMP->pRDSamples */ pMP->pRDSamples[pMP->framePos][counter_samples].QP = pMP->QP; pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits; pMP->pRDSamples[pMP->framePos][counter_samples].mad = pMP->mad; pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (OsclFloat)pMP->actual_bits / (pMP->mad + 0.0001); return ; } void updateRateControl(AVCRateControl *rateCtrl, int nal_type) { int frame_bits; MultiPass *pMP = rateCtrl->pMP; /* BX rate contro\l */ frame_bits = (int)(rateCtrl->bitRate / rateCtrl->frame_rate); rateCtrl->TMN_W += (rateCtrl->Rc - rateCtrl->TMN_TH); rateCtrl->VBV_fullness += (rateCtrl->Rc - frame_bits); //rateCtrl->Rp); //if(rateCtrl->VBV_fullness < 0) rateCtrl->VBV_fullness = -1; rateCtrl->encoded_frames++; /* frame dropping */ rateCtrl->skip_next_frame = 0; if ((rateCtrl->VBV_fullness > rateCtrl->Bs / 2) && nal_type != AVC_NALTYPE_IDR) /* skip the current frame */ /* rateCtrl->Bs */ { rateCtrl->TMN_W -= (rateCtrl->Rc - rateCtrl->TMN_TH); rateCtrl->VBV_fullness -= rateCtrl->Rc; rateCtrl->skip_next_frame = -1; } else if ((OsclFloat)(rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95) /* skip next frame */ { rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp; rateCtrl->skip_next_frame = 1; pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10)); /* BX_1, skip more than 1 frames */ //while(rateCtrl->VBV_fullness > rateCtrl->Bs*0.475) while ((rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95) { 
rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp; rateCtrl->skip_next_frame++; pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10)); } /* END BX_1 */ } } double ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl) { double TotalMAD; int i; TotalMAD = 0.0; for (i = 0; i < (int)video->PicSizeInMbs; i++) TotalMAD += rateCtrl->MADofMB[i]; TotalMAD /= video->PicSizeInMbs; return TotalMAD; } /* convert from QP to Qstep */ double QP2Qstep(int QP) { int i; double Qstep; static const double QP2QSTEP[6] = { 0.625, 0.6875, 0.8125, 0.875, 1.0, 1.125 }; Qstep = QP2QSTEP[QP % 6]; for (i = 0; i < (QP / 6); i++) Qstep *= 2; return Qstep; } /* convert from step size to QP */ int Qstep2QP(double Qstep) { int q_per = 0, q_rem = 0; // assert( Qstep >= QP2Qstep(0) && Qstep <= QP2Qstep(51) ); if (Qstep < QP2Qstep(0)) return 0; else if (Qstep > QP2Qstep(51)) return 51; while (Qstep > QP2Qstep(5)) { Qstep /= 2; q_per += 1; } if (Qstep <= (0.625 + 0.6875) / 2) { Qstep = 0.625; q_rem = 0; } else if (Qstep <= (0.6875 + 0.8125) / 2) { Qstep = 0.6875; q_rem = 1; } else if (Qstep <= (0.8125 + 0.875) / 2) { Qstep = 0.8125; q_rem = 2; } else if (Qstep <= (0.875 + 1.0) / 2) { Qstep = 0.875; q_rem = 3; } else if (Qstep <= (1.0 + 1.125) / 2) { Qstep = 1.0; q_rem = 4; } else { Qstep = 1.125; q_rem = 5; } return (q_per * 6 + q_rem); } void RCUpdateParams(AVCRateControl *rateCtrl, AVCEncObject *encvid) { int32 prevFrameNum, newFrameNum; uint32 prevModTime; if (rateCtrl->frame_rate != rateCtrl->pMP->framerate) { /* this part for frame rate change */ rateCtrl->pMP->frameRange = (int)(rateCtrl->frame_rate * 1.0); /* 1.0s time frame*/ rateCtrl->pMP->frameRange = AVC_MAX(rateCtrl->pMP->frameRange, 5); rateCtrl->pMP->frameRange = AVC_MIN(rateCtrl->pMP->frameRange, 30); prevFrameNum = encvid->prevProcFrameNum; // previous frame number // convert from frame num to time based on the previous frame rate prevModTime = 
(uint32)(prevFrameNum * 1000 / rateCtrl->pMP->framerate); // offseted by modTimeRef // convert back from time to frame num based on new frame rate newFrameNum = (int32)((prevModTime * rateCtrl->frame_rate) / 1000); // assign the newFrameNum to prevFrameNum // note, this will cause the IDR frame to come earlier and later than expected !! encvid->prevProcFrameNum = newFrameNum; } // recalculate fixed values that are dependent on bitrate and framerate rateCtrl->bitsPerFrame = (int32)(rateCtrl->bitRate / rateCtrl->frame_rate); rateCtrl->max_BitVariance_num = (int)((OsclFloat)(rateCtrl->Bs - rateCtrl->VBV_fullness) / (rateCtrl->bitsPerFrame / 10.0)) - 5; if (rateCtrl->max_BitVariance_num < 0) rateCtrl->max_BitVariance_num += 5; /* no change to rateCtrl->cpbSize, rateCtrl->Bs, rateCtrl->low_bound, rateCtrl->VBV_fullness_offset*/ /* keep continuity to the following values */ /* rateCtrl->pMP->framePos, rateCtrl->TMN_TH, rateCtrl->TMN_W */ /* rateCtrl->VBV_fullness, rateCtrl->pMP->counter_BTsrc, */ /* reset some stats for CalculateQuantizerMultiPass and active bit resource protection */ rateCtrl->pMP->sum_QP /= rateCtrl->pMP->encoded_frames; // reset it to 1 rateCtrl->pMP->encoded_frames = 1; rateCtrl->pMP->sum_mad = 0; rateCtrl->T = 0; /* Finalizing bitrate and framerate to pMP structure*/ rateCtrl->pMP->bitrate = rateCtrl->bitRate; rateCtrl->pMP->framerate = rateCtrl->frame_rate; rateCtrl->pMP->target_bits_per_frame = rateCtrl->pMP->bitrate / rateCtrl->pMP->framerate; return ; } ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/residual.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avcenc_lib.h"
#include "oscl_mem.h"

/* Encode one macroblock as I_PCM: signal mb_type (ue_v 25), byte-align the
 * bitstream, then copy the raw luma (16x16) and both chroma (8x8) planes from
 * the input frame into both the reconstruction buffer and the bitstream.
 * Word-at-a-time copy/write; the two #if branches differ only in word width.
 * NOTE(review): the chroma source/dest offset ((offset1 + x_position) >> 2)
 * folds the pitch and x halving into one shift — assumes chroma pitch is
 * orgPitch/2; confirm against caller. */
AVCEnc_Status EncodeIntraPCM(AVCEncObject *encvid)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    AVCCommonObj *video = encvid->common;
    AVCFrameIO *currInput = encvid->currInput;
    AVCEncBitstream *stream = encvid->bitstream;
    int x_position = (video->mb_x << 4);
    int y_position = (video->mb_y << 4);
    int orgPitch = currInput->pitch;
    int offset1 = y_position * orgPitch + x_position;
    int i, j;
    int offset;
    uint8 *pDst, *pSrc;
    uint code;

    ue_v(stream, 25);

    i = stream->bit_left & 0x7;
    if (i) /* not byte-aligned */
    {
        BitstreamWriteBits(stream, 0, i);
    }

    pSrc = currInput->YCbCr[0] + offset1;
    pDst = video->currPic->Sl + offset1;
    offset = video->PicWidthInSamplesL - 16;

    /* at this point bitstream is byte-aligned */
    j = 16;
    while (j > 0)
    {
#if (WORD_SIZE==32)
        for (i = 0; i < 4; i++)
        {
            code = *((uint*)pSrc);
            pSrc += 4;
            *((uint*)pDst) = code;
            pDst += 4;
            status = BitstreamWriteBits(stream, 32, code);
        }
#else
        for (i = 0; i < 8; i++)
        {
            code = *((uint*)pSrc);
            pSrc += 2;
            *((uint*)pDst) = code;
            pDst += 2;
            status = BitstreamWriteBits(stream, 16, code);
        }
#endif
        pDst += offset;
        pSrc += offset;
        j--;
    }
    if (status != AVCENC_SUCCESS)  /* check only once per line */
        return status;

    pDst = video->currPic->Scb + ((offset1 + x_position) >> 2);
    pSrc = currInput->YCbCr[1] + ((offset1 + x_position) >> 2);
    offset >>= 1;

    j = 8;
    while (j > 0)
    {
#if (WORD_SIZE==32)
        for (i = 0; i < 2; i++)
        {
            code = *((uint*)pSrc);
            pSrc += 4;
            *((uint*)pDst) = code;
            pDst += 4;
            status = BitstreamWriteBits(stream, 32, code);
        }
#else
        for (i = 0; i < 4; i++)
        {
            code = *((uint*)pSrc);
            pSrc += 2;
            *((uint*)pDst) = code;
            pDst += 2;
            status = BitstreamWriteBits(stream, 16, code);
        }
#endif
        pDst += offset;
        pSrc += offset;
        j--;
    }
    if (status != AVCENC_SUCCESS)  /* check only once per line */
        return status;

    pDst = video->currPic->Scr + ((offset1 + x_position) >> 2);
    pSrc = currInput->YCbCr[2] + ((offset1 + x_position) >> 2);

    j = 8;
    while (j > 0)
    {
#if (WORD_SIZE==32)
        for (i = 0; i < 2; i++)
        {
            code = *((uint*)pSrc);
            pSrc += 4;
            *((uint*)pDst) = code;
            pDst += 4;
            status = BitstreamWriteBits(stream, 32, code);
        }
#else
        for (i = 0; i < 4; i++)
        {
            code = *((uint*)pSrc);
            pSrc += 2;
            *((uint*)pDst) = code;
            pDst += 2;
            status = BitstreamWriteBits(stream, 16, code);
        }
#endif
        pDst += offset;
        pSrc += offset;
        j--;
    }

    return status;
}

/* CAVLC-encode one 4x4 (or chroma-DC 2x2) residual block: writes
 * coeff_token, trailing-one signs, level codes (adaptive VLC0..VLC6),
 * total_zeros and run_before, per H.264 clause 9.2 (in reverse zig-zag
 * order: level[TotalCoeff-1] is the first coefficient coded).
 * `cindx` selects the block; for Intra16DC/ChromaDC it doubles as the
 * coefficient count (marked "special case" below). Returns AVCENC_FAIL
 * on an unknown residual type, else the last bitstream status. */
AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int cindx, AVCMacroblock *currMB)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    AVCCommonObj *video = encvid->common;
    int i, maxNumCoeff, nC;
    int cdc = 0, cac = 0;
    int TrailingOnes;
    AVCEncBitstream *stream = encvid->bitstream;
    uint trailing_ones_sign_flag;
    int zerosLeft;
    int *level, *run;
    int TotalCoeff;
    const static int incVlc[] = {0, 3, 6, 12, 24, 48, 32768};    // maximum vlc = 6
    int escape, numPrefix, sufmask, suffix, shift, sign, value, absvalue, vlcnum, level_two_or_higher;
    int bindx = blkIdx2blkXY[cindx>>2][cindx&3] ; // raster scan index

    switch (type)
    {
        case AVC_Luma:
            maxNumCoeff = 16;
            level = encvid->level[cindx];
            run = encvid->run[cindx];
            TotalCoeff = currMB->nz_coeff[bindx];
            break;
        case AVC_Intra16DC:
            maxNumCoeff = 16;
            level = encvid->leveldc;
            run = encvid->rundc;
            TotalCoeff = cindx; /* special case */
            bindx = 0;
            cindx = 0;
            break;
        case AVC_Intra16AC:
            maxNumCoeff = 15;
            level = encvid->level[cindx];
            run = encvid->run[cindx];
            TotalCoeff = currMB->nz_coeff[bindx];
            break;
        case AVC_ChromaDC:  /* how to differentiate Cb from Cr */
            maxNumCoeff = 4;
            cdc = 1;
            if (cindx >= 8)   /* Cr block: second half of the DC level/run arrays */
            {
                level = encvid->levelcdc + 4;
                run = encvid->runcdc + 4;
                TotalCoeff = cindx - 8;  /* special case */
            }
            else              /* Cb block */
            {
                level = encvid->levelcdc;
                run = encvid->runcdc;
                TotalCoeff = cindx;  /* special case */
            }
            break;
        case AVC_ChromaAC:
            maxNumCoeff = 15;
            cac = 1;
            level = encvid->level[cindx];
            run = encvid->run[cindx];
            cindx -= 16;  /* temporarily map into the chroma 4x4 index space */
            bindx = 16 + blkIdx2blkXY[cindx>>2][cindx&3];
            cindx += 16;
            TotalCoeff = currMB->nz_coeff[bindx];
            break;
        default:
            return AVCENC_FAIL;
    }

    /* find TrailingOnes: count consecutive +/-1 levels from the high end,
     * and sum the runs to get the total number of embedded zeros. */
    TrailingOnes = 0;
    zerosLeft = 0;
    i = TotalCoeff - 1;
    nC = 1;
    while (i >= 0)
    {
        zerosLeft += run[i];
        if (nC && (level[i] == 1 || level[i] == -1))
        {
            TrailingOnes++;
        }
        else
        {
            nC = 0;
        }
        i--;
    }
    if (TrailingOnes > 3)
    {
        TrailingOnes = 3; /* clip it */
    }

    if (!cdc)
    {
        if (!cac)  /* not chroma */
        {
            nC = predict_nnz(video, bindx & 3, bindx >> 2);
        }
        else /* chroma ac but not chroma dc */
        {
            nC = predict_nnz_chroma(video, bindx & 3, bindx >> 2);
        }
        status = ce_TotalCoeffTrailingOnes(stream, TrailingOnes, TotalCoeff, nC);
    }
    else
    {
        nC = -1; /* Chroma DC level */
        status = ce_TotalCoeffTrailingOnesChromaDC(stream, TrailingOnes, TotalCoeff);
    }

    /* This part is done quite differently in ReadCoef4x4_CAVLC() */
    if (TotalCoeff > 0)
    {
        i = TotalCoeff - 1;

        if (TrailingOnes) /* keep reading the sign of those trailing ones */
        {
            nC = TrailingOnes;
            trailing_ones_sign_flag = 0;
            while (nC)
            {
                trailing_ones_sign_flag <<= 1;
                trailing_ones_sign_flag |= ((uint32)level[i--] >> 31); /* 0 or positive, 1 for negative */
                nC--;
            }

            /* instead of writing one bit at a time, read the whole thing at once */
            status = BitstreamWriteBits(stream, TrailingOnes, trailing_ones_sign_flag);
        }

        level_two_or_higher = 1;
        if (TotalCoeff > 3 && TrailingOnes == 3)
        {
            level_two_or_higher = 0;
        }

        if (TotalCoeff > 10 && TrailingOnes < 3)
        {
            vlcnum = 1;
        }
        else
        {
            vlcnum = 0;
        }

        /* then do this TotalCoeff-TrailingOnes times */
        for (i = TotalCoeff - TrailingOnes - 1; i >= 0; i--)
        {
            value = level[i];
            absvalue = (value >= 0) ? value : -value;

            if (level_two_or_higher)
            {
                /* first non-T1 level is known to be >=2 in magnitude; bias toward zero */
                if (value > 0) value--;
                else    value++;
                level_two_or_higher = 0;
            }

            if (value >= 0)
            {
                sign = 0;
            }
            else
            {
                sign = 1;
                value = -value;
            }

            if (vlcnum == 0) // VLC1
            {
                if (value < 8)
                {
                    status = BitstreamWriteBits(stream, value * 2 + sign - 1, 1);
                }
                else if (value < 8 + 8)
                {
                    /* escape level_prefix 14, 4-bit suffix */
                    status = BitstreamWriteBits(stream, 14 + 1 + 4, (1 << 4) | ((value - 8) << 1) | sign);
                }
                else
                {
                    /* escape level_prefix 15, 12-bit suffix */
                    status = BitstreamWriteBits(stream, 14 + 2 + 12, (1 << 12) | ((value - 16) << 1) | sign) ;
                }
            }
            else  // VLCN
            {
                shift = vlcnum - 1;
                escape = (15 << shift) + 1;
                numPrefix = (value - 1) >> shift;
                sufmask = ~((0xffffffff) << shift);
                suffix = (value - 1) & sufmask;
                if (value < escape)
                {
                    status = BitstreamWriteBits(stream, numPrefix + vlcnum + 1, (1 << (shift + 1)) | (suffix << 1) | sign);
                }
                else
                {
                    status = BitstreamWriteBits(stream, 28, (1 << 12) | ((value - escape) << 1) | sign);
                }
            }

            /* adapt the VLC table as magnitudes grow */
            if (absvalue > incVlc[vlcnum])
                vlcnum++;

            if (i == TotalCoeff - TrailingOnes - 1 && absvalue > 3)
                vlcnum = 2;
        }

        if (status != AVCENC_SUCCESS)  /* occasionally check the bitstream */
        {
            return status;
        }

        if (TotalCoeff < maxNumCoeff)
        {
            if (!cdc)
            {
                ce_TotalZeros(stream, zerosLeft, TotalCoeff);
            }
            else
            {
                ce_TotalZerosChromaDC(stream, zerosLeft, TotalCoeff);
            }
        }
        else
        {
            zerosLeft = 0;
        }

        i = TotalCoeff - 1;
        while (i > 0) /* don't do the last one */
        {
            if (zerosLeft > 0)
            {
                ce_RunBefore(stream, run[i], zerosLeft);
            }
            zerosLeft = zerosLeft - run[i];
            i--;
        }
    }

    return status;
}


================================================
FILE: RtspCamera/jni/avc_h264/enc/src/sad.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "avcenc_lib.h"
#include "sad_inline.h"

#define Cached_lx 176

#ifdef _SAD_STAT
uint32 num_sad_MB = 0;
uint32 num_sad_Blk = 0;
uint32 num_sad_MB_call = 0;
uint32 num_sad_Blk_call = 0;
#define NUM_SAD_MB_CALL()       num_sad_MB_call++
#define NUM_SAD_MB()            num_sad_MB++
#define NUM_SAD_BLK_CALL()      num_sad_Blk_call++
#define NUM_SAD_BLK()           num_sad_Blk++
#else
#define NUM_SAD_MB_CALL()
#define NUM_SAD_MB()
#define NUM_SAD_BLK_CALL()
#define NUM_SAD_BLK()
#endif

/* consist of
int AVCSAD_Macroblock_C(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
int AVCSAD_MB_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
int AVCSAD_MB_HTFM(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
*/

/*==================================================================
    Function:   SAD_Macroblock
    Date:       09/07/2000
    Purpose:    Compute SAD 16x16 between blk and ref.
    To do:      Uniform subsampling will be inserted later!
                Hypothesis Testing Fast Matching to be used later!
    Changes:
    11/7/00:    implemented MMX
    1/24/01:    implemented SSE
==================================================================*/
/********** C ************/
/* Full 16x16 SAD via the word-SWAR helper in sad_inline.h.
 * dmin_lx packs the current best SAD (high 16 bits) and the reference
 * stride lx (low 16 bits) into one int. */
int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
{
    (void)(extra_info);

    int32 x10;
    int dmin = (uint32)dmin_lx >> 16;
    int lx = dmin_lx & 0xFFFF;

    NUM_SAD_MB_CALL();

    x10 = simd_sad_mb(ref, blk, dmin, lx);

    return x10;
}

#ifdef HTFM   /* HTFM with uniform subsampling implementation 2/28/01 */
/*===============================================================
    Function:   AVCAVCSAD_MB_HTFM_Collect and AVCSAD_MB_HTFM
    Date:       3/2/1
    Purpose:    Compute the SAD on a 16x16 block using
                uniform subsampling and hypothesis testing fast matching
                for early dropout. SAD_MB_HP_HTFM_Collect is to collect
                the statistics to compute the thresholds to be used in
                SAD_MB_HP_HTFM.
    Input/Output:
    Changes:
===============================================================*/
/* Statistics-collection variant: same subsampled SAD as AVCSAD_MB_HTFM, but
 * records |MAD difference| and early-break counts into HTFM_Stat so the
 * caller can derive the per-stage dropout thresholds.
 * Each of the 16 stages visits one subsampled pixel per 4x4 cell (offsetRef
 * picks the phase); lx4 = 4*stride, so p1 steps 4 rows at a time.
 * The `blk += 4` inside the cast walks the 16-byte-pitched current MB. */
int AVCAVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
{
    int i;
    int sad = 0;
    uint8 *p1;
    int lx4 = (dmin_lx << 2) & 0x3FFFC;
    uint32 cur_word;
    int saddata[16], tmp, tmp2;    /* used when collecting flag (global) is on */
    int difmad;
    int madstar;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    uint *countbreak = &(htfm_stat->countbreak);
    int *offsetRef = htfm_stat->offsetRef;

    madstar = (uint32)dmin_lx >> 20;

    NUM_SAD_MB_CALL();

    blk -= 4;
    for (i = 0; i < 16; i++)
    {
        p1 = ref + offsetRef[i];
        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        NUM_SAD_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if ((uint32)sad > ((uint32)dmin_lx >> 16))
            {
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* HTFM subsampled 16x16 SAD with per-stage early dropout: after each of the
 * 16 subsampling stages, gives up (returns 65536, i.e. worse than any real
 * SAD) when the partial SAD exceeds either the current best (high half of
 * dmin_lx) or the learned threshold sadstar - nrmlz_th[stage]. */
int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
{
    int sad = 0;
    uint8 *p1;
    int i;
    int tmp, tmp2;
    int lx4 = (dmin_lx << 2) & 0x3FFFC;
    int sadstar = 0, madstar;
    int *nrmlz_th = (int*) extra_info;
    int *offsetRef = (int*) extra_info + 32;
    uint32 cur_word;

    madstar = (uint32)dmin_lx >> 20;

    NUM_SAD_MB_CALL();

    blk -= 4;
    for (i = 0; i < 16; i++)
    {
        p1 = ref + offsetRef[i];
        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        cur_word = *((uint32*)(blk += 4));
        tmp = p1[12];
        tmp2 = (cur_word >> 24) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[8];
        tmp2 = (cur_word >> 16) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[4];
        tmp2 = (cur_word >> 8) & 0xFF;
        sad = SUB_SAD(sad, tmp, tmp2);
        tmp = p1[0];
        p1 += lx4;
        tmp2 = (cur_word & 0xFF);
        sad = SUB_SAD(sad, tmp, tmp2);

        NUM_SAD_MB();

        sadstar += madstar;
        if (((uint32)sad <= ((uint32)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))
            ;
        else
            return 65536;
    }

    return sad;
}
#endif /* HTFM */
================================================
FILE: RtspCamera/jni/avc_h264/enc/src/sad_halfpel.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/* contains
int AVCHalfPel1_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)
int AVCHalfPel2_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width)
int AVCHalfPel1_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)
int AVCHalfPel2_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width)

int AVCSAD_MB_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
int AVCSAD_MB_HP_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
int AVCSAD_MB_HP_HTFM(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
int AVCSAD_Blk_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
*/

#include "avcenc_lib.h"
#include "sad_halfpel_inline.h"

#ifdef _SAD_STAT
uint32 num_sad_HP_MB = 0;
uint32 num_sad_HP_Blk = 0;
uint32 num_sad_HP_MB_call = 0;
uint32 num_sad_HP_Blk_call = 0;
#define NUM_SAD_HP_MB_CALL()    num_sad_HP_MB_call++
#define NUM_SAD_HP_MB()         num_sad_HP_MB++
#define NUM_SAD_HP_BLK_CALL()   num_sad_HP_Blk_call++
#define NUM_SAD_HP_BLK()        num_sad_HP_Blk++
#else
#define NUM_SAD_HP_MB_CALL()
#define NUM_SAD_HP_MB()
#define NUM_SAD_HP_BLK_CALL()
#define NUM_SAD_HP_BLK()
#endif

/*===============================================================
    Function:   SAD_MB_HalfPel
    Date:       09/17/2000
    Purpose:    Compute the SAD on the half-pel resolution
    Input/Output:   hmem is assumed to be a pointer to the starting
                point of the search in the 33x33 matrix search region
    Changes:
    11/7/00:     implemented MMX
===============================================================*/
/*==================================================================
    Function:   AVCSAD_MB_HalfPel_C
    Date:       04/30/2001
    Purpose:    Compute SAD 16x16 between blk and ref in halfpel
                resolution,
    Changes:
==================================================================*/
/* One component is half-pel */
/* 16x16 SAD against the diagonal half-pel position: each reference sample is
 * the rounded average of a 2x2 neighborhood. dmin_rx packs current best SAD
 * (high 16 bits) and reference stride rx (low 16 bits); bails out early per
 * row once the partial SAD exceeds the best. */
int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    (void)(extra_info);

    int i, j;
    int sad = 0;
    uint8 *kk, *p1, *p2, *p3, *p4;
//    int sumref=0;
    int temp;
    int rx = dmin_rx & 0xFFFF;

    NUM_SAD_HP_MB_CALL();

    p1 = ref;
    p2 = ref + 1;
    p3 = ref + rx;
    p4 = ref + rx + 1;
    kk  = blk;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
            sad += AVC_ABS(temp);
        }

        NUM_SAD_HP_MB();

        if (sad > (int)((uint32)dmin_rx >> 16))
            return sad;

        p1 += rx;
        p3 += rx;
        p2 += rx;
        p4 += rx;
    }

    return sad;
}

/* 16x16 SAD against the vertical half-pel position (average of the pixel and
 * the one below it). */
int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    (void)(extra_info);

    int i, j;
    int sad = 0;
    uint8 *kk, *p1, *p2;
//    int sumref=0;
    int temp;
    int rx = dmin_rx & 0xFFFF;

    NUM_SAD_HP_MB_CALL();

    p1 = ref;
    p2 = ref + rx; /* either left/right or top/bottom pixel */
    kk  = blk;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
            sad += AVC_ABS(temp);
        }

        NUM_SAD_HP_MB();

        if (sad > (int)((uint32)dmin_rx >> 16))
            return sad;

        p1 += rx;
        p2 += rx;
    }

    return sad;
}

/* 16x16 SAD against the horizontal half-pel position (average of the pixel
 * and its right neighbor). */
int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    (void)(extra_info);

    int i, j;
    int sad = 0;
    uint8 *kk, *p1;
    int temp;
    int rx = dmin_rx & 0xFFFF;

    NUM_SAD_HP_MB_CALL();

    p1 = ref;
    kk  = blk;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;
            sad += AVC_ABS(temp);
        }

        NUM_SAD_HP_MB();

        if (sad > (int)((uint32)dmin_rx >> 16))
            return sad;

        p1 += rx;
    }

    return sad;
}

#ifdef HTFM  /* HTFM with uniform subsampling implementation, 2/28/01 */
//Checheck here
/* HTFM statistics-collection SAD at the diagonal half-pel position:
 * 16 uniform-subsampling stages, records |MAD diff| and break counts into
 * HTFM_Stat (same scheme as AVCAVCSAD_MB_HTFM_Collect in sad.cpp).
 * INTERP2_SUB_SAD folds the /4 rounding of the 4-tap average into the SAD. */
int AVCAVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    int i, j;
    int sad = 0;
    uint8 *p1, *p2;
    int rx = dmin_rx & 0xFFFF;
    int refwx4 = rx << 2;
    int saddata[16];      /* used when collecting flag (global) is on */
    int difmad, tmp, tmp2;
    int madstar;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    UInt *countbreak = &(htfm_stat->countbreak);
    int *offsetRef = htfm_stat->offsetRef;
    uint32 cur_word;

    madstar = (uint32)dmin_rx >> 20;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;

    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;

        j = 4;/* 4 lines */
        do
        {
            cur_word = *((uint32*)(blk += 4));
            tmp = p1[12] + p2[12];
            tmp2 = p1[13] + p2[13];
            tmp += tmp2;
            tmp2 = (cur_word >> 24) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8] + p2[8];
            tmp2 = p1[9] + p2[9];
            tmp += tmp2;
            tmp2 = (cur_word >> 16) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4] + p2[4];
            tmp2 = p1[5] + p2[5];
            tmp += tmp2;
            tmp2 = (cur_word >> 8) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp2 = p1[1] + p2[1];
            tmp = p1[0] + p2[0];
            p1 += refwx4;
            p2 += refwx4;
            tmp += tmp2;
            tmp2 = (cur_word & 0xFF);
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if (sad > ((uint32)dmin_rx >> 16))
            {
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* HTFM statistics-collection SAD at the vertical half-pel position
 * (2-tap average of vertically adjacent pixels via INTERP1_SUB_SAD). */
int AVCAVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    int i, j;
    int sad = 0;
    uint8 *p1, *p2;
    int rx = dmin_rx & 0xFFFF;
    int refwx4 = rx << 2;
    int saddata[16];      /* used when collecting flag (global) is on */
    int difmad, tmp, tmp2;
    int madstar;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    UInt *countbreak = &(htfm_stat->countbreak);
    int *offsetRef = htfm_stat->offsetRef;
    uint32 cur_word;

    madstar = (uint32)dmin_rx >> 20;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;

    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;
        j = 4;
        do
        {
            cur_word = *((uint32*)(blk += 4));
            tmp = p1[12];
            tmp2 = p2[12];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p2[8];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p2[4];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            p1 += refwx4;
            tmp2 = p2[0];
            p2 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if (sad > ((uint32)dmin_rx >> 16))
            {
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* HTFM statistics-collection SAD at the horizontal half-pel position
 * (2-tap average of horizontally adjacent pixels). */
int AVCAVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    int i, j;
    int sad = 0;
    uint8 *p1;
    int rx = dmin_rx & 0xFFFF;
    int refwx4 = rx << 2;
    int saddata[16];      /* used when collecting flag (global) is on */
    int difmad, tmp, tmp2;
    int madstar;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    UInt *countbreak = &(htfm_stat->countbreak);
    int *offsetRef = htfm_stat->offsetRef;
    uint32 cur_word;

    madstar = (uint32)dmin_rx >> 20;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;

    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        j = 4; /* 4 lines */
        do
        {
            cur_word = *((uint32*)(blk += 4));
            tmp = p1[12];
            tmp2 = p1[13];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p1[9];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p1[5];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            tmp2 = p1[1];
            p1 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if (sad > ((uint32)dmin_rx >> 16))
            {
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* HTFM half-pel SAD (diagonal position) with early dropout using the learned
 * per-stage thresholds nrmlz_th; returns 65536 (worse than any real SAD)
 * when a stage exceeds either threshold. */
int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    int i, j;
    int sad = 0, tmp, tmp2;
    uint8 *p1, *p2;
    int rx = dmin_rx & 0xFFFF;
    int refwx4 = rx << 2;
    int sadstar = 0, madstar;
    int *nrmlz_th = (int*) extra_info;
    int *offsetRef = nrmlz_th + 32;
    uint32 cur_word;

    madstar = (uint32)dmin_rx >> 20;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;

    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;

        j = 4; /* 4 lines */
        do
        {
            cur_word = *((uint32*)(blk += 4));
            tmp = p1[12] + p2[12];
            tmp2 = p1[13] + p2[13];
            tmp += tmp2;
            tmp2 = (cur_word >> 24) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8] + p2[8];
            tmp2 = p1[9] + p2[9];
            tmp += tmp2;
            tmp2 = (cur_word >> 16) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4] + p2[4];
            tmp2 = p1[5] + p2[5];
            tmp += tmp2;
            tmp2 = (cur_word >> 8) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp2 = p1[1] + p2[1];
            tmp = p1[0] + p2[0];
            p1 += refwx4;
            p2 += refwx4;
            tmp += tmp2;
            tmp2 = (cur_word & 0xFF);
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        sadstar += madstar;
        if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
        {
            return 65536;
        }
    }

    return sad;
}

/* HTFM half-pel SAD (vertical position) with early dropout. */
int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    int i, j;
    int sad = 0, tmp, tmp2;
    uint8 *p1, *p2;
    int rx = dmin_rx & 0xFFFF;
    int refwx4 = rx << 2;
    int sadstar = 0, madstar;
    int *nrmlz_th = (int*) extra_info;
    int *offsetRef = nrmlz_th + 32;
    uint32 cur_word;

    madstar = (uint32)dmin_rx >> 20;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;

    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;
        j = 4;
        do
        {
            cur_word = *((uint32*)(blk += 4));
            tmp = p1[12];
            tmp2 = p2[12];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p2[8];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p2[4];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            p1 += refwx4;
            tmp2 = p2[0];
            p2 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        sadstar += madstar;
        if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
        {
            return 65536;
        }
    }

    return sad;
}

/* HTFM half-pel SAD (horizontal position) with early dropout. */
int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
{
    int i, j;
    int sad = 0, tmp, tmp2;
    uint8 *p1;
    int rx = dmin_rx & 0xFFFF;
    int refwx4 = rx << 2;
    int sadstar = 0, madstar;
    int *nrmlz_th = (int*) extra_info;
    int *offsetRef = nrmlz_th + 32;
    uint32 cur_word;

    madstar = (uint32)dmin_rx >> 20;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;

    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        j = 4;/* 4 lines */
        do
        {
            cur_word = *((uint32*)(blk += 4));
            tmp = p1[12];
            tmp2 = p1[13];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p1[9];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p1[5];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            tmp2 = p1[1];
            p1 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        sadstar += madstar;
        if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
        {
            return 65536;
        }
    }

    return sad;
}

#endif /* HTFM */


================================================
FILE: RtspCamera/jni/avc_h264/enc/src/sad_halfpel_inline.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef _SAD_HALFPEL_INLINE_H_
#define _SAD_HALFPEL_INLINE_H_

#ifdef __cplusplus
extern "C"
{
#endif

#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */

    /* Accumulate |(tmp2>>1) - tmp| into sad: tmp2 is a pre-biased 2-pixel
     * sum, tmp the current-block pixel. */
    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        tmp = (tmp2 >> 1) - tmp;
        if (tmp > 0) sad += tmp;
        else sad -= tmp;

        return sad;
    }

    /* Accumulate |(tmp>>2) - tmp2| into sad: tmp is a pre-biased 4-pixel
     * sum, tmp2 the current-block pixel. */
    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        tmp = (tmp >> 2) - tmp2;
        if (tmp > 0) sad += tmp;
        else sad -= tmp;

        return sad;
    }

#elif defined(__CC_ARM)  /* only work with arm v5 */

    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm
        {
            rsbs    tmp, tmp, tmp2, asr #1 ;
            rsbmi   tmp, tmp, #0 ;
            add     sad, sad, tmp ;
        }

        return sad;
    }

    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm
        {
            rsbs    tmp, tmp2, tmp, asr #2 ;
            rsbmi   tmp, tmp, #0 ;
            add     sad, sad, tmp ;
        }

        return sad;
    }

/* NOTE(review): this branch tests the same condition as the first #if above,
 * so it is unreachable dead code — the plain C versions always win on GCC/ARM.
 * Kept as-is from upstream PacketVideo. */
#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */

    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm__ volatile("rsbs	%1, %1, %2, asr #1\n\trsbmi %1, %1, #0\n\tadd  %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
        return sad;
    }

    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm__ volatile("rsbs	%1, %2, %1, asr #2\n\trsbmi %1, %1, #0\n\tadd  %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
        return sad;
    }

#endif

#ifdef __cplusplus
}
#endif

#endif //_SAD_HALFPEL_INLINE_H_


================================================
FILE: RtspCamera/jni/avc_h264/enc/src/sad_inline.h
================================================
/*
 ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef _SAD_INLINE_H_
#define _SAD_INLINE_H_

#ifdef __cplusplus
extern "C"
{
#endif

#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */

    /* Accumulate |tmp - tmp2| into sad. */
    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        tmp = tmp - tmp2;
        if (tmp > 0) sad += tmp;
        else sad -= tmp;

        return sad;
    }

    /* SWAR absolute difference of 4 packed bytes: returns |src1-src2| per
     * byte, packed. mask is 0x80808080; the borrow/carry fix-up propagates
     * per-byte sign without unpacking. */
    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
    {
        int32 x7;

        x7 = src2 ^ src1;       /* check odd/even combination */
        if ((uint32)src2 >= (uint32)src1)
        {
            src1 = src2 - src1;     /* subs */
        }
        else
        {
            src1 = src1 - src2;
        }
        x7 = x7 ^ src1;     /* only odd bytes need to add carry */
        x7 = mask & ((uint32)x7 >> 1);
        x7 = (x7 << 8) - x7;
        src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */
        src1 = src1 ^(x7 >> 7);  /* take absolute value of negative byte */

        return src1;
    }

/* instantiate sad_mb_offset1/2/3 for the three misaligned ref phases */
#define NUMBER 3
#define SHIFT 24
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#include "sad_mb_offset.h"

    /* Word-parallel 16x16 SAD with early exit against dmin. Dispatches to a
     * sad_mb_offsetN variant when ref is not 4-byte aligned. Low/high bytes
     * are accumulated separately (x5/x4) and recombined at the end. */
    __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
    {
        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

        x9 = 0x80808080; /* const. */
        x8 = (uint32)ref & 0x3;
        if (x8 == 3)
            goto SadMBOffset3;
        if (x8 == 2)
            goto SadMBOffset2;
        if (x8 == 1)
            goto SadMBOffset1;

//  x5 = (x4<<8) - x4; /* x5 = x4*255; */
        x4 = x5 = 0;

        x6 = 0xFFFF00FF;

        ref -= lx;
        blk -= 16;

        x8 = 16;

LOOP_SAD0:
        /****** process 8 pixels ******/
        x10 = *((uint32*)(ref += lx));
        x11 = *((uint32*)(ref + 4));
        x12 = *((uint32*)(blk += 16));
        x14 = *((uint32*)(blk + 4));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10; /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

        /****** process 8 pixels ******/
        x10 = *((uint32*)(ref + 8));
        x11 = *((uint32*)(ref + 12));
        x12 = *((uint32*)(blk + 8));
        x14 = *((uint32*)(blk + 12));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10; /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

        /****************/
        x10 = x5 - (x4 << 8); /* extract low bytes */
        x10 = x10 + x4;     /* add with high bytes */
        x10 = x10 + (x10 << 16); /* add with lower half word */

        if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
        {
            if (--x8)
            {
                goto LOOP_SAD0;
            }

        }

        return ((uint32)x10 >> 16);

SadMBOffset3:

        return sad_mb_offset3(ref, blk, lx, dmin);

SadMBOffset2:

        return sad_mb_offset2(ref, blk, lx, dmin);

SadMBOffset1:

        return sad_mb_offset1(ref, blk, lx, dmin);

    }

#elif defined(__CC_ARM)  /* only work with arm v5 */

    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm
        {
            rsbs    tmp, tmp, tmp2 ;
            rsbmi   tmp, tmp, #0 ;
            add     sad, sad, tmp ;
        }

        return sad;
    }

    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
    {
        int32 x7;

        __asm
        {
            EOR     x7, src2, src1;     /* check odd/even combination */
            SUBS    src1, src2, src1;
            EOR     x7, x7, src1;
            AND     x7, mask, x7, lsr #1;
            ORRCC   x7, x7, #0x80000000;
            RSB     x7, x7, x7, lsl #8;
            ADD     src1, src1, x7, asr #7;   /* add 0xFF to the negative byte, add back carry */
            EOR     src1, src1, x7, asr #7;   /* take absolute value of negative byte */
        }

        return src1;
    }

    __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
    {
        int32 x7;

        __asm
        {
            EOR      x7, src2, src1;        /* check odd/even combination */
            ADDS     src1, src2, src1;
            EOR      x7, x7, src1;      /* only odd bytes need to add carry */
            ANDS     x7, mask, x7, rrx;
            RSB      x7, x7, x7, lsl #8;
            SUB      src1, src1, x7, asr #7;  /* add 0xFF to the negative byte, add back carry */
            EOR      src1, src1, x7, asr #7; /* take absolute value of negative byte */
        }

        return src1;
    }

#define sum_accumulate  __asm{      SBC      x5, x5, x10;  /* accumulate low bytes */ \
        BIC      x10, x6, x10;   /* x10 & 0xFF00FF00 */ \
        ADD      x4, x4, x10,lsr #8;   /* accumulate high bytes */ \
        SBC      x5, x5, x11;    /* accumulate low bytes */ \
        BIC      x11, x6, x11;   /* x11 & 0xFF00FF00 */ \
        ADD      x4, x4, x11,lsr #8; } /* accumulate high bytes */

#define NUMBER 3
#define SHIFT 24
#define INC_X8 0x08000001
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#undef INC_X8
#define INC_X8 0x10000001
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#undef INC_X8
#define INC_X8 0x08000001
#include "sad_mb_offset.h"

    __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
    {
        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

        x9 = 0x80808080; /* const. */
        x4 = x5 = 0;

        __asm
        {
            MOVS    x8, ref, lsl #31 ;
            BHI     SadMBOffset3;
            BCS     SadMBOffset2;
            BMI     SadMBOffset1;

            MVN     x6, #0xFF00;
        }

LOOP_SAD0:
        /****** process 8 pixels ******/
        x11 = *((int32*)(ref + 12));
        x10 = *((int32*)(ref + 8));
        x14 = *((int32*)(blk + 12));
        x12 = *((int32*)(blk + 8));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10;  /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */

        __asm
        {
            /****** process 8 pixels ******/
            LDR     x11, [ref, #4];
            LDR     x10, [ref], lx ;
            LDR     x14, [blk, #4];
            LDR     x12, [blk], #16 ;
        }

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10;  /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */

        /****************/
        x10 = x5 - (x4 << 8); /* extract low bytes */
        x10 = x10 + x4;     /* add with high bytes */
        x10 = x10 + (x10 << 16); /* add with lower half word */

        __asm
        {
            /****************/
            RSBS    x11, dmin, x10, lsr #16;
            ADDLSS  x8, x8, #0x10000001;
            BLS     LOOP_SAD0;
        }

        return ((uint32)x10 >> 16);

SadMBOffset3:

        return sad_mb_offset3(ref, blk, lx, dmin, x8);

SadMBOffset2:

        return sad_mb_offset2(ref, blk, lx, dmin, x8);

SadMBOffset1:

        return sad_mb_offset1(ref, blk, lx, dmin, x8);
    }

#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */

    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm__ volatile("rsbs	%1, %1, %2\n\trsbmi %1, %1, #0\n\tadd  %0, %0, %1": "=r"(sad): "r"(tmp), "r"(tmp2));
        return sad;
    }

    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
{ int32 x7; __asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %1, %3, %1, lsr #1\n\tORRCC %1, %1, #0x80000000\n\tRSB %1, %1, %1, lsl #8\n\tADD %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask)); return src1; } __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask) { int32 x7; __asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS %1, %3, %1, rrx\n\tRSB %1, %1, %1, lsl #8\n\tSUB %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask)); return src1; } #define sum_accumulate __asm__ volatile("SBC %0, %0, %1\n\tBIC %1, %4, %1\n\tADD %2, %2, %1, lsr #8\n\tSBC %0, %0, %3\n\tBIC %3, %4, %3\n\tADD %2, %2, %3, lsr #8": "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11): "r" (x6)); #define NUMBER 3 #define SHIFT 24 #define INC_X8 0x08000001 #include "sad_mb_offset.h" #undef NUMBER #define NUMBER 2 #undef SHIFT #define SHIFT 16 #undef INC_X8 #define INC_X8 0x10000001 #include "sad_mb_offset.h" #undef NUMBER #define NUMBER 1 #undef SHIFT #define SHIFT 8 #undef INC_X8 #define INC_X8 0x08000001 #include "sad_mb_offset.h" __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx) { int32 x4, x5, x6, x8, x9, x10, x11, x12, x14; x9 = 0x80808080; /* const. 
*/ x4 = x5 = 0; x8 = (uint32)ref & 0x3; if (x8 == 3) goto SadMBOffset3; if (x8 == 2) goto SadMBOffset2; if (x8 == 1) goto SadMBOffset1; x8 = 16; /// __asm__ volatile("MVN %0, #0xFF00": "=r"(x6)); LOOP_SAD0: /****** process 8 pixels ******/ x11 = *((int32*)(ref + 12)); x10 = *((int32*)(ref + 8)); x14 = *((int32*)(blk + 12)); x12 = *((int32*)(blk + 8)); /* process x11 & x14 */ x11 = sad_4pixel(x11, x14, x9); /* process x12 & x10 */ x10 = sad_4pixel(x10, x12, x9); x5 = x5 + x10; /* accumulate low bytes */ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */ x5 = x5 + x11; /* accumulate low bytes */ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */ /****** process 8 pixels ******/ x11 = *((int32*)(ref + 4)); __asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx)); //x10 = *((int32*)ref); ref+=lx; x14 = *((int32*)(blk + 4)); __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk)); /* process x11 & x14 */ x11 = sad_4pixel(x11, x14, x9); /* process x12 & x10 */ x10 = sad_4pixel(x10, x12, x9); x5 = x5 + x10; /* accumulate low bytes */ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */ x5 = x5 + x11; /* accumulate low bytes */ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */ /****************/ x10 = x5 - (x4 << 8); /* extract low bytes */ x10 = x10 + x4; /* add with high bytes */ x10 = x10 + (x10 << 16); /* add with lower half word */ /****************/ if (((uint32)x10 >> 16) <= dmin) /* compare with dmin */ { if (--x8) { goto LOOP_SAD0; } } return ((uint32)x10 >> 16); SadMBOffset3: return sad_mb_offset3(ref, blk, lx, dmin); SadMBOffset2: return sad_mb_offset2(ref, blk, lx, dmin); SadMBOffset1: return sad_mb_offset1(ref, blk, lx, dmin); } #endif #ifdef __cplusplus } #endif #endif // _SAD_INLINE_H_ ================================================ 
FILE: RtspCamera/jni/avc_h264/enc/src/sad_mb_offset.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */

/*
 * sad_mb_offset.h -- "template" body for macroblock SAD when the
 * reference pointer is misaligned by NUMBER bytes.  This header is
 * #included (from sad_inline.h) once per NUMBER in {3,2,1}, with SHIFT
 * set to 8*NUMBER; each expansion defines one sad_mb_offsetN function
 * that realigns two 8-pixel reads per row by shifting/merging three
 * word loads.  Do not include this header directly.
 */

#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */

#if (NUMBER==3)
__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
#endif
{
    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

    //  x5 = (x4<<8) - x4;
    x4 = x5 = 0;
    x6 = 0xFFFF00FF;
    x9 = 0x80808080; /* const. */
    ref -= NUMBER; /* bic ref, ref, #3 */
    ref -= lx;
    blk -= 16;
    x8 = 16;    /* row counter */

#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
    /****** process 8 pixels ******/
    /* three aligned word loads are shifted and merged into two properly
     * aligned 4-pixel words (letters show byte lanes) */
    x10 = *((uint32*)(ref += lx)); /* D C B A */
    x11 = *((uint32*)(ref + 4));    /* H G F E */
    x12 = *((uint32*)(ref + 8));    /* L K J I */
    x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */
    x10 = x10 | (x11 << (32 - SHIFT));        /* G F E D */
    x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */
    x11 = x11 | (x12 << (32 - SHIFT));        /* K J I H */
    x12 = *((uint32*)(blk += 16));
    x14 = *((uint32*)(blk + 4));

    /* process x11 & x14 */
    x11 = sad_4pixel(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixel(x10, x12, x9);

    x5 = x5 + x10; /* accumulate low bytes */
    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
    x5 = x5 + x11;  /* accumulate low bytes */
    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

    /****** process 8 pixels ******/
    x10 = *((uint32*)(ref + 8)); /* D C B A */
    x11 = *((uint32*)(ref + 12));   /* H G F E */
    x12 = *((uint32*)(ref + 16));   /* L K J I */
    x10 = ((uint32)x10 >> SHIFT); /* mvn x10, x10, lsr #24  = 0xFF 0xFF 0xFF ~D */
    x10 = x10 | (x11 << (32 - SHIFT));        /* bic x10, x10, x11, lsl #8 = ~G ~F ~E ~D */
    x11 = ((uint32)x11 >> SHIFT); /* 0xFF 0xFF 0xFF ~H */
    x11 = x11 | (x12 << (32 - SHIFT));        /* ~K ~J ~I ~H */
    x12 = *((uint32*)(blk + 8));
    x14 = *((uint32*)(blk + 12));

    /* process x11 & x14 */
    x11 = sad_4pixel(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixel(x10, x12, x9);

    x5 = x5 + x10; /* accumulate low bytes */
    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
    x5 = x5 + x11;  /* accumulate low bytes */
    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

    /****************/
    x10 = x5 - (x4 << 8); /* extract low bytes */
    x10 = x10 + x4;     /* add with high bytes */
    x10 = x10 + (x10 << 16); /* add with lower half word */

    if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
    {
        if (--x8)
        {
#if (NUMBER==3)
            goto LOOP_SAD3;
#elif (NUMBER==2)
            goto LOOP_SAD2;
#elif (NUMBER==1)
            goto LOOP_SAD1;
#endif
        }
    }

    return ((uint32)x10 >> 16);
}

#elif defined(__CC_ARM)  /* only work with arm v5 */

/* RVCT variant: x8 arrives pre-packed from simd_sad_mb (loop counter in
 * the low bits, alignment tag in the high bits; stepped by INC_X8). */
#if (NUMBER==3)
__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
#endif
{
    int32 x4, x5, x6, x9, x10, x11, x12, x14;

    x9 = 0x80808080; /* const. */
    x4 = x5 = 0;

    __asm{
        MVN      x6, #0xff0000;

#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
        BIC      ref, ref, #3;
    }
    /****** process 8 pixels ******/
    x11 = *((int32*)(ref + 12));
    x12 = *((int32*)(ref + 16));
    x10 = *((int32*)(ref + 8));
    x14 = *((int32*)(blk + 12));

    __asm{
        /* realign: MVN/BIC produce the bit-inverted merged words that
         * sad_4pixelN expects */
        MVN      x10, x10, lsr #SHIFT;
        BIC      x10, x10, x11, lsl #(32-SHIFT);
        MVN      x11, x11, lsr #SHIFT;
        BIC      x11, x11, x12, lsl #(32-SHIFT);

        LDR      x12, [blk, #8];
    }

    /* process x11 & x14 */
    x11 = sad_4pixelN(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixelN(x10, x12, x9);

    sum_accumulate;

    __asm{
        /****** process 8 pixels ******/
        LDR      x11, [ref, #4];
        LDR      x12, [ref, #8];
        LDR      x10, [ref], lx ;
        LDR      x14, [blk, #4];

        MVN      x10, x10, lsr #SHIFT;
        BIC      x10, x10, x11, lsl #(32-SHIFT);
        MVN      x11, x11, lsr #SHIFT;
        BIC      x11, x11, x12, lsl #(32-SHIFT);

        LDR      x12, [blk], #16;
    }

    /* process x11 & x14 */
    x11 = sad_4pixelN(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixelN(x10, x12, x9);

    sum_accumulate;

    /****************/
    x10 = x5 - (x4 << 8); /* extract low bytes */
    x10 = x10 + x4;     /* add with high bytes */
    x10 = x10 + (x10 << 16); /* add with lower half word */

    __asm{
        RSBS     x11, dmin, x10, lsr #16
        ADDLSS   x8, x8, #INC_X8
#if (NUMBER==3)
        BLS      LOOP_SAD3;
#elif (NUMBER==2)
        BLS      LOOP_SAD2;
#elif (NUMBER==1)
        BLS      LOOP_SAD1;
#endif
    }

    return ((uint32)x10 >> 16);
}

#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */

#if (NUMBER==3)
__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
#endif
{
    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

    x9 = 0x80808080; /* const. */
    x4 = x5 = 0;
    x8 = 16; //<<===========*******

    __asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));

#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
    __asm__ volatile("BIC %0, %0, #3": "=r"(ref));
    /****** process 8 pixels ******/
    x11 = *((int32*)(ref + 12));
    x12 = *((int32*)(ref + 16));
    x10 = *((int32*)(ref + 8));
    x14 = *((int32*)(blk + 12));

    /* realign the three word loads into two bit-inverted 4-pixel words
     * for sad_4pixelN; one asm body per SHIFT instantiation */
#if (SHIFT==8)
    __asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
#elif (SHIFT==16)
    __asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
#elif (SHIFT==24)
    __asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
#endif
    x12 = *((int32*)(blk + 8));

    /* process x11 & x14 */
    x11 = sad_4pixelN(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixelN(x10, x12, x9);

    sum_accumulate;

    /****** process 8 pixels ******/
    x11 = *((int32*)(ref + 4));
    x12 = *((int32*)(ref + 8));
    x10 = *((int32*)ref);
    ref += lx;
    x14 = *((int32*)(blk + 4));

#if (SHIFT==8)
    __asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
#elif (SHIFT==16)
    __asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
#elif (SHIFT==24)
    __asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
#endif
    __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));

    /* process x11 & x14 */
    x11 = sad_4pixelN(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixelN(x10, x12, x9);

    sum_accumulate;

    /****************/
    x10 = x5 - (x4 << 8); /* extract low bytes */
    x10 = x10 + x4;     /* add with high bytes */
    x10 = x10 + (x10 << 16); /* add with lower half word */

    if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
    {
        if (--x8)
        {
#if (NUMBER==3)
            goto LOOP_SAD3;
#elif (NUMBER==2)
            goto LOOP_SAD2;
#elif (NUMBER==1)
            goto LOOP_SAD1;
#endif
        }
    }

    return ((uint32)x10 >> 16);
}

#endif

================================================
FILE: RtspCamera/jni/avc_h264/enc/src/slice.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "oscl_mem.h"
#include "avcenc_lib.h"

/* Encode one slice: iterate over the macroblocks of the current slice
 * group (subclause 7.3.4), encoding each via EncodeMB().  Returns
 * AVCENC_PICTURE_READY when no slice groups remain, AVCENC_SUCCESS when
 * another slice follows, or an error status.  Any pending mb_skip_run
 * is flushed to the bitstream before returning. */
AVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    AVCCommonObj *video = encvid->common;
    AVCPicParamSet *pps = video->currPicParams;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    AVCMacroblock *currMB ;
    AVCEncBitstream *stream = encvid->bitstream;
    uint slice_group_id;
    int CurrMbAddr, slice_type;

    slice_type = video->slice_type;

    /* set the first mb in slice */
    video->mbNum = CurrMbAddr = sliceHdr->first_mb_in_slice;// * (1+video->MbaffFrameFlag);
    slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];

    video->mb_skip_run = 0;

    /* while loop , see subclause 7.3.4 */
    while (1)
    {
        video->mbNum = CurrMbAddr;
        currMB = video->currMB = &(video->mblock[CurrMbAddr]);
        currMB->slice_id = video->slice_id;  // for deblocking

        video->mb_x = CurrMbAddr % video->PicWidthInMbs;
        video->mb_y = CurrMbAddr / video->PicWidthInMbs;

        /* initialize QP for this MB here*/
        /* calculate currMB->QPy */
        RCInitMBQP(encvid);

        /* check the availability of neighboring macroblocks */
        InitNeighborAvailability(video, CurrMbAddr);

        /* Assuming that InitNeighborAvailability has been called prior to this function */
        video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
        /* this is necessary for all subsequent intra search */

        if (!video->currPicParams->constrained_intra_pred_flag)
        {
            video->intraAvailA = video->mbAvailA;
            video->intraAvailB = video->mbAvailB;
            video->intraAvailC = video->mbAvailC;
            video->intraAvailD = video->mbAvailD;
        }
        else
        {
            /* constrained intra prediction: only intra-coded neighbors
             * may be used as intra predictors */
            if (video->mbAvailA)
            {
                video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
            }
            if (video->mbAvailB)
            {
                video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
            }
            if (video->mbAvailC)
            {
                video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
            }
            if (video->mbAvailD)
            {
                video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
            }
        }

        /* encode_one_macroblock() */
        status = EncodeMB(encvid);
        if (status != AVCENC_SUCCESS)
        {
            break;
        }

        /* go to next MB: skip over MBs belonging to other slice groups */
        CurrMbAddr++;
        while ((uint)video->MbToSliceGroupMap[CurrMbAddr] != slice_group_id &&
                (uint)CurrMbAddr < video->PicSizeInMbs)
        {
            CurrMbAddr++;
        }

        if ((uint)CurrMbAddr >= video->PicSizeInMbs)
        {
            /* end of slice, return, but before that check to see if there are other
            slices to be encoded. */
            encvid->currSliceGroup++;
            if (encvid->currSliceGroup > (int)pps->num_slice_groups_minus1) /* no more slice group */
            {
                status = AVCENC_PICTURE_READY;
                break;
            }
            else
            {
                /* find first_mb_num for the next slice */
                CurrMbAddr = 0;
                while (video->MbToSliceGroupMap[CurrMbAddr] != encvid->currSliceGroup &&
                        (uint)CurrMbAddr < video->PicSizeInMbs)
                {
                    CurrMbAddr++;
                }
                if ((uint)CurrMbAddr >= video->PicSizeInMbs)
                {
                    status = AVCENC_SLICE_EMPTY; /* error, one slice group has no MBs in it */
                }

                video->mbNum = CurrMbAddr;
                status = AVCENC_SUCCESS;
                break;
            }
        }
    }

    if (video->mb_skip_run > 0)
    {
        /* write skip_run */
        if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
        {
            ue_v(stream, video->mb_skip_run);
            video->mb_skip_run = 0;
        }
        else /* shouldn't happen */
        {
            status = AVCENC_FAIL;
        }
    }

    return status;
}

/* Encode one macroblock: mode decision hand-off, transform/quantization
 * of luma and chroma residuals, skip detection for inter MBs, and CAVLC
 * entropy coding of the MB header and residuals.  Also reports the bit
 * counts spent on header vs. texture to the rate control (RCPostMB). */
AVCEnc_Status EncodeMB(AVCEncObject *encvid)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    AVCCommonObj *video = encvid->common;
    AVCPictureData *currPic = video->currPic;
    AVCFrameIO  *currInput = encvid->currInput;
    AVCMacroblock *currMB = video->currMB;
    AVCMacroblock *MB_A, *MB_B;
    AVCEncBitstream *stream = encvid->bitstream;
    AVCRateControl *rateCtrl = encvid->rateCtrl;
    uint8 *cur, *curL, *curCb, *curCr;
    uint8 *orgL, *orgCb, *orgCr, *org4;
    int CurrMbAddr = video->mbNum;
    int picPitch = currPic->pitch;
    int orgPitch = currInput->pitch;
    int x_position = (video->mb_x << 4);
    int y_position = (video->mb_y << 4);
    int offset;
    int b8, b4, blkidx;
    AVCResidualType  resType;
    int slice_type;
    int numcoeff; /* output from residual_block_cavlc */
    int cost16, cost8;

    int num_bits, start_mb_bits, start_text_bits;

    slice_type = video->slice_type;

    /* now, point to the reconstructed frame */
    offset = y_position * picPitch + x_position;
    curL = currPic->Sl + offset;
    orgL = currInput->YCbCr[0] + offset;
    offset = (offset + x_position) >> 2;  /* chroma offset (4:2:0 subsampling) */
    curCb = currPic->Scb + offset;
    curCr = currPic->Scr + offset;
    orgCb = currInput->YCbCr[1] + offset;
    orgCr = currInput->YCbCr[2] + offset;

    if (orgPitch != picPitch)
    {
        /* source and reconstructed frames have different pitches --
         * correct the original-frame pointers */
        offset = y_position * (orgPitch - picPitch);
        orgL += offset;
        offset >>= 2;
        orgCb += offset;
        orgCr += offset;
    }

    /******* determine MB prediction mode *******/
    if (encvid->intraSearch[CurrMbAddr])
    {
        MBIntraSearch(encvid, CurrMbAddr, curL, picPitch);
    }
    /******* This part should be determined somehow ***************/
    if (currMB->mbMode == AVC_I_PCM)
    {
        /* write down mb_type and PCM data */
        /* and copy from currInput to currPic */
        status = EncodeIntraPCM(encvid);

        return status;
    }

    /****** for intra prediction, pred is already done *******/
    /****** for I4, the recon is ready and Xfrm coefs are ready to be encoded *****/

    //RCCalculateMAD(encvid,currMB,orgL,orgPitch); // no need to re-calculate MAD for Intra
    // not used since totalSAD is used instead

    /* compute the prediction */
    /* output is video->pred_block */
    if (!currMB->mb_intra)
    {
        AVCMBMotionComp(encvid, video); /* perform prediction and residue calculation */
        /* we can do the loop here and call dct_luma */
        video->pred_pitch = picPitch;
        currMB->CBP = 0;
        cost16 = 0;
        cur = curL;
        org4 = orgL;

        /* 4 8x8 sub-blocks, each containing 4 4x4 blocks */
        for (b8 = 0; b8 < 4; b8++)
        {
            cost8 = 0;

            for (b4 = 0; b4 < 4; b4++)
            {
                blkidx = blkIdx2blkXY[b8][b4];
                video->pred_block = cur;
                numcoeff = dct_luma(encvid, blkidx, cur, org4, &cost8);
                currMB->nz_coeff[blkidx] = numcoeff;
                if (numcoeff)
                {
                    video->cbp4x4 |= (1 << blkidx);
                    currMB->CBP |= (1 << b8);
                }

                if (b4&1)
                {
                    cur += ((picPitch << 2) - 4);
                    org4 += ((orgPitch << 2) - 4);
                }
                else
                {
                    cur += 4;
                    org4 += 4;
                }
            }

            /* move the IDCT part out of dct_luma to accommodate the check
               for coeff_cost. */
            /* zero out a cheap 8x8 block (coefficient cost below threshold) */
            if ((currMB->CBP&(1 << b8)) && (cost8 <= _LUMA_COEFF_COST_))
            {
                cost8 = 0; // reset it

                currMB->CBP ^= (1 << b8);
                blkidx = blkIdx2blkXY[b8][0];

                currMB->nz_coeff[blkidx] = 0;
                currMB->nz_coeff[blkidx+1] = 0;
                currMB->nz_coeff[blkidx+4] = 0;
                currMB->nz_coeff[blkidx+5] = 0;
            }

            cost16 += cost8;

            if (b8&1)
            {
                cur -= 8;
                org4 -= 8;
            }
            else
            {
                cur += (8 - (picPitch << 3));
                org4 += (8 - (orgPitch << 3));
            }
        }

        /* after the whole MB, we do another check for coeff_cost */
        if ((currMB->CBP&0xF) && (cost16 <= _LUMA_MB_COEFF_COST_))
        {
            currMB->CBP = 0;  // reset it to zero
            oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*16);
        }

        // now we do IDCT
        MBInterIdct(video, curL, currMB, picPitch);

        // video->pred_block = video->pred + 256;
    }
    else    /* Intra prediction */
    {
        encvid->numIntraMB++;

        if (currMB->mbMode == AVC_I16) /* do prediction for the whole macroblock */
        {
            currMB->CBP = 0;
            /* get the prediction from encvid->pred_i16 */
            dct_luma_16x16(encvid, curL, orgL);
        }
        video->pred_block = encvid->pred_ic[currMB->intra_chroma_pred_mode];
    }

    /* chrominance */
    /* not need to do anything, the result is in encvid->pred_ic
    chroma dct must be aware that prediction block can come from either intra or inter. */

    dct_chroma(encvid, curCb, orgCb, 0);

    dct_chroma(encvid, curCr, orgCr, 1);


    /* 4.1 if there's nothing in there, video->mb_skip_run++ */
    /* 4.2 if coded, check if there is a run of skipped MB, encodes it,
            set video->QPyprev = currMB->QPy; */

    /* 5. vlc encode */

    /* check for skipped macroblock, INTER only */
    if (!currMB->mb_intra)
    {
        /* decide whether this MB (for inter MB) should be skipped if there's nothing left. */
        if (!currMB->CBP && currMB->NumMbPart == 1 && currMB->QPy == video->QPy)
        {
            if (currMB->MBPartPredMode[0][0] == AVC_Pred_L0 && currMB->ref_idx_L0[0] == 0)
            {
                MB_A = &video->mblock[video->mbAddrA];
                MB_B = &video->mblock[video->mbAddrB];

                if (!video->mbAvailA || !video->mbAvailB)
                {
                    /* NOTE(review): mvL0[0]==0 appears to test both mv
                     * components at once (packed representation) -- confirm
                     * against the mvL0 declaration */
                    if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/
                    {
                        currMB->mbMode = AVC_SKIP;
                        video->mvd_l0[0][0][0] = 0;
                        video->mvd_l0[0][0][1] = 0;
                    }
                }
                else
                {
                    if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||
                            (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))
                    {
                        if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/
                        {
                            currMB->mbMode = AVC_SKIP;
                            video->mvd_l0[0][0][0] = 0;
                            video->mvd_l0[0][0][1] = 0;
                        }
                    }
                    else if (video->mvd_l0[0][0][0] == 0 && video->mvd_l0[0][0][1] == 0)
                    {
                        currMB->mbMode = AVC_SKIP;
                    }
                }
            }

            if (currMB->mbMode == AVC_SKIP)
            {
                video->mb_skip_run++;

                /* set parameters */
                /* not sure whether we need the followings */
                if (slice_type == AVC_P_SLICE)
                {
                    currMB->mbMode = AVC_SKIP;
                    currMB->MbPartWidth = currMB->MbPartHeight = 16;
                    currMB->MBPartPredMode[0][0] = AVC_Pred_L0;
                    currMB->NumMbPart = 1;
                    currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
                                                  currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;
                    currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
                                                    currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
                    currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
                                                     currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;

                }
                else if (slice_type == AVC_B_SLICE)
                {
                    currMB->mbMode = AVC_SKIP;
                    currMB->MbPartWidth = currMB->MbPartHeight = 8;
                    currMB->MBPartPredMode[0][0] = AVC_Direct;
                    currMB->NumMbPart = -1;
                }

                /* for skipped MB, always look at the first entry in RefPicList */
                currMB->RefIdx[0] = currMB->RefIdx[1] =
                                        currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;

                /* do not return yet, need to do some copies */
            }
        }
    }
    /* non-skipped MB */


    /************* START ENTROPY CODING *************************/

    start_mb_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;

    /* encode mb_type, mb_pred, sub_mb_pred, CBP */
    if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE && currMB->mbMode != AVC_SKIP)
    {
        //if(!pps->entropy_coding_mode_flag) ALWAYS true
        {
            ue_v(stream, video->mb_skip_run);
            video->mb_skip_run = 0;
        }
    }

    if (currMB->mbMode != AVC_SKIP)
    {
        status = EncodeMBHeader(currMB, encvid);
        if (status != AVCENC_SUCCESS)
        {
            return status;
        }
    }

    start_text_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;

    /**** now decoding part *******/
    resType = AVC_Luma;

    /* DC transform for luma I16 mode */
    if (currMB->mbMode == AVC_I16)
    {
        /* vlc encode level/run */
        status = enc_residual_block(encvid, AVC_Intra16DC, encvid->numcoefdc, currMB);
        if (status != AVCENC_SUCCESS)
        {
            return status;
        }
        resType = AVC_Intra16AC;
    }

    /* VLC encoding for luma */
    for (b8 = 0; b8 < 4; b8++)
    {
        if (currMB->CBP&(1 << b8))
        {
            for (b4 = 0; b4 < 4; b4++)
            {
                /* vlc encode level/run */
                status = enc_residual_block(encvid, resType, (b8 << 2) + b4, currMB);
                if (status != AVCENC_SUCCESS)
                {
                    return status;
                }
            }
        }
    }

    /* chroma */
    if (currMB->CBP & (3 << 4)) /* chroma DC residual present */
    {
        for (b8 = 0; b8 < 2; b8++) /* for iCbCr */
        {
            /* vlc encode level/run */
            status = enc_residual_block(encvid, AVC_ChromaDC, encvid->numcoefcdc[b8] + (b8 << 3), currMB);
            if (status != AVCENC_SUCCESS)
            {
                return status;
            }
        }
    }

    if (currMB->CBP & (2 << 4))
    {
        /* AC part */
        for (b8 = 0; b8 < 2; b8++) /* for iCbCr */
        {
            for (b4 = 0; b4 < 4; b4++)  /* for each block inside Cb or Cr */
            {
                /* vlc encode level/run */
                status = enc_residual_block(encvid, AVC_ChromaAC, 16 + (b8 << 2) + b4, currMB);
                if (status != AVCENC_SUCCESS)
                {
                    return status;
                }
            }
        }
    }


    num_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;

    RCPostMB(video, rateCtrl, start_text_bits - start_mb_bits,
             num_bits - start_text_bits);

//  num_bits -= start_mb_bits;
//  fprintf(fdebug,"MB #%d: %d bits\n",CurrMbAddr,num_bits);
//  fclose(fdebug);
    return status;
}

/* copy the content from predBlock back to the reconstructed YUV frame.
 * predBlock holds 16x16 luma followed by interleaved 8x8 Cb/Cr rows
 * (per the read order below); picPitch is the luma row stride of the
 * destination frame. */
void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picPitch)
{
    int j, offset;
    uint32 *dst, *dst2, *src;

    dst = (uint32*)curL;
    src = (uint32*)predBlock;

    offset = (picPitch - 16) >> 2;

    for (j = 0; j < 16; j++)
    {
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;

        dst += offset;
    }

    dst = (uint32*)curCb;
    dst2 = (uint32*)curCr;
    offset >>= 1;

    for (j = 0; j < 8; j++)
    {
        *dst++ = *src++;
        *dst++ = *src++;
        *dst2++ = *src++;
        *dst2++ = *src++;

        dst += offset;
        dst2 += offset;
    }

    return ;
}

/* encode mb_type, mb_pred, sub_mb_pred, CBP */
/* decide whether this MB (for inter MB) should be skipped */
AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *encvid)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    uint mb_type;
    AVCCommonObj *video = encvid->common;
    AVCEncBitstream *stream = encvid->bitstream;

    if (currMB->CBP > 47)   /* chroma CBP is 11 */
    {
        currMB->CBP -= 16;  /* remove the 5th bit from the right */
    }

    mb_type = InterpretMBType(currMB, video->slice_type);

    status = ue_v(stream, mb_type);

    if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
    {
        status = sub_mb_pred(video, currMB, stream);
    }
    else
    {
        status = mb_pred(video, currMB, stream) ;
    }

    if (currMB->mbMode != AVC_I16)
    {
        /* decode coded_block_pattern */
        status = EncodeCBP(currMB, stream);
    }

    /* calculate currMB->mb_qp_delta = currMB->QPy - video->QPyprev */
    if (currMB->CBP > 0 || currMB->mbMode == AVC_I16)
    {
        status = se_v(stream, currMB->QPy - video->QPy);
        video->QPy = currMB->QPy; /* = (video->QPyprev + currMB->mb_qp_delta + 52)%52; */
        // no need video->QPc = currMB->QPc;
    }
    else
    {
        if (currMB->QPy != video->QPy) // current QP is not the same as previous QP
        {
            /* restore these values */
            RCRestoreQP(currMB, video, encvid);
        }
    }

    return status;
}


/* inputs are mbMode, mb_intra, i16Mode, CBP, NumMbPart, MbPartWidth, MbPartHeight */
/* maps the encoder's internal MB mode state to the mb_type codeword
 * defined by the H.264 syntax (Tables 7-10/7-13). */
uint InterpretMBType(AVCMacroblock *currMB, int slice_type)
{
    int CBP_chrom;
    int mb_type;// part1, part2, part3;
    //  const static int MapParts2Type[2][3][3]={{{4,8,12},{10,6,14},{16,18,20}},
    //  {{5,9,13},{11,7,15},{17,19,21}}};

    if (currMB->mb_intra)
    {
        if (currMB->mbMode == AVC_I4)
        {
            mb_type = 0;
        }
        else if (currMB->mbMode == AVC_I16)
        {
            CBP_chrom = (currMB->CBP & 0x30);
            if (currMB->CBP&0xF)
            {
                currMB->CBP |= 0xF;  /* either 0x0 or 0xF */
                mb_type = 13;
            }
            else
            {
                mb_type = 1;
            }
            mb_type += (CBP_chrom >> 2) + currMB->i16Mode;
        }
        else /* if(currMB->mbMode == AVC_I_PCM) */
        {
            mb_type = 25;
        }
    }
    else
    {   /* P-MB */
        /* note that the order of the enum AVCMBMode cannot be changed
        since we use it here. */
        mb_type = currMB->mbMode - AVC_P16;
    }

    if (slice_type == AVC_P_SLICE)
    {
        if (currMB->mb_intra)
        {
            mb_type += 5;
        }
    }
    // following codes have not been tested yet, not needed.
    /*  else if(slice_type == AVC_B_SLICE)
        {
            if(currMB->mbMode == AVC_BDirect16)
            {
                mb_type = 0;
            }
            else if(currMB->mbMode == AVC_P16)
            {
                mb_type = currMB->MBPartPredMode[0][0] + 1; // 1 or 2
            }
            else if(currMB->mbMode == AVC_P8)
            {
                mb_type = 26;
            }
            else if(currMB->mbMode == AVC_P8ref0)
            {
                mb_type = 27;
            }
            else
            {
                part1 = currMB->mbMode - AVC_P16x8;
                part2 = currMB->MBPartPredMode[0][0];
                part3 = currMB->MBPartPredMode[1][0];
                mb_type = MapParts2Type[part1][part2][part3];
            }
        }

        if(slice_type == AVC_SI_SLICE)
        {
            mb_type++;
        }
    */
    return (uint)mb_type;
}

//const static int mbPart2raster[3][4] = {{0,0,0,0},{1,1,0,0},{1,0,1,0}};

/* see subclause 7.3.5.1 */
/* Write the mb_pred() syntax: intra modes for I4/I16 MBs, or reference
 * indices and motion vector differences for inter MBs. */
AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    int mbPartIdx;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    int max_ref_idx;
    uint code;

    if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
    {
        if (currMB->mbMode == AVC_I4)
        {
            /* perform prediction to get the actual intra 4x4 pred mode */
            EncodeIntra4x4Mode(video, currMB, stream);
            /* output will be in currMB->i4Mode[4][4] */
        }

        /* assume already set from MBPrediction() */
        status = ue_v(stream, currMB->intra_chroma_pred_mode);
    }
    else if (currMB->MBPartPredMode[0][0] != AVC_Direct)
    {

        oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);

        /* see subclause 7.4.5.1 for the range of ref_idx_lX */
        max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
        /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
                max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1;
        */
        /* decode ref index for L0 */
        if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)
        {
            for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
            {
                if (/*(sliceHdr->num_ref_idx_l0_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
                    currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
                {
                    code = currMB->ref_idx_L0[mbPartIdx];
                    status = te_v(stream, code, max_ref_idx);
                }
            }
        }

        /* see subclause 7.4.5.1 for the range of ref_idx_lX */
        max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
        /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
                max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;
        */
        /* decode ref index for L1 */
        if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)
        {
            for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
            {
                if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
                    currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
                {
                    status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);
                }
            }
        }

        /* encode mvd_l0 */
        for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
        {
            if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
            {
                status = se_v(stream, video->mvd_l0[mbPartIdx][0][0]);
                status = se_v(stream, video->mvd_l0[mbPartIdx][0][1]);
            }
        }
        /* encode mvd_l1 */
        for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
        {
            if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
            {
                status = se_v(stream, video->mvd_l1[mbPartIdx][0][0]);
                status = se_v(stream, video->mvd_l1[mbPartIdx][0][1]);
            }
        }
    }

    return status;
}

/* see subclause 7.3.5.2 */
/* Write the sub_mb_pred() syntax for P8/P8ref0 MBs: four sub_mb_types,
 * per-partition reference indices, and per-sub-partition mvd values. */
AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
{
    AVCEnc_Status status = AVCENC_SUCCESS;
    int mbPartIdx, subMbPartIdx;
    AVCSliceHeader *sliceHdr = video->sliceHdr;
    uint max_ref_idx;
    uint slice_type = video->slice_type;
    uint sub_mb_type[4];

    /* this should move somewhere else where we don't have to make this check */
    if (currMB->mbMode == AVC_P8ref0)
    {
        oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
    }

    /* we have to check the values to make sure they are valid  */
    /* assign values to currMB->sub_mb_type[] */
    if (slice_type == AVC_P_SLICE)
    {
        InterpretSubMBTypeP(currMB, sub_mb_type);
    }
    /* no need to check for B-slice
        else if(slice_type == AVC_B_SLICE)
        {
            InterpretSubMBTypeB(currMB,sub_mb_type);
        }*/

    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        status = ue_v(stream, sub_mb_type[mbPartIdx]);
    }

    /* see subclause 7.4.5.1 for the range of ref_idx_lX */
    max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
    /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
            max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1; */

    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        if ((sliceHdr->num_ref_idx_l0_active_minus1 > 0 /*|| currMB->mb_field_decoding_flag*/) &&
                currMB->mbMode != AVC_P8ref0 &&
                /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
                currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
        {
            status = te_v(stream, currMB->ref_idx_L0[mbPartIdx], max_ref_idx);
        }
        /* used in deblocking */
        currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;
    }
    /* see subclause 7.4.5.1 for the range of ref_idx_lX */
    max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
    /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
            max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/

    if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)
    {
        for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
        {
            if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
                /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
                currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
            {
                status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);
            }
        }
    }

    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
            currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
        {
            for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
            {
                status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][0]);
                status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][1]);
            }
        }
    }

    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
    {
        if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
            currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
        {
            for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
            {
                status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][0]);
                status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][1]);
            }
        }
    }

    return status;
}

/* input is mblock->sub_mb_type[] */
/* Map P-slice sub-MB modes to the sub_mb_type codewords; relies on the
 * AVCSubMBMode enum ordering starting at AVC_8x8. */
void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type)
{
    int i;
    /* see enum AVCMBType declaration */
    /*const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};
    const static int map2subPartWidth[4] = {8,8,4,4};
    const static int map2subPartHeight[4] = {8,4,8,4};
    const static int map2numSubPart[4] = {1,2,2,4};*/

    for (i = 0; i < 4 ; i++)
    {
        sub_mb_type[i] = mblock->subMbMode[i] - AVC_8x8;
    }

    return ;
}

/* Map B-slice sub-MB modes to sub_mb_type codewords.
 * NOTE: definition continues beyond this extract and is truncated here. */
void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type)
{
    int i;
    /* see enum AVCMBType declaration */
    /* const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8,AVC_8x8,AVC_8x8,
        AVC_8x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_4x4,AVC_4x4,AVC_4x4};
    const static int map2subPartWidth[13] = {4,8,8,8,8,4,8,4,8,4,4,4,4};
    const static int map2subPartHeight[13] = {4,8,8,8,4,8,4,8,4,8,4,4,4};
    const static int map2numSubPart[13] = {4,1,1,1,2,2,2,2,2,2,4,4,4};
    const static int map2predMode[13] = {3,0,1,2,0,0,1,1,2,2,0,1,2};*/

    for (i = 0; i < 4 ; i++)
    {
        if (mblock->subMbMode[i] == AVC_BDirect8)
        {
            sub_mb_type[i] = 0;
        }
        else if
(mblock->subMbMode[i] == AVC_8x8) { sub_mb_type[i] = 1 + mblock->MBPartPredMode[i][0]; } else if (mblock->subMbMode[i] == AVC_4x4) { sub_mb_type[i] = 10 + mblock->MBPartPredMode[i][0]; } else { sub_mb_type[i] = 4 + (mblock->MBPartPredMode[i][0] << 1) + (mblock->subMbMode[i] - AVC_8x4); } } return ; } /* see subclause 8.3.1 */ AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream) { int intra4x4PredModeA = 0; int intra4x4PredModeB, predIntra4x4PredMode; int component, SubBlock_indx, block_x, block_y; int dcOnlyPredictionFlag; uint flag; int rem = 0; int mode; int bindx = 0; for (component = 0; component < 4; component++) /* partition index */ { block_x = ((component & 1) << 1); block_y = ((component >> 1) << 1); for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */ { dcOnlyPredictionFlag = 0; if (block_x > 0) { intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ]; } else { if (video->intraAvailA) { if (video->mblock[video->mbAddrA].mbMode == AVC_I4) { intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3]; } else { intra4x4PredModeA = AVC_I4_DC; } } else { dcOnlyPredictionFlag = 1; } } if (block_y > 0) { intra4x4PredModeB = currMB->i4Mode[((block_y-1) << 2) + block_x]; } else { if (video->intraAvailB) { if (video->mblock[video->mbAddrB].mbMode == AVC_I4) { intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x]; } else { intra4x4PredModeB = AVC_I4_DC; } } else { dcOnlyPredictionFlag = 1; } } if (dcOnlyPredictionFlag) { intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC; } predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB); flag = 0; mode = currMB->i4Mode[(block_y<<2)+block_x]; if (mode == (AVCIntra4x4PredMode)predIntra4x4PredMode) { flag = 1; } else if (mode < predIntra4x4PredMode) { rem = mode; } else { rem = mode - 1; } BitstreamWrite1Bit(stream, flag); if (!flag) { BitstreamWriteBits(stream, 3, rem); } bindx++; 
block_y += (SubBlock_indx & 1) ; block_x += (1 - 2 * (SubBlock_indx & 1)) ; } } return AVCENC_SUCCESS; } ================================================ FILE: RtspCamera/jni/avc_h264/enc/src/vlc_encode.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "avcenc_lib.h" /** See algorithm in subclause 9.1, Table 9-1, Table 9-2. */ AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum) { if (AVCENC_SUCCESS != SetEGBitstring(bitstream, codeNum)) return AVCENC_FAIL; return AVCENC_SUCCESS; } /** See subclause 9.1.1, Table 9-3 */ AVCEnc_Status se_v(AVCEncBitstream *bitstream, int value) { uint codeNum; AVCEnc_Status status; if (value <= 0) { codeNum = -value * 2; } else { codeNum = value * 2 - 1; } status = ue_v(bitstream, codeNum); return status; } AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range) { AVCEnc_Status status; if (range > 1) { return ue_v(bitstream, value); } else { status = BitstreamWrite1Bit(bitstream, 1 - value); return status; } } /** See subclause 9.1, Table 9-1, 9-2. 
*/ // compute leadingZeros and inforbits //codeNum = (1<mbMode == AVC_I4) { codeNum = MapCBP2code[currMB->CBP][0]; } else { codeNum = MapCBP2code[currMB->CBP][1]; } status = ue_v(stream, codeNum); return status; } AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC) { const static uint8 totCoeffTrailOne[3][4][17][2] = { { // 0702 {{1, 1}, {6, 5}, {8, 7}, {9, 7}, {10, 7}, {11, 7}, {13, 15}, {13, 11}, {13, 8}, {14, 15}, {14, 11}, {15, 15}, {15, 11}, {16, 15}, {16, 11}, {16, 7}, {16, 4}}, {{0, 0}, {2, 1}, {6, 4}, {8, 6}, {9, 6}, {10, 6}, {11, 6}, {13, 14}, {13, 10}, {14, 14}, {14, 10}, {15, 14}, {15, 10}, {15, 1}, {16, 14}, {16, 10}, {16, 6}}, {{0, 0}, {0, 0}, {3, 1}, {7, 5}, {8, 5}, {9, 5}, {10, 5}, {11, 5}, {13, 13}, {13, 9}, {14, 13}, {14, 9}, {15, 13}, {15, 9}, {16, 13}, {16, 9}, {16, 5}}, {{0, 0}, {0, 0}, {0, 0}, {5, 3}, {6, 3}, {7, 4}, {8, 4}, {9, 4}, {10, 4}, {11, 4}, {13, 12}, {14, 12}, {14, 8}, {15, 12}, {15, 8}, {16, 12}, {16, 8}}, }, { {{2, 3}, {6, 11}, {6, 7}, {7, 7}, {8, 7}, {8, 4}, {9, 7}, {11, 15}, {11, 11}, {12, 15}, {12, 11}, {12, 8}, {13, 15}, {13, 11}, {13, 7}, {14, 9}, {14, 7}}, {{0, 0}, {2, 2}, {5, 7}, {6, 10}, {6, 6}, {7, 6}, {8, 6}, {9, 6}, {11, 14}, {11, 10}, {12, 14}, {12, 10}, {13, 14}, {13, 10}, {14, 11}, {14, 8}, {14, 6}}, {{0, 0}, {0, 0}, {3, 3}, {6, 9}, {6, 5}, {7, 5}, {8, 5}, {9, 5}, {11, 13}, {11, 9}, {12, 13}, {12, 9}, {13, 13}, {13, 9}, {13, 6}, {14, 10}, {14, 5}}, {{0, 0}, {0, 0}, {0, 0}, {4, 5}, {4, 4}, {5, 6}, {6, 8}, {6, 4}, {7, 4}, {9, 4}, {11, 12}, {11, 8}, {12, 12}, {13, 12}, {13, 8}, {13, 1}, {14, 4}}, }, { {{4, 15}, {6, 15}, {6, 11}, {6, 8}, {7, 15}, {7, 11}, {7, 9}, {7, 8}, {8, 15}, {8, 11}, {9, 15}, {9, 11}, {9, 8}, {10, 13}, {10, 9}, {10, 5}, {10, 1}}, {{0, 0}, {4, 14}, {5, 15}, {5, 12}, {5, 10}, {5, 8}, {6, 14}, {6, 10}, {7, 14}, {8, 14}, {8, 10}, {9, 14}, {9, 10}, {9, 7}, {10, 12}, {10, 8}, {10, 4}}, {{0, 0}, {0, 0}, {4, 13}, {5, 14}, {5, 11}, {5, 9}, {6, 13}, {6, 9}, 
{7, 13}, {7, 10}, {8, 13}, {8, 9}, {9, 13}, {9, 9}, {10, 11}, {10, 7}, {10, 3}},
            {{0, 0}, {0, 0}, {0, 0}, {4, 12}, {4, 11}, {4, 10}, {4, 9}, {4, 8},
             {5, 13}, {6, 12}, {7, 12}, {8, 12}, {8, 8}, {9, 12}, {10, 10}, {10, 6}, {10, 2}}
        }
    };
    /* (table continued from above: {len, code} pairs indexed as
       [vlcnum][TrailingOnes][TotalCoeff] — do not edit, values are
       normative CAVLC coeff_token codes) */

    AVCEnc_Status status = AVCENC_SUCCESS;
    uint code, len;
    int vlcnum;

    if (TrailingOnes > 3)
    {
        return AVCENC_TRAILINGONES_FAIL;
    }

    if (nC >= 8)
    {
        /* fixed-length 6-bit code regime */
        if (TotalCoeff)
        {
            code = ((TotalCoeff - 1) << 2) | (TrailingOnes);
        }
        else
        {
            code = 3; /* special codeword for TotalCoeff == 0 */
        }
        status = BitstreamWriteBits(stream, 6, code);
    }
    else
    {
        /* choose the VLC table from the neighbour-based context nC */
        if (nC < 2)
        {
            vlcnum = 0;
        }
        else if (nC < 4)
        {
            vlcnum = 1;
        }
        else
        {
            vlcnum = 2;
        }

        len = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][0];
        code = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][1];
        status = BitstreamWriteBits(stream, len, code);
    }

    return status;
}

/* Write the coeff_token codeword for a chroma DC block (dedicated
   chroma-DC column of the coeff_token tables). */
AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff)
{
    /* {len, code} indexed by [TrailingOnes][TotalCoeff] */
    const static uint8 totCoeffTrailOneChrom[4][5][2] =
    {
        { {2, 1}, {6, 7}, {6, 4}, {6, 3}, {6, 2}},
        { {0, 0}, {1, 1}, {6, 6}, {7, 3}, {8, 3}},
        { {0, 0}, {0, 0}, {3, 1}, {7, 2}, {8, 2}},
        { {0, 0}, {0, 0}, {0, 0}, {6, 5}, {7, 0}},
    };

    AVCEnc_Status status = AVCENC_SUCCESS;
    uint code, len;

    /* NOTE(review): unlike ce_TotalCoeffTrailingOnes there is no range
       check here — caller must guarantee TrailingOnes <= 3 and
       TotalCoeff <= 4. */
    len = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][0];
    code = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][1];

    status = BitstreamWriteBits(stream, len, code);

    return status;
}

/* see Table 9-7 and 9-8 */
/* Write the total_zeros codeword for a luma (4x4) block.
   Caller must guarantee 1 <= TotalCoeff <= 15; row k of the tables has
   exactly 16-k meaningful entries (the rest are zero padding). */
AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)
{
    /* code lengths, indexed by [TotalCoeff-1][total_zeros] */
    const static uint8 lenTotalZeros[15][16] =
    {
        { 1, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9},
        { 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6},
        { 4, 3, 3, 3, 4, 4, 3, 3, 4, 5, 5, 6, 5, 6},
        { 5, 3, 4, 4, 3, 3, 3, 4, 3, 4, 5, 5, 5},
        { 4, 4, 4, 3, 3, 3, 3, 3, 4, 5, 4, 5},
        { 6, 5, 3, 3, 3, 3, 3, 3, 4, 3, 6},
        { 6, 5, 3, 3, 3, 2, 3, 4, 3, 6},
        { 6, 4, 5, 3, 2, 2, 3, 3, 6},
        { 6, 6, 4, 2, 2, 3, 2, 5},
        { 5, 5, 3, 2, 2, 2, 4},
        { 4, 4, 3, 3, 1, 3},
        { 4, 4, 2, 1, 3},
        { 3, 3, 1, 2},
        { 2, 2, 1},
        { 1, 1},
    };

    /* code values, same indexing */
    const static uint8 codTotalZeros[15][16] =
    {
        {1, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 1},
        {7, 6, 5, 4, 3, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0},
        {5, 7, 6, 5, 4, 3, 4, 3, 2, 3, 2, 1, 1, 0},
        {3, 7, 5, 4, 6, 5, 4, 3, 3, 2, 2, 1, 0},
        {5, 4, 3, 7, 6, 5, 4, 3, 2, 1, 1, 0},
        {1, 1, 7, 6, 5, 4, 3, 2, 1, 1, 0},
        {1, 1, 5, 4, 3, 3, 2, 1, 1, 0},
        {1, 1, 1, 3, 3, 2, 2, 1, 0},
        {1, 0, 1, 3, 2, 1, 1, 1, },
        {1, 0, 1, 3, 2, 1, 1, },
        {0, 1, 1, 2, 1, 3},
        {0, 1, 1, 1, 1},
        {0, 1, 1, 1},
        {0, 1, 1},
        {0, 1},
    };

    int len, code;
    AVCEnc_Status status;

    len = lenTotalZeros[TotalCoeff-1][total_zeros];
    code = codTotalZeros[TotalCoeff-1][total_zeros];

    status = BitstreamWriteBits(stream, len, code);

    return status;
}

/* see Table 9-9 */
/* Write the total_zeros codeword for a 2x2 chroma DC block
   (1 <= TotalCoeff <= 3, 0 <= total_zeros <= 3 - expected). */
AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)
{
    const static uint8 lenTotalZerosChromaDC[3][4] =
    {
        { 1, 2, 3, 3, },
        { 1, 2, 2, 0, },
        { 1, 1, 0, 0, },
    };
    const static uint8 codTotalZerosChromaDC[3][4] =
    {
        { 1, 1, 1, 0, },
        { 1, 1, 0, 0, },
        { 1, 0, 0, 0, },
    };

    int len, code;
    AVCEnc_Status status;

    len = lenTotalZerosChromaDC[TotalCoeff-1][total_zeros];
    code = codTotalZerosChromaDC[TotalCoeff-1][total_zeros];

    status = BitstreamWriteBits(stream, len, code);

    return status;
}

/* see Table 9-10 */
/* Write the run_before codeword; for zerosLeft > 6 the shared
   variable-length row (index 6) is used. */
AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft)
{
    const static uint8 lenRunBefore[7][16] =
    {
        {1, 1},
        {1, 2, 2},
        {2, 2, 2, 2},
        {2, 2, 2, 3, 3},
        {2, 2, 3, 3, 3, 3},
        {2, 3, 3, 3, 3, 3, 3},
        {3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11},
    };
    const static uint8 codRunBefore[7][16] =
    {
        {1, 0},
        {1, 1, 0},
        {3, 2, 1, 0},
        {3, 2, 1, 1, 0},
        {3, 2, 3, 2, 1, 0},
        {3, 0, 1, 3, 2, 5, 4},
        {7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1},
    };

    int len, code;
    AVCEnc_Status status;

    if (zerosLeft <= 6)
    {
        len = lenRunBefore[zerosLeft-1][run_before];
        code = codRunBefore[zerosLeft-1][run_before];
    }
    else
    {
        len = lenRunBefore[6][run_before];
        code = codRunBefore[6][run_before];
    }

    status = BitstreamWriteBits(stream, len, code);

    return status;
}
================================================
FILE: RtspCamera/jni/avc_h264/oscl/oscl_base.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_BASE_H_INCLUDED
#define OSCL_BASE_H_INCLUDED

#include "oscl_config.h"
#include "oscl_types.h"
#include "oscl_error.h"

/* Minimal no-op stand-ins for the PacketVideo OSCL library init/cleanup
   entry points — this Android port needs no global initialization. */
class OsclBase
{
    public:
        OSCL_IMPORT_REF static void Init() {};
        OSCL_IMPORT_REF static void Cleanup() {};
};

/* No-op error-trap facility; leave() intentionally discards its code. */
class OsclErrorTrap
{
    public:
        OSCL_IMPORT_REF static void Init() {};
        OSCL_IMPORT_REF static void Cleanup() {};
        OSCL_IMPORT_REF static void leave(int) {};
};

/* No-op memory-subsystem init/cleanup stand-in. */
class OsclMem
{
    public:
        OSCL_IMPORT_REF static void Init() {};
        OSCL_IMPORT_REF static void Cleanup() {};
};

/* Request-status holder in the style of Symbian's TRequestStatus.
   Only the int32 conversion constructor is defined inline (and it
   discards its argument); the remaining operators and accessors are
   declared but not defined in this header. */
class OsclRequestStatus
{
    public:
        OsclRequestStatus();
        OsclRequestStatus(int32 aVal)
        {
            (void)(aVal);
        };
        int32 operator=(int32 aVal);
        int32 operator==(int32 aVal) const;
        int32 operator!=(int32 aVal) const;
        int32 operator>=(int32 aVal) const;
        int32 operator<=(int32 aVal) const;
        int32 operator>(int32 aVal) const;
        int32 operator<(int32 aVal) const;
        int32 Int() const;
    private:
        int32 iStatus;
};

/* Active-object interface in the style of Symbian's CActive.
   Declaration only — implementations are elsewhere in the project. */
class OsclActiveObject
{
    public:
        /**
         * Scheduling priorities.
         */
        enum TPriority
        {
            /** A low priority, useful for active objects representing background processing.
*/ EPriorityIdle = -100, /** A priority higher than EPriorityIdle but lower than EPriorityStandard. */ EPriorityLow = -20, /** Most active objects will have this priority. */ EPriorityStandard = 0, /** A priority higher than EPriorityStandard; useful for active objects handling user input. */ EPriorityUserInput = 10, /** A priority higher than EPriorityUserInput. */ EPriorityHigh = 20 }; /** * Constructor. * @param aPriority (input param): scheduling priority * @param name (inpup param): optional name for this AO. */ OSCL_IMPORT_REF OsclActiveObject(int32 aPriority, const char name[]); /** * Destructor. */ OSCL_IMPORT_REF virtual ~OsclActiveObject(); /** * Set request active for this AO. * Will panic if the request is already active, * or the active object is not added to any scheduler, * or the calling thread context does not match * the scheduler thread. */ OSCL_IMPORT_REF void SetBusy(); /** * Return true if this AO is active, * false otherwise. */ OSCL_IMPORT_REF bool IsBusy() const; /** * Set request active for this AO and set the status to pending. * PendForExec is identical to SetBusy, but it * additionally sets the request status to OSCL_REQUEST_PENDING. * */ OSCL_IMPORT_REF void PendForExec(); /** * Complate the active request for the AO. Can be * called from any thread. * @param aStatus: request completion status. */ OSCL_IMPORT_REF void PendComplete(int32 aStatus); /** * Add this AO to the current thread's scheduler. */ OSCL_IMPORT_REF void AddToScheduler(); /** * Return true if this AO is added to the scheduler, * false otherwise. */ OSCL_IMPORT_REF bool IsAdded() const; /** * Remove this AO from its scheduler. * Will panic if the calling thread context does * not match the scheduling thread. * Cancels any active request before removing. */ OSCL_IMPORT_REF void RemoveFromScheduler(); /** * Deque is identical to RemoveFromScheduler * It's only needed to prevent accidental usage * of Symbian CActive::Deque. 
*/ OSCL_IMPORT_REF void Deque(); /** * Complete this AO's request immediately. * If the AO is already active, this will do nothing. * Will panic if the AO is not acced to any scheduler, * or if the calling thread context does not match the * scheduling thread. */ OSCL_IMPORT_REF void RunIfNotReady(); /** * Cancel any active request. * If the request is active, this will call the DoCancel * routine, wait for the request to cancel, then set the * request inactive. The AO will not run. * If the request is not active, it does nothing. * Request must be canceled from the same thread * in which it is scheduled. */ OSCL_IMPORT_REF void Cancel(); /** * Return scheduling priority of this active object. */ OSCL_IMPORT_REF int32 Priority() const; /** * Request status access */ OSCL_IMPORT_REF int32 Status()const; OSCL_IMPORT_REF void SetStatus(int32); OSCL_IMPORT_REF int32 StatusRef(); protected: /** * Cancel request handler. * This gets called by scheduler when the request * is cancelled. The default routine will complete * the request. If any additional action is needed, * the derived class may override this. If the derived class * does override DoCancel, it must complete the request. */ //OSCL_IMPORT_REF virtual void DoCancel(); /** * Run Error handler. * This gets called by scheduler when the Run routine leaves. * The default implementation simply returns the leave code. * If the derived class wants to handle errors from Run, * it may override this. The RunError should return OsclErrNone * if it handles the error, otherwise it should return the * input error code. * @param aError: the leave code generated by the Run. */ //OSCL_IMPORT_REF virtual int32 RunError(int32 aError); }; class OsclTimerObject { public: /** * Constructor. * @param aPriority (input param): scheduling priority * @param name (input param): optional name for this AO. */ OSCL_IMPORT_REF OsclTimerObject(int32 aPriority, const char name[]); /** * Destructor. 
*/ //OSCL_IMPORT_REF virtual ~OsclTimerObject(); /** * Add this AO to the current thread's scheduler. */ OSCL_IMPORT_REF void AddToScheduler(); /** * Return true if this AO is added to the scheduler, * false otherwise. */ OSCL_IMPORT_REF bool IsAdded() const; /** * Remove this AO from its scheduler. * Will panic if the calling thread context does * not match the scheduling thread. * Cancels any active request before removing. */ OSCL_IMPORT_REF void RemoveFromScheduler(); /** * Deque is identical to RemoveFromScheduler * It's only needed to prevent accidental usage * of Symbian CActive::Deque. */ OSCL_IMPORT_REF void Deque(); /** * 'After' sets the request active, with request status * OSCL_REQUEST_STATUS_PENDING, and starts a timer. * When the timer expires, the request will complete with * status OSCL_REQUEST_ERR_NONE. * Must be called from the same thread in which the * active object is scheduled. * Will panic if the request is already active, the object * is not added to any scheduler, or the calling thread * does not match the scheduling thread. * @param anInterval: timeout interval in microseconds. */ OSCL_IMPORT_REF void After(int32 aDelayMicrosec); /** * Complete the request after a time interval. * RunIfNotReady is identical to After() except that it * first checks the request status, and if it is already * active, it does nothing. * * @param aDelayMicrosec (input param): delay in microseconds. */ OSCL_IMPORT_REF void RunIfNotReady(uint32 aDelayMicrosec = 0); /** * Set request active for this AO. * Will panic if the request is already active, * or the active object is not added to any scheduler, * or the calling thread context does not match * the scheduler thread. */ OSCL_IMPORT_REF void SetBusy(); /** * Return true if this AO is active, * false otherwise. */ OSCL_IMPORT_REF bool IsBusy() const; /** * Cancel any active request. 
* If the request is active, this will call the DoCancel * routine, wait for the request to cancel, then set the * request inactive. The AO will not run. * If the request is not active, it does nothing. * Request must be canceled from the same thread * in which it is scheduled. */ OSCL_IMPORT_REF void Cancel(); /** * Return scheduling priority of this active object. */ OSCL_IMPORT_REF int32 Priority() const; /** * Request status access */ OSCL_IMPORT_REF int32 Status()const; OSCL_IMPORT_REF void SetStatus(int32); OSCL_IMPORT_REF int32 StatusRef(); protected: /** * Cancel request handler. * This gets called by scheduler when the request * is cancelled. The default routine will cancel * the timer. If any additional action is needed, * the derived class may override this. If the * derived class does override this, it should explicitly * call OsclTimerObject::DoCancel in its own DoCancel * routine. */ //OSCL_IMPORT_REF virtual void DoCancel(); /** * Run Error handler. * This gets called by scheduler when the Run routine leaves. * The default implementation simply returns the leave code. * If the derived class wants to handle errors from Run, * it may override this. The RunError should return OsclErrNone * if it handles the error, otherwise it should return the * input error code. * @param aError: the leave code generated by the Run. */ //OSCL_IMPORT_REF virtual int32 RunError(int32 aError); }; #endif // OSCL_BASE_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_base_macros.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_BASE_MACROS_H_INCLUDED #define OSCL_BASE_MACROS_H_INCLUDED #ifndef OSCL_UNUSED_ARG #define OSCL_UNUSED_ARG(x) (void)(x) #endif #endif // OSCL_BASE_MACROS_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_config.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef OSCL_CONFIG_H_INCLUDED #define OSCL_CONFIG_H_INCLUDED #define OSCL_HAS_BREW_SUPPORT 0 //Not yet supported #define OSCL_HAS_SYMBIAN_SUPPORT 0 // Not yet supported #define OSCL_HAS_LINUX_SUPPORT 1 #endif // OSCL_CONFIG_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_dll.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_DLL_H_INCLUDED #define OSCL_DLL_H_INCLUDED #define OSCL_DLL_ENTRY_POINT() void oscl_dll_entry_point() {} /** * Default DLL entry/exit point function. * * The body of the DLL entry point is given. The macro * only needs to be declared within the source file. * * Usage : * * OSCL_DLL_ENTRY_POINT_DEFAULT() */ #define OSCL_DLL_ENTRY_POINT_DEFAULT() #endif // OSCL_DLL_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_error.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_ERROR_H_INCLUDED #define OSCL_ERROR_H_INCLUDED #define OSCL_LEAVE(x) #endif //OSCL_ERROR_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_error_codes.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef OSCL_ERROR_CODES_H_INCLUDED #define OSCL_ERROR_CODES_H_INCLUDED /** Leave Codes */ typedef int32 OsclLeaveCode; #define OsclErrNone 0 #define OsclErrGeneral 100 #define OsclErrNoMemory 101 #define OsclErrCancelled 102 #define OsclErrNotSupported 103 #define OsclErrArgument 104 #define OsclErrBadHandle 105 #define OsclErrAlreadyExists 106 #define OsclErrBusy 107 #define OsclErrNotReady 108 #define OsclErrCorrupt 109 #define OsclErrTimeout 110 #define OsclErrOverflow 111 #define OsclErrUnderflow 112 #define OsclErrInvalidState 113 #define OsclErrNoResources 114 /** For backward compatibility with old definitions */ #define OSCL_ERR_NONE OsclErrNone #define OSCL_BAD_ALLOC_EXCEPTION_CODE OsclErrNoMemory /** Return Codes */ typedef int32 OsclReturnCode; #define OsclSuccess 0 #define OsclPending 1 #define OsclFailure -1 #endif /*! @} */ ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_exception.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef OSCL_EXCEPTION_H_INCLUDED #define OSCL_EXCEPTION_H_INCLUDED #endif // INCLUDED_OSCL_EXCEPTION_H ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_math.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_MATH_H_INCLUDED #define OSCL_MATH_H_INCLUDED #include #define oscl_pow pow #define oscl_exp exp #define oscl_sqrt sqrt #define oscl_log log #define oscl_cos cos #define oscl_sin sin #define oscl_tan tan #define oscl_asin asin #endif // OSCL_MATH_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_mem.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_MEM_H_INCLUDED #define OSCL_MEM_H_INCLUDED #include "oscl_types.h" #define OSCLMemSizeT size_t #define oscl_memcpy(dest, src, count) memcpy((void *)(dest), (const void *)(src), (OSCLMemSizeT)(count)) #define oscl_memset(dest, ch, count) memset((void *)(dest), (unsigned char)(ch), (OSCLMemSizeT)(count)) #define oscl_memmove(dest, src, bytecount) memmove((void *)(dest), (const void *)(src), (OSCLMemSizeT)(bytecount)) #define oscl_memcmp(buf1, buf2, count) memcmp( (const void *)(buf1), (const void *)(buf2), (OSCLMemSizeT)(count)) #define oscl_malloc(size) malloc((OSCLMemSizeT)(size)) #define oscl_free(memblock) free((void *)(memblock)) #define OSCL_ARRAY_DELETE(ptr) delete [] ptr #define OSCL_ARRAY_NEW(T, count) new T[count] #define OSCL_DELETE(memblock) delete memblock #define OSCL_NEW(arg) new arg #endif // OSCL_MEM_H_INCLUDED ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_string.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ // -*- c++ -*- // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = // O S C L_ S T R I N G C L A S S // This file contains a standardized set of string containers that // can be used in place of character arrays. // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = /*! \addtogroup osclutil OSCL Util * * @{ */ /*! * \file oscl_string.h * \brief Provides a standardized set of string containers that * can be used in place of character arrays. * */ #ifndef OSCL_STRING_H_INCLUDED #define OSCL_STRING_H_INCLUDED #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef OSCL_MEM_H_INCLUDED #include "oscl_mem.h" #endif #endif // OSCL_STRING_H_INCLUDED /*! @} */ ================================================ FILE: RtspCamera/jni/avc_h264/oscl/oscl_types.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ /*! \file oscl_types.h \brief This file contains basic type definitions for common use across platforms. */ #ifndef OSCL_TYPES_H_INCLUDED #define OSCL_TYPES_H_INCLUDED #include #include #include #include #include #include #define OMX_TRUE 1 #define OMX_FALSE 0 //! A typedef for a signed 8 bit integer. #ifndef int8 typedef signed char int8; #endif //! A typedef for an unsigned 8 bit integer. #ifndef uint8 typedef unsigned char uint8; #endif //! A typedef for a signed 16 bit integer. #ifndef int16 typedef short int16; #endif //! A typedef for an unsigned 16 bit integer. #ifndef uint16 typedef unsigned short uint16; #endif //! A typedef for a signed 32 bit integer. #ifndef int32 typedef long int32; #endif //! A typedef for an unsigned 32 bit integer. #ifndef uint32 typedef unsigned long uint32; #endif #ifndef sint8 typedef signed char sint8; #endif #ifndef OsclFloat typedef float OsclFloat; #endif #ifndef uint typedef unsigned int uint; #endif #ifndef int64 #define OSCL_HAS_NATIVE_INT64_TYPE 1 #define OSCL_NATIVE_INT64_TYPE long long typedef OSCL_NATIVE_INT64_TYPE int64; #endif // int64 #ifndef uint64 #define OSCL_HAS_NATIVE_UINT64_TYPE 1 #define OSCL_NATIVE_UINT64_TYPE unsigned long long typedef OSCL_NATIVE_UINT64_TYPE uint64; #endif // uint64 #ifndef OSCL_UNUSED_ARG #define OSCL_UNUSED_ARG(x) (void)(x) #endif #ifndef OSCL_EXPORT_REF #define OSCL_EXPORT_REF #endif #ifndef OSCL_IMPORT_REF #define OSCL_IMPORT_REF #endif #if defined(OSCL_DISABLE_INLINES) #define OSCL_INLINE #define OSCL_COND_EXPORT_REF OSCL_EXPORT_REF #define OSCL_COND_IMPORT_REF OSCL_IMPORT_REF #else #define OSCL_INLINE inline #define OSCL_COND_IMPORT_REF #define OSCL_COND_IMPORT_REF #endif #ifndef INT64 #define INT64 int64 #endif #ifndef UINT64 #define UINT64 uint64 #endif #ifndef UINT64_HILO #define UINT64_HILO(a,b) ((a<<32) | b) #endif #endif // OSCL_TYPES_H_INCLUDED ================================================ FILE: 
RtspCamera/jni/avc_h264/oscl/osclconfig_compiler_warnings.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ // -*- c++ -*- // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = // O S C L C O N F I G _ C O M P I L E R _ W A R N I N G S // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = /*! \file osclconfig_compiler_warnings.h * \brief This file contains the ability to turn off/on compiler warnings * */ // This macro enables the "#pragma GCC system_header" found in any header file that // includes this config file. // "#pragma GCC system_header" suppresses compiler warnings in the rest of that header // file by treating the header as a system header file. // For instance, foo.h has 30 lines, "#pragma GCC system_header" is inserted at line 10, // from line 11 to the end of file, all compiler warnings are disabled. // However, this does not affect any files that include foo.h. 
// #ifdef __GNUC__ #define OSCL_DISABLE_GCC_WARNING_SYSTEM_HEADER #endif #define OSCL_FUNCTION_PTR(x) (&x) ================================================ FILE: RtspCamera/jni/m4v_h263/Android.mk ================================================ include $(call all-subdir-makefiles) ================================================ FILE: RtspCamera/jni/m4v_h263/dec/Android.mk ================================================ # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This makefile supplies the rules for building a library of JNI code for # use by our example platform shared library. LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_MODULE_TAGS := optional # This is the target being built. LOCAL_MODULE:= libH263Decoder # All of the source files that we will compile. 
LOCAL_SRC_FILES:= \ src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.cpp \ src/adaptive_smooth_no_mmx.cpp \ src/bitstream.cpp \ src/block_idct.cpp \ src/cal_dc_scaler.cpp \ src/chv_filter.cpp \ src/chvr_filter.cpp \ src/combined_decode.cpp \ src/conceal.cpp \ src/datapart_decode.cpp \ src/dcac_prediction.cpp \ src/dec_pred_intra_dc.cpp \ src/deringing_chroma.cpp \ src/deringing_luma.cpp \ src/find_min_max.cpp \ src/get_pred_adv_b_add.cpp \ src/get_pred_outside.cpp \ src/idct.cpp \ src/idct_vca.cpp \ src/mb_motion_comp.cpp \ src/mb_utils.cpp \ src/pvdec_api.cpp \ src/packet_util.cpp \ src/post_filter.cpp \ src/post_proc_semaphore.cpp \ src/pp_semaphore_chroma_inter.cpp \ src/pp_semaphore_luma.cpp \ src/scaling_tab.cpp \ src/vlc_decode.cpp \ src/vlc_dequant.cpp \ src/vlc_tab.cpp \ src/vop.cpp \ src/zigzag_tab.cpp \ src/yuv2rgb.cpp \ src/3GPVideoParser.cpp # All of the shared libraries we link against. LOCAL_SHARED_LIBRARIES := # No static libraries. LOCAL_STATIC_LIBRARIES := # Also need the JNI headers. LOCAL_C_INCLUDES += \ $(JNI_H_INCLUDE) \ $(LOCAL_PATH)/src \ $(LOCAL_PATH)/include \ $(LOCAL_PATH)/oscl # No specia compiler flags. LOCAL_CFLAGS += # Don't prelink this library. For more efficient code, you may want # to add this library to the prelink map and set this to true. LOCAL_PRELINK_MODULE := false include $(BUILD_SHARED_LIBRARY) ================================================ FILE: RtspCamera/jni/m4v_h263/dec/include/mp4dec_api.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef _MP4DEC_API_H_ #define _MP4DEC_API_H_ #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef OSCL_TYPES_H_INCLUDED #include "oscl_types.h" #endif #define PV_TOLERATE_VOL_ERRORS #define PV_MEMORY_POOL #ifndef _PV_TYPES_ #define _PV_TYPES_ typedef uint Bool; #define PV_CODEC_INIT 0 #define PV_CODEC_STOP 1 #endif #define PV_TRUE 1 #define PV_FALSE 0 /* flag for post-processing 4/25/00 */ #ifdef DEC_NOPOSTPROC #undef PV_POSTPROC_ON /* enable compilation of post-processing code */ #else #define PV_POSTPROC_ON #endif #define PV_NO_POST_PROC 0 #define PV_DEBLOCK 1 #define PV_DERING 2 #include "visual_header.h" // struct VolInfo is defined /**@name Structure and Data Types * These type definitions specify the input / output from the PVMessage * library. */ /*@{*/ /* The application has to allocate space for this structure */ typedef struct tagOutputFrame { uint8 *data; /* pointer to output YUV buffer */ uint32 timeStamp; /* time stamp */ } OutputFrame; typedef struct tagApplicationData { int layer; /* current video layer */ void *object; /* some optional data field */ } applicationData; /* Application controls, this structed shall be allocated */ /* and initialized in the application. */ typedef struct tagvideoDecControls { /* The following fucntion pointer is copied to BitstreamDecVideo structure */ /* upon initialization and never used again. 
*/ int (*readBitstreamData)(uint8 *buf, int nbytes_required, void *appData); applicationData appData; uint8 *outputFrame; void *videoDecoderData; /* this is an internal pointer that is only used */ /* in the decoder library. */ #ifdef PV_MEMORY_POOL int32 size; #endif int nLayers; /* pointers to VOL data for frame-based decoding. */ uint8 *volbuf[2]; /* maximum of 2 layers for now */ int32 volbuf_size[2]; } VideoDecControls; typedef enum { H263_MODE = 0, MPEG4_MODE, FLV_MODE, UNKNOWN_MODE } MP4DecodingMode; typedef enum { MP4_I_FRAME, MP4_P_FRAME, MP4_B_FRAME, MP4_BAD_FRAME } MP4FrameType; typedef struct tagVopHeaderInfo { int currLayer; uint32 timestamp; MP4FrameType frameType; int refSelCode; int16 quantizer; } VopHeaderInfo; /*--------------------------------------------------------------------------* * VideoRefCopyInfo: * OMAP DSP specific typedef structure, to support the user (ARM) copying * of a Reference Frame into the Video Decoder. *--------------------------------------------------------------------------*/ typedef struct tagVideoRefCopyInfoPtr { uint8 *yChan; /* The Y component frame the user can copy a new reference to */ uint8 *uChan; /* The U component frame the user can copy a new reference to */ uint8 *vChan; /* The V component frame the user can copy a new reference to */ uint8 *currentVop; /* The Vop for video the user can copy a new reference to */ } VideoRefCopyInfoPtr; typedef struct tagVideoRefCopyInfoData { int16 width; /* Width */ int16 height; /* Height */ int16 realWidth; /* Non-padded width, not a multiple of 16. */ int16 realHeight; /* Non-padded height, not a multiple of 16. 
*/ } VideoRefCopyInfoData; typedef struct tagVideoRefCopyInfo { VideoRefCopyInfoData data; VideoRefCopyInfoPtr ptrs; } VideoRefCopyInfo; /*@}*/ #ifdef __cplusplus extern "C" { #endif OSCL_IMPORT_REF Bool PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf[], int32 *volbuf_size, int nLayers, int width, int height, MP4DecodingMode mode); Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLayers); OSCL_IMPORT_REF Bool PVCleanUpVideoDecoder(VideoDecControls *decCtrl); Bool PVResetVideoDecoder(VideoDecControls *decCtrl); OSCL_IMPORT_REF void PVSetReferenceYUV(VideoDecControls *decCtrl, uint8 *refYUV); Bool PVDecSetReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp); Bool PVDecSetEnhReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp); OSCL_IMPORT_REF Bool PVDecodeVideoFrame(VideoDecControls *decCtrl, uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint use_ext_timestamp[], uint8* currYUV); Bool PVDecodeVopHeader(VideoDecControls *decCtrl, uint8 *buffer[], uint32 timestamp[], int32 buffer_size[], VopHeaderInfo *header_info, uint use_ext_timestamp[], uint8 *currYUV); Bool PVDecodeVopBody(VideoDecControls *decCtrl, int32 buffer_size[]); void PVDecPostProcess(VideoDecControls *decCtrl, uint8 *outputYUV); OSCL_IMPORT_REF void PVGetVideoDimensions(VideoDecControls *decCtrl, int32 *display_width, int32 *display_height); OSCL_IMPORT_REF void PVSetPostProcType(VideoDecControls *decCtrl, int mode); uint32 PVGetVideoTimeStamp(VideoDecControls *decoderControl); int PVGetDecBitrate(VideoDecControls *decCtrl); int PVGetDecFramerate(VideoDecControls *decCtrl); uint8 *PVGetDecOutputFrame(VideoDecControls *decCtrl); int PVGetLayerID(VideoDecControls *decCtrl); int32 PVGetDecMemoryUsage(VideoDecControls *decCtrl); OSCL_IMPORT_REF MP4DecodingMode PVGetDecBitstreamMode(VideoDecControls *decCtrl); Bool PVExtractVolHeader(uint8 *video_buffer, uint8 *vol_header, int32 *vol_header_size); int32 
PVLocateFrameHeader(uint8 *video_buffer, int32 vop_size); int32 PVLocateH263FrameHeader(uint8 *video_buffer, int32 vop_size); Bool PVGetVolInfo(VideoDecControls *decCtrl, VolInfo *pVolInfo); // BX 6/24/04 Bool IsIntraFrame(VideoDecControls *decoderControl); #ifdef __cplusplus } #endif #endif /* _MP4DEC_API_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/include/pvm4vdecoder.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef PVM4VDECODER_H_INCLUDED #define PVM4VDECODER_H_INCLUDED #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef VISUAL_HEADER_H_INCLUDED #include "visual_header.h" #endif #ifndef PVVIDEODECODERINTERFACE_H_INCLUDED #include "pvvideodecoderinterface.h" #endif class PVM4VDecoder : public PVVideoDecoderInterface { public: virtual ~PVM4VDecoder(); static PVM4VDecoder* New(void); /** This is the initialization routine of the MPEG-4 video decoder for decoding an nLayers MPEG-4 bitstream (not used for H.263 and ShortHeader Modes). Video object layer headers for all layers are passed in through the array of buffer, volbuf[]. The size of each header is stored in volbuf_size[]. 
The iWidth and iHeight fields specify the maximum decoded frame dimensions that should be handled by the decoder for H.263 and ShortHeader Modes (does not have any effect for MPEG-4 Mode). When the initialization routine is completed, for an MPEG-4 input bitstream the display width and display height will be set to iWidth and iHeight, respectively. The mode specifies the elementary bitstream type (0:H.263 and 1:M4V). This function shall be called before any other API's are used. */ virtual bool InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers, int32* iWidth, int32* iHeight, int *mode); /** This function frees all the memory used by the decoder library. */ virtual void CleanUpVideoDecoder(void); /** This function takes the compressed bitstreams of a multiple layer video and decodes the next YUV 4:2:0 frame to be displayed. The application has to allocate memory for the output frame before this function is called, and should be passed into the decoder through yuv parameter. The allocated memory should be WORD aligned. The input bitstream is decoded into this passed YUV buffer. The unpadded (non-multiple of 16) size of the frame can be obtained by calling GetVideoDimensions() api. The input parameter, bitstream[], is an array of buffers that stores the next frames to be decoded from all the layers. The use_ext_timestamp[] parameter tells the decoder to use the externally provided system timestamp (1) (ignoring internal bitstream timestamp) or bitstream (0) timestamp. The buffer_size[] parameter for video layers is updated with the remaining number of bytes in each layer after consuming a frame worth of data from a particular layer. This is useful if multiple frame data is passed into the video decoder at once. The decoder will decode one frame at a time. If there is no data at the time of decoding for layer idx, buffer_size[idx] shall be set to 0, otherwise it shall be set to the number of bytes available. 
Upon return, this array flags each layer that was used by decoder library. For example, if the buffer of layer idx is used by the library, buffer_size[idx] will be set to 0. The application has to refill the data in this buffer before the decoding of the next frame. Note that the decoder may use more than one layer of the bitstream at the same time (in the case of spatial/SNR scalability). The function returns FALSE (0) if an error has occurred during the decoding process. The decoding operation requires at least 2 frame buffers. It is up to the user to manage the handling of frame buffers. The frames are always decoded into the YUV-buffer that is passed in using the yuv frame pointer parameter. This YUV frame buffer is kept as reference frame for decoding of the next frame. After decoding of the frame following the current frame this buffer can be recycled or freed. */ virtual bool DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_timestamp, uint8 *yuv); /** This function sets the reference frame for the decoder. The user should allocate the memory for the reference frame. The size of the reference frame is determined after calling the GetVideoDimensions( ) api. The size should be set to ((display_height + 15)/16)*16 x ((display_width + 15)/16)*16. */ virtual void SetReferenceYUV(uint8 *YUV); /** This function returns the display width and height of the video bitstream. */ virtual void GetVideoDimensions(int32 *display_width, int32 *display_height); /** This function sets the postprocessing type to be used. pp_mode =0 is no postprocessing, pp_mode=1 is deblocking only, pp_mode=3 is deblocking + deringing. */ virtual void SetPostProcType(int32 mode); /** This function returns the timestamp of the most recently decoded video frame. */ virtual uint32 GetVideoTimestamp(void); /** This function is used to get VOL header info.Currently only used to get profile and level info. 
*/ virtual bool GetVolInfo(VolInfo* pVolInfo); /** This function returns profile and level id. */ virtual uint32 GetProfileAndLevel(void); /** This function returns average bitrate. (bits per sec) */ virtual uint32 GetDecBitrate(void); /** This function checks whether the last decoded frame is an INTRA frame or not. */ virtual bool IsIFrame(void); /** This function performs postprocessing on the current decoded frame and writes the postprocessed frame to the *yuv frame. If a separate yuv frame is not used for postprocessed frames NULL pointer can be passed in. In this case the postprocessing is done in an internal yuv frame buffer. The pointer to his buffer can be obtained by the next GetDecOutputFrame( ) api. */ virtual void DecPostProcess(uint8 *YUV); /** This function returns the pointer to the frame to be displayed. */ virtual uint8* GetDecOutputFrame(void); /** This function is not used. */ virtual bool ResetVideoDecoder(void); /** This function is not used. */ virtual void DecSetReference(uint8 *refYUV, uint32 timestamp); /** This function is not used. */ virtual void DecSetEnhReference(uint8 *refYUV, uint32 timestamp); private: PVM4VDecoder(); bool Construct(void); void *iVideoCtrls; }; #endif // PVM4VDECODER_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/dec/include/pvm4vdecoder_dpi.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
* See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef __PVVIDEODECBASE_H #define __PVVIDEODECBASE_H // includes #include #include #include "mp4dec_api.h" #include "dspmsgproto1.h" #include "dspmsgproto2.h" #include "dspmsgproto4.h" #include "dsp_msg_proto6.h" #include "DspMsgProto7.h" #include "dspmsgproto9.h" #include "dspmsgproto10.h" #include "dspmsgproto11.h" #include "dspmsgproto12.h" #include "dspmsgproto13.h" #include "dspmsgproto14.h" #include "dspmsgproto15.h" #include "dspmsgproto16.h" #include "dspmsgproto17.h" #include "dspmsgproto20.h" #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef VISUAL_HEADER_H_INCLUDED #include "visual_header.h" #endif #ifndef PVVIDEODECODERINTERFACE_H_INCLUDED #include "pvvideodecoderinterface.h" #endif #include "pvzcdt.h" #define UChar uint8 #define MAX_LAYERS 1 #define USING_SYNC_2STEP class PVM4VDecoder_DPI : public PVVideoDecoderInterface { public: PVM4VDecoder_DPI(); PVM4VDecoder_DPI(CPVDsp* aDsp); ~PVM4VDecoder_DPI(); static PVM4VDecoder_DPI* New(void); IMPORT_C static PVM4VDecoder_DPI* NewL(CPVDsp* aDsp); IMPORT_C bool InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers, int32* iWidth, int32* iHeight, int *mode); ////// not implemeneted///////////////////////////////////////////////////////////////////////////////////////////////////////////// IMPORT_C bool GetVolInfo(VolInfo* pVolInfo) {}; IMPORT_C void DecPostProcess(uint8 *YUV) {}; IMPORT_C void DecSetEnhReference(uint8 *refYUV, uint32 timestamp) {}; IMPORT_C void SetReferenceYUV(uint8 *YUV) {}; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// IMPORT_C void GetVideoDimensions(int32 *display_width, int32 *display_height) { *display_width = GetVideoWidth(); *display_height = GetVideoHeight(); }; #ifdef USING_SYNC_2STEP IMPORT_C 
bool getSynchResponse(uint32 timestamp[], int32 buffer_size[]); IMPORT_C bool DSPDecoderBusy(); #endif IMPORT_C int32 GetVideoWidth(void); IMPORT_C int32 GetVideoHeight(void); IMPORT_C int32 DPIFreeVideoDecCtrls(void); IMPORT_C void CleanUpVideoDecoder(void); IMPORT_C bool IsIFrame(void); IMPORT_C void SetPostProcType(int32 aMode); IMPORT_C bool DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_ts, uint8 *YUV); IMPORT_C bool DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_ts, uint8 *YUV, TRequestStatus *aRequestStatus); IMPORT_C bool DecodeVideoFrameAsyncResp(uint32 timestamp[], int32 buffer_size[]); IMPORT_C bool DecodeStillVideoFrame(uint8 *buffer, int32 buf_size, uint8 *YUV); IMPORT_C bool GetStillVideoFrameSize(uint8 *buffer, int32 buf_size, int32 *width, int32 *height); IMPORT_C uint8* GetDecOutputFrame(void); IMPORT_C void GetDecOutputFrame(uint8*); IMPORT_C uint8* CopyDecOutputFrameToSharedMemBuf(void); IMPORT_C bool ResetVideoDecoder(void); IMPORT_C TDspPointer DPIAllocVideoDecCtrls(void); IMPORT_C uint32 GetVideoTimestamp(void); IMPORT_C uint32 GetProfileAndLevel(void); IMPORT_C uint32 GetDecBitrate(void); // only port the API's used in PVPlayer 2.0 IMPORT_C bool ExtractVolHeader(uint8 *video_buffer, uint8 *vol_header, int32 *vol_header_size); IMPORT_C void DecSetReference(uint8 *refYUV, uint32 timestamp); #if defined USE_PV_TRANSFER_BUFFER IMPORT_C inline RPVTransferBuffer& GetTxTransferBuffer() { return iTxTransferBuffer; } IMPORT_C inline RPVTransferBuffer& GetRxTransferBuffer() { return iRxTransferBuffer; } #else IMPORT_C inline RTransferBuffer& GetTxTransferBuffer() { return iTxTransferBuffer; } IMPORT_C inline RTransferBuffer& GetRxTransferBuffer() { return iRxTransferBuffer; } #endif IMPORT_C inline TInt GetNLayers() { return iNLayers; } IMPORT_C inline void SetNLayers(int32 aNLayers) { iNLayers = aNLayers; } TDspPointer iVideoCtrls; protected: void ConstructL(void); 
bool Construct(); CPVDsp *iDsp; TDspPointer *iBitstreamDspPointer; int32 iNLayers; CDspMsgProto7 dspMsgProto7; CDspMsgProto15 dspMsgProto15; CDspMsgProto16 dspMsgProto16; private: bool iWaitingBitstream; // the order of object declaration is required to ensure the proper sequence of constructor invocation #if defined USE_PV_TRANSFER_BUFFER RPVTransferBuffer iTxTransferBuffer; RPVTransferWindow iTxTransferWindow; RPVTransferBuffer iRxTransferBuffer; RPVTransferWindow iRxTransferWindow; #else RTransferBuffer iTxTransferBuffer; RTransferWindow iTxTransferWindow; RTransferBuffer iRxTransferBuffer; RTransferWindow iRxTransferWindow; #endif RPVTransferBuffer iVideoTransBuf[MAX_LAYERS]; RPVTransferWindow iVideoTransWin[MAX_LAYERS]; RPVTransferBuffer iVolHeaderTransBuf[MAX_LAYERS]; RPVTransferWindow iVolHeaderTransWin[MAX_LAYERS]; RPVTransferBuffer iDecodeBusyFlagTransBuf; RPVTransferWindow iDecodeBusyFlagTransWin; unsigned short *iDecodeBusyFlagPtr; unsigned char *iVolHeader[MAX_LAYERS]; unsigned char *iVideoBuffer[MAX_LAYERS]; uint iHeight; uint iWidth; uint iBufferHeight; uint iBufferWidth; CDspMsgProto1 dspMsgProto1; CDspMsgProto2 dspMsgProto2; CDspMsgProto4 dspMsgProto4; CDspMsgProto6 dspMsgProto6; // for SBR CDspMsgProto9 dspMsgProto9; CDspMsgProto10 dspMsgProto10; CDspMsgProto11 dspMsgProto11; CDspMsgProto12 dspMsgProto12; CDspMsgProto13 dspMsgProto13; CDspMsgProto14 dspMsgProto14; CDspMsgProto17 dspMsgProto17; CDspMsgProto20 dspMsgProto20; int32 iCurrVideoTimeStamp; uint8* iYuvOutputPtr; }; #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/include/pvm4vdecoder_factory.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef PVM4VDECODER_FACTORY_H_INCLUDED #define PVM4VDECODER_FACTORY_H_INCLUDED #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef OSCL_MEM_H_INCLUDED #include "oscl_mem.h" #endif class PVVideoDecoderInterface; class PVM4VDecoderFactory { public: /** * Creates an instance of a PVM4VDecoder. If the creation fails, this function will leave. * * @returns A pointer to an instance of PVM4VDecoder as PVVideoDecoderInterface reference or leaves if instantiation fails **/ OSCL_IMPORT_REF static PVVideoDecoderInterface* CreatePVM4VDecoder(void); /** * Deletes an instance of PVM4VDecoder and reclaims all allocated resources. * * @param aVideoDec The PVM4VDecoder instance to be deleted * @returns A status code indicating success or failure of deletion **/ OSCL_IMPORT_REF static bool DeletePVM4VDecoder(PVVideoDecoderInterface* aVideoDec); }; #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/include/pvvideodecoderinterface.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef PVVIDEODECODERINTERFACE_H_INCLUDED
#define PVVIDEODECODERINTERFACE_H_INCLUDED

// includes
#ifndef OSCL_BASE_H_INCLUDED
#include "oscl_base.h"
#endif
#ifndef VISUAL_HEADER_H_INCLUDED
#include "visual_header.h"
#endif
#include "oscl_aostatus.h"

// PVVideoDecoderInterface pure virtual interface class
//
// Abstract interface to an M4V/H.263 video decoder.  The pure-virtual
// members must be supplied by a concrete decoder; the asynchronous /
// DSP-related members below have inline no-op default bodies so that a
// purely synchronous decoder does not have to implement them.
class PVVideoDecoderInterface
{
    public:
        virtual ~PVVideoDecoderInterface() {};

        // Parse the VOL header buffer(s) and allocate decoder state.
        // volbuf/volbuf_size: one VOL bitstream buffer (and its size) per
        // layer; nLayers: number of layers; iWidth/iHeight: out params for
        // the decoded frame size; mode: decoder mode flag.
        virtual bool InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers,
                                      int32* iWidth, int32* iHeight, int *mode) = 0;

        // Release all decoder resources.
        virtual void CleanUpVideoDecoder(void) = 0;

        // Decode one frame from bitstream into yuv; returns true on success.
        virtual bool DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size,
                                      uint *use_ext_ts, uint8 *yuv) = 0;

        // decode for dual core asynchronous operation
        // Default body ignores every argument and reports success.
        virtual bool DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size,
                                      uint *use_ext_ts, uint8 *yuv, OsclAOStatus *asynch)
        {
            OSCL_UNUSED_ARG(bitstream);
            OSCL_UNUSED_ARG(timestamp);
            OSCL_UNUSED_ARG(buffer_size);
            OSCL_UNUSED_ARG(use_ext_ts);
            OSCL_UNUSED_ARG(yuv);
            OSCL_UNUSED_ARG(asynch);
            return true;
        };

        // Completion response for the asynchronous decode; no-op default.
        virtual bool DecodeVideoFrameAsyncResp(uint32 timestamp[], int32 buffer_size[])
        {
            OSCL_UNUSED_ARG(timestamp);
            OSCL_UNUSED_ARG(buffer_size);
            return true;
        };

        // virtual uint8* GetDecOutputFrame(void) {};
        virtual void GetDecOutputFrame(uint8*) {};

        // Synchronous poll of the asynchronous decode result; no-op default.
        virtual bool getSynchResponse(uint32 timestamp[], int32 buffer_size[])
        {
            OSCL_UNUSED_ARG(timestamp);
            OSCL_UNUSED_ARG(buffer_size);
            return true;
        };

        // Default reports the (absent) DSP decoder as busy.
        virtual bool DSPDecoderBusy()
        {
            return true;
        };

        virtual void SetReferenceYUV(uint8 *YUV) = 0;
        virtual void GetVideoDimensions(int32 *display_width, int32 *display_height) = 0;
        virtual void SetPostProcType(int32 mode) = 0;
        virtual uint32 GetVideoTimestamp(void) = 0;
        virtual bool GetVolInfo(VolInfo* pVolInfo) = 0;
        virtual bool IsIFrame(void) = 0;
        virtual void DecPostProcess(uint8 *YUV) = 0;
        virtual uint8* GetDecOutputFrame(void) = 0;
        virtual bool ResetVideoDecoder(void) = 0;
        virtual void DecSetReference(uint8 *refYUV, uint32 timestamp) = 0;
        virtual void DecSetEnhReference(uint8 *refYUV, uint32 timestamp) = 0;
        virtual uint32 GetProfileAndLevel(void) = 0;
        virtual uint32 GetDecBitrate(void) = 0; // This function returns the average bits per second.
};

#endif // PVVIDEODECODERINTERFACE_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/dec/include/visual_header.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef _VISUAL_HEADER_H
#define _VISUAL_HEADER_H

#ifndef _PV_TYPES_ // In order to compile in MDF wrapper
#define _PV_TYPES_
typedef uint Bool;
#endif // #ifndef _PV_TYPES_

// Decoded VOL (Video Object Layer) header information, reported by the
// decoder through PVVideoDecoderInterface::GetVolInfo().
typedef struct tagVolInfo
{
    int32   shortVideoHeader;   /* shortVideoHeader mode */
    /* Error Resilience Flags */
    int32   errorResDisable;    /* VOL disable error resilence mode(Use Resynch markers) */
    int32   useReverseVLC;      /* VOL reversible VLCs */
    int32   dataPartitioning;   /* VOL data partitioning */
    /* Parameters used for scalability */
    int32   scalability;        /* VOL scalability (flag) */
    int32   nbitsTimeIncRes;    /* number of bits for time increment () */
    int32   profile_level_id;   /* profile and level */
} VolInfo;

#endif // #ifndef _VISUAL_HEADER_H


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_base.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_BASE_H_INCLUDED
#define OSCL_BASE_H_INCLUDED

#include "oscl_config.h"
#include "oscl_types.h"
#include "oscl_error.h"

// NOTE(review): the classes below are reduced stand-ins for the full
// PacketVideo OSCL runtime.  Several methods are declared here without a
// visible definition in this port -- presumably supplied elsewhere in the
// JNI build; verify before relying on them.

// Global OSCL init/cleanup hooks (no-op inline stubs in this port).
class OsclBase
{
    public:
        OSCL_IMPORT_REF static void Init() {};
        OSCL_IMPORT_REF static void Cleanup() {};
};

// Error-trap facility (no-op inline stubs in this port).
class OsclErrorTrap
{
    public:
        OSCL_IMPORT_REF static void Init() {};
        OSCL_IMPORT_REF static void Cleanup() {};
        OSCL_IMPORT_REF static void leave(int) {};
};

// Memory-manager init/cleanup hooks (no-op inline stubs in this port).
class OsclMem
{
    public:
        OSCL_IMPORT_REF static void Init() {};
        OSCL_IMPORT_REF static void Cleanup() {};
};

// Holds the completion status of an asynchronous request.
// NOTE(review): apart from the int32 constructor (which discards its
// argument), the operators and accessors are only declared here; their
// definitions are not in this header.
class OsclRequestStatus
{
    public:
        OsclRequestStatus();
        OsclRequestStatus(int32 aVal)
        {
            (void)(aVal);
        };
        int32 operator=(int32 aVal);
        int32 operator==(int32 aVal) const;
        int32 operator!=(int32 aVal) const;
        int32 operator>=(int32 aVal) const;
        int32 operator<=(int32 aVal) const;
        int32 operator>(int32 aVal) const;
        int32 operator<(int32 aVal) const;
        int32 Int() const;
    private:
        int32 iStatus;  // raw status value wrapped by the operators above
};

// Active object: the OSCL cooperative-scheduling primitive (modeled on
// Symbian CActive).  Declarations only in this port.
class OsclActiveObject
{
    public:
        /**
         * Scheduling priorities.
         */
        enum TPriority
        {
            /** A low priority, useful for active objects representing
                background processing. */
            EPriorityIdle = -100,
            /** A priority higher than EPriorityIdle but lower than
                EPriorityStandard. */
            EPriorityLow = -20,
            /** Most active objects will have this priority. */
            EPriorityStandard = 0,
            /** A priority higher than EPriorityStandard; useful for active
                objects handling user input. */
            EPriorityUserInput = 10,
            /** A priority higher than EPriorityUserInput. */
            EPriorityHigh = 20
        };

        /**
         * Constructor.
         * @param aPriority (input param): scheduling priority
         * @param name (input param): optional name for this AO.
         */
        OSCL_IMPORT_REF OsclActiveObject(int32 aPriority, const char name[]);

        /**
         * Destructor.
         */
        OSCL_IMPORT_REF virtual ~OsclActiveObject();

        /**
         * Set request active for this AO.
         * Will panic if the request is already active,
         * or the active object is not added to any scheduler,
         * or the calling thread context does not match
         * the scheduler thread.
         */
        OSCL_IMPORT_REF void SetBusy();

        /**
         * Return true if this AO is active,
         * false otherwise.
         */
        OSCL_IMPORT_REF bool IsBusy() const;

        /**
         * Set request active for this AO and set the status to pending.
         * PendForExec is identical to SetBusy, but it
         * additionally sets the request status to OSCL_REQUEST_PENDING.
         */
        OSCL_IMPORT_REF void PendForExec();

        /**
         * Complete the active request for the AO. Can be
         * called from any thread.
         * @param aStatus: request completion status.
         */
        OSCL_IMPORT_REF void PendComplete(int32 aStatus);

        /**
         * Add this AO to the current thread's scheduler.
         */
        OSCL_IMPORT_REF void AddToScheduler();

        /**
         * Return true if this AO is added to the scheduler,
         * false otherwise.
         */
        OSCL_IMPORT_REF bool IsAdded() const;

        /**
         * Remove this AO from its scheduler.
         * Will panic if the calling thread context does
         * not match the scheduling thread.
         * Cancels any active request before removing.
         */
        OSCL_IMPORT_REF void RemoveFromScheduler();

        /**
         * Deque is identical to RemoveFromScheduler.
         * It's only needed to prevent accidental usage
         * of Symbian CActive::Deque.
         */
        OSCL_IMPORT_REF void Deque();

        /**
         * Complete this AO's request immediately.
         * If the AO is already active, this will do nothing.
         * Will panic if the AO is not added to any scheduler,
         * or if the calling thread context does not match the
         * scheduling thread.
         */
        OSCL_IMPORT_REF void RunIfNotReady();

        /**
         * Cancel any active request.
         * If the request is active, this will call the DoCancel
         * routine, wait for the request to cancel, then set the
         * request inactive. The AO will not run.
         * If the request is not active, it does nothing.
         * Request must be canceled from the same thread
         * in which it is scheduled.
         */
        OSCL_IMPORT_REF void Cancel();

        /**
         * Return scheduling priority of this active object.
         */
        OSCL_IMPORT_REF int32 Priority() const;

        /**
         * Request status access
         */
        OSCL_IMPORT_REF int32 Status()const;
        OSCL_IMPORT_REF void SetStatus(int32);
        OSCL_IMPORT_REF int32 StatusRef();

    protected:
        /**
         * Cancel request handler.
         * This gets called by scheduler when the request
         * is cancelled. The default routine will complete
         * the request. If any additional action is needed,
         * the derived class may override this. If the derived class
         * does override DoCancel, it must complete the request.
         */
        //OSCL_IMPORT_REF virtual void DoCancel();

        /**
         * Run Error handler.
         * This gets called by scheduler when the Run routine leaves.
         * The default implementation simply returns the leave code.
         * If the derived class wants to handle errors from Run,
         * it may override this. The RunError should return OsclErrNone
         * if it handles the error, otherwise it should return the
         * input error code.
         * @param aError: the leave code generated by the Run.
         */
        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);
};

// Timer-driven active object whose request completes after a delay.
// Declarations only in this port.
class OsclTimerObject
{
    public:
        /**
         * Constructor.
         * @param aPriority (input param): scheduling priority
         * @param name (input param): optional name for this AO.
         */
        OSCL_IMPORT_REF OsclTimerObject(int32 aPriority, const char name[]);

        /**
         * Destructor.
         */
        //OSCL_IMPORT_REF virtual ~OsclTimerObject();

        /**
         * Add this AO to the current thread's scheduler.
         */
        OSCL_IMPORT_REF void AddToScheduler();

        /**
         * Return true if this AO is added to the scheduler,
         * false otherwise.
         */
        OSCL_IMPORT_REF bool IsAdded() const;

        /**
         * Remove this AO from its scheduler.
         * Will panic if the calling thread context does
         * not match the scheduling thread.
         * Cancels any active request before removing.
         */
        OSCL_IMPORT_REF void RemoveFromScheduler();

        /**
         * Deque is identical to RemoveFromScheduler.
         * It's only needed to prevent accidental usage
         * of Symbian CActive::Deque.
         */
        OSCL_IMPORT_REF void Deque();

        /**
         * 'After' sets the request active, with request status
         * OSCL_REQUEST_STATUS_PENDING, and starts a timer.
         * When the timer expires, the request will complete with
         * status OSCL_REQUEST_ERR_NONE.
         * Must be called from the same thread in which the
         * active object is scheduled.
         * Will panic if the request is already active, the object
         * is not added to any scheduler, or the calling thread
         * does not match the scheduling thread.
         * @param anInterval: timeout interval in microseconds.
         */
        OSCL_IMPORT_REF void After(int32 aDelayMicrosec);

        /**
         * Complete the request after a time interval.
         * RunIfNotReady is identical to After() except that it
         * first checks the request status, and if it is already
         * active, it does nothing.
         *
         * @param aDelayMicrosec (input param): delay in microseconds.
         */
        OSCL_IMPORT_REF void RunIfNotReady(uint32 aDelayMicrosec = 0);

        /**
         * Set request active for this AO.
         * Will panic if the request is already active,
         * or the active object is not added to any scheduler,
         * or the calling thread context does not match
         * the scheduler thread.
         */
        OSCL_IMPORT_REF void SetBusy();

        /**
         * Return true if this AO is active,
         * false otherwise.
         */
        OSCL_IMPORT_REF bool IsBusy() const;

        /**
         * Cancel any active request.
         * If the request is active, this will call the DoCancel
         * routine, wait for the request to cancel, then set the
         * request inactive. The AO will not run.
         * If the request is not active, it does nothing.
         * Request must be canceled from the same thread
         * in which it is scheduled.
         */
        OSCL_IMPORT_REF void Cancel();

        /**
         * Return scheduling priority of this active object.
         */
        OSCL_IMPORT_REF int32 Priority() const;

        /**
         * Request status access
         */
        OSCL_IMPORT_REF int32 Status()const;
        OSCL_IMPORT_REF void SetStatus(int32);
        OSCL_IMPORT_REF int32 StatusRef();

    protected:
        /**
         * Cancel request handler.
         * This gets called by scheduler when the request
         * is cancelled. The default routine will cancel
         * the timer. If any additional action is needed,
         * the derived class may override this. If the
         * derived class does override this, it should explicitly
         * call OsclTimerObject::DoCancel in its own DoCancel
         * routine.
         */
        //OSCL_IMPORT_REF virtual void DoCancel();

        /**
         * Run Error handler.
         * This gets called by scheduler when the Run routine leaves.
         * The default implementation simply returns the leave code.
         * If the derived class wants to handle errors from Run,
         * it may override this. The RunError should return OsclErrNone
         * if it handles the error, otherwise it should return the
         * input error code.
         * @param aError: the leave code generated by the Run.
         */
        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);
};

#endif // OSCL_BASE_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_base_macros.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_BASE_MACROS_H_INCLUDED
#define OSCL_BASE_MACROS_H_INCLUDED

// Silences "unused parameter" warnings for deliberately ignored arguments.
#ifndef OSCL_UNUSED_ARG
#define OSCL_UNUSED_ARG(x) (void)(x)
#endif

#endif // OSCL_BASE_MACROS_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_config.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_CONFIG_H_INCLUDED
#define OSCL_CONFIG_H_INCLUDED

// Target-platform selection: this port only enables the Linux (Android) path.
#define OSCL_HAS_BREW_SUPPORT 0 //Not yet supported
#define OSCL_HAS_SYMBIAN_SUPPORT 0 // Not yet supported
#define OSCL_HAS_LINUX_SUPPORT 1

#endif // OSCL_CONFIG_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_dll.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_DLL_H_INCLUDED
#define OSCL_DLL_H_INCLUDED

// Emits an empty DLL entry-point function definition.
#define OSCL_DLL_ENTRY_POINT() void oscl_dll_entry_point() {}

/**
 * Default DLL entry/exit point function.
 *
 * The body of the DLL entry point is given. The macro
 * only needs to be declared within the source file.
 *
 * Usage :
 *
 * OSCL_DLL_ENTRY_POINT_DEFAULT()
 */
#define OSCL_DLL_ENTRY_POINT_DEFAULT()

#endif // OSCL_DLL_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_error.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_ERROR_H_INCLUDED
#define OSCL_ERROR_H_INCLUDED

// Leave (OSCL exception) macro -- compiled out to a no-op in this port.
#define OSCL_LEAVE(x)

#endif //OSCL_ERROR_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_error_codes.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_ERROR_CODES_H_INCLUDED
#define OSCL_ERROR_CODES_H_INCLUDED

/** Leave Codes */
typedef int32 OsclLeaveCode;

#define OsclErrNone 0
#define OsclErrGeneral 100
#define OsclErrNoMemory 101
#define OsclErrCancelled 102
#define OsclErrNotSupported 103
#define OsclErrArgument 104
#define OsclErrBadHandle 105
#define OsclErrAlreadyExists 106
#define OsclErrBusy 107
#define OsclErrNotReady 108
#define OsclErrCorrupt 109
#define OsclErrTimeout 110
#define OsclErrOverflow 111
#define OsclErrUnderflow 112
#define OsclErrInvalidState 113
#define OsclErrNoResources 114

/** For backward compatibility with old definitions */
#define OSCL_ERR_NONE OsclErrNone
#define OSCL_BAD_ALLOC_EXCEPTION_CODE OsclErrNoMemory

/** Return Codes */
typedef int32 OsclReturnCode;

#define OsclSuccess 0
#define OsclPending 1
#define OsclFailure -1

#endif
/*!
@} */


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_exception.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
// Intentionally empty: exceptions are not used in this port.
#ifndef OSCL_EXCEPTION_H_INCLUDED
#define OSCL_EXCEPTION_H_INCLUDED

#endif // INCLUDED_OSCL_EXCEPTION_H


================================================
FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_math.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #ifndef OSCL_MATH_H_INCLUDED #define OSCL_MATH_H_INCLUDED #include #define oscl_pow pow #define oscl_exp exp #define oscl_sqrt sqrt #define oscl_log log #define oscl_cos cos #define oscl_sin sin #define oscl_tan tan #define oscl_asin asin #endif // OSCL_MATH_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_mem.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef OSCL_MEM_H_INCLUDED #define OSCL_MEM_H_INCLUDED #include "oscl_types.h" #define OSCLMemSizeT size_t #define oscl_memcpy(dest, src, count) memcpy((void *)(dest), (const void *)(src), (OSCLMemSizeT)(count)) #define oscl_memset(dest, ch, count) memset((void *)(dest), (unsigned char)(ch), (OSCLMemSizeT)(count)) #define oscl_memmove(dest, src, bytecount) memmove((void *)(dest), (const void *)(src), (OSCLMemSizeT)(bytecount)) #define oscl_memcmp(buf1, buf2, count) memcmp( (const void *)(buf1), (const void *)(buf2), (OSCLMemSizeT)(count)) #define oscl_malloc(size) malloc((OSCLMemSizeT)(size)) #define oscl_free(memblock) free((void *)(memblock)) #define OSCL_ARRAY_DELETE(ptr) delete [] ptr #define OSCL_ARRAY_NEW(T, count) new T[count] #define OSCL_DELETE(memblock) delete memblock #define OSCL_NEW(arg) new arg #endif // OSCL_MEM_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/dec/oscl/oscl_types.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /*! \file oscl_types.h \brief This file contains basic type definitions for common use across platforms. 
*/ #ifndef OSCL_TYPES_H_INCLUDED #define OSCL_TYPES_H_INCLUDED #include #include #include #include #include #include //! A typedef for a signed 8 bit integer. #ifndef int8 typedef signed char int8; #endif //! A typedef for an unsigned 8 bit integer. #ifndef uint8 typedef unsigned char uint8; #endif //! A typedef for a signed 16 bit integer. #ifndef int16 typedef short int16; #endif //! A typedef for an unsigned 16 bit integer. #ifndef uint16 typedef unsigned short uint16; #endif //! A typedef for a signed 32 bit integer. #ifndef int32 typedef long int32; #endif //! A typedef for an unsigned 32 bit integer. #ifndef uint32 typedef unsigned long uint32; #endif #ifndef sint8 typedef signed char sint8; #endif #ifndef OsclFloat typedef float OsclFloat; #endif #ifndef uint typedef unsigned int uint; #endif #ifndef int64 #define OSCL_HAS_NATIVE_INT64_TYPE 1 #define OSCL_NATIVE_INT64_TYPE long long typedef OSCL_NATIVE_INT64_TYPE int64; #endif // int64 #ifndef uint64 #define OSCL_HAS_NATIVE_UINT64_TYPE 1 #define OSCL_NATIVE_UINT64_TYPE unsigned long long typedef OSCL_NATIVE_UINT64_TYPE uint64; #endif // uint64 #ifndef OSCL_UNUSED_ARG #define OSCL_UNUSED_ARG(x) (void)(x) #endif #ifndef OSCL_EXPORT_REF #define OSCL_EXPORT_REF #endif #ifndef OSCL_IMPORT_REF #define OSCL_IMPORT_REF #endif #if defined(OSCL_DISABLE_INLINES) #define OSCL_INLINE #define OSCL_COND_EXPORT_REF OSCL_EXPORT_REF #define OSCL_COND_IMPORT_REF OSCL_IMPORT_REF #else #define OSCL_INLINE inline #define OSCL_COND_IMPORT_REF #define OSCL_COND_IMPORT_REF #endif #ifndef INT64 #define INT64 int64 #endif #ifndef UINT64 #define UINT64 uint64 #endif #ifndef UINT64_HILO #define UINT64_HILO(a,b) ((a<<32) | b) #endif #endif // OSCL_TYPES_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/dec/oscl/osclconfig_compiler_warnings.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ // -*- c++ -*- // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = // O S C L C O N F I G _ C O M P I L E R _ W A R N I N G S // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = /*! \file osclconfig_compiler_warnings.h * \brief This file contains the ability to turn off/on compiler warnings * */ // This macro enables the "#pragma GCC system_header" found in any header file that // includes this config file. // "#pragma GCC system_header" suppresses compiler warnings in the rest of that header // file by treating the header as a system header file. // For instance, foo.h has 30 lines, "#pragma GCC system_header" is inserted at line 10, // from line 11 to the end of file, all compiler warnings are disabled. // However, this does not affect any files that include foo.h. 
//
#ifdef __GNUC__
#define OSCL_DISABLE_GCC_WARNING_SYSTEM_HEADER
#endif

// Wraps a function name as an explicit function pointer.
#define OSCL_FUNCTION_PTR(x) (&x)


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/3GPVideoParser.cpp
================================================
/*
 * Copyright (C) 2009 OrangeLabs
 * 3GPVideoParser.cpp
 *
 * Created on: 12 August 2009
 * Author: rglt1266
 */
#define LOG_TAG "3GPPSampleReader"

// NOTE(review): the five includes below lost their header names in
// extraction (bare "#include" lines cannot compile).  Restore them from
// the original 3GPVideoParser.cpp (the code uses stdio, stdlib and the
// types/log headers).
#include
#include
#include
#include
#include
#include "3GPVideoParser.h"

/* Variables */
// NOTE(review): the parser keeps all state in file-level globals, so it
// supports exactly one open file at a time and is not thread-safe.
FILE* f = NULL;             // File to parse
uint32 TimeScale = 0;       // Ticks per second
uint32 VideoLength = 0;     // Video length (time)
uint32 VideoWidth = 0;
uint32 VideoHeight = 0;
char VideoCodec[5];         // Codec type: d263/mp4v....
uint32 moovAtomPtr = 0;
uint32 moovAtomSize = 0;
uint32 trakAtomPtr = 0;
uint32 trakAtomSize = 0;

/* Buffers and pointers*/
uint8* moovBuff = 0;        // in-memory copy of the moov atom
uint8* sttsPtr = 0;         // time-to-sample table (inside moovBuff)
uint8* stcoPtr = 0;         // chunk-offset table
uint8* stszPtr = 0;         // sample-size table
uint8* stscPtr = 0;         // sample-to-chunk table
uint8* stsdPtr = 0;         // sample-description table
Sample* samplePtr = 0;      // head of the linked list of samples

/**
 * Endien convert
 * Byte-swaps a 32-bit value (big-endian file data <-> little-endian host).
 */
uint32 EndienConvert (uint32 input){
    return ((input & 0xFF) << 24) | ((input & 0xFF00) << 8) | ((uint32)(input & 0xFF0000) >> 8) | ((uint32)(input & 0xFF000000) >> 24);
}

/**
 * Get a uint32 value at a precised position in a uint8 buffer
 * (reads 4 bytes big-endian starting at offset).
 */
uint32 getUint32FromUint8Buffer (uint8* buffer,uint32 offset){
    return ( ((buffer[offset]<<24)& 0xff000000) | ((buffer[offset+1]<<16)& 0xff0000) | ((buffer[offset+2]<<8)& 0xff00) | ((buffer[offset+3])& 0xff));
}

/**
 * Find a particular value in a uint8 buffer reading uint32
 * (byte-granular scan; returns the offset of the atom size field,
 * i.e. 4 bytes before the matched type code).
 */
int32 findAtom (uint8* buffer,uint32 bufferSize, uint32 valueToFind){
    uint32 tmp;
    uint32 i = 0;
    for (i=0;i<(bufferSize-4);i++){
        tmp = getUint32FromUint8Buffer(buffer,i);
        if (tmp == valueToFind){
            return i-4;
        }
    }
    return VPAtomError;
}

/**
 * Find a particular value in a uint32 buffer
 * (word-granular scan over big-endian data; returns the word index).
 */
int32 findAtom (uint32* buffer,uint32 bufferSize, uint32 valueToFind){
    uint32 i = 0;
    for (i=0;i<(bufferSize);i++){
        if (EndienConvert(buffer[i]) == valueToFind){
            return i;
        }
    }
    return VPAtomError;
}

/**
 * Cleanup the parser
 *
 * @return error code
 */
int cleanupParser(void){
    /* Clean atom info */
    free(moovBuff);
    VideoWidth = 0;
    VideoHeight = 0;
    VideoCodec[0] = '\0';
    VideoLength = 0;
    return VPAtomSucces;
}

/**
 * Init the parser
 *
 * @param filePath path of the file to read
 * @param width check if the video width is correct
 * @param heigth check if the video height is correct
 * @return error code
 */
int Init3GPVideoParser (char *filePath){
    uint32 anAtomSize = 0;
    uint32 anAtomType = 0;
    uint32 trakOffset = 0;
    int32 pos = 0;
    int32 fileSize;

    /* Load file */
    f = fopen(filePath,"r");
    if (f == NULL) {
        return VPAtomError;
    }
    fseek( f, 0L, SEEK_END );
    fileSize = ftell( f );
    if (fileSize <= 8 ) return VPAtomError; // File is too small !

    /* Check if file format is correct ie it's a 3gp file*/
    fseek(f,4,SEEK_SET);
    fread(&anAtomType,sizeof(uint32),1,f);
    anAtomType = EndienConvert(anAtomType);
    if (anAtomType != AtomFtyp) return VPAtomError;

    /* Start parsing from begining*/
    rewind (f);
    // Find Moov Atom
    // NOTE(review): text was lost here in extraction -- the "<"
    // comparison of the loop condition and the code that locates and
    // loads the moov atom (filling moovBuff/moovAtomPtr/moovAtomSize and
    // seeding pos) appear truncated, so the statements below do not
    // parse as-is.  Restore from the original 3GPVideoParser.cpp.
    while (ftell(f) 0) {
        int32 trakSize = getUint32FromUint8Buffer(moovBuff,pos);
        if (findAtom(moovBuff+pos,trakSize,AtomVmhd)){
            trakAtomPtr = moovAtomPtr+pos;
            trakAtomSize = trakSize;
            break;
        } else {
            // This is not the videotrack
        }
        // Trying to find new trak
        pos = findAtom(moovBuff+pos,moovAtomSize-pos,AtomTrak);
    }
    if (trakAtomPtr == 0) {
        return VPAtomError;
    }
    trakOffset = trakAtomPtr - moovAtomPtr;

    // Find MDHD
    pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomMdhd);
    if (pos > 0){
        uint8* Ptr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name
        TimeScale = getUint32FromUint8Buffer(Ptr,4);
        VideoLength = getUint32FromUint8Buffer(Ptr,8);
    } else {
        return VPAtomError;
    }

    // Find STTS
    pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStts);
    if (pos > 0){
        sttsPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name
    } else {
        return VPAtomError;
    }

    // Find STSZ
    pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsz);
    if (pos > 0){
        stszPtr = moovBuff + trakOffset + pos + 20; // Skip Atom size and Atom name
    } else {
        return VPAtomError;
    }

    // Find STCO
    pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStco);
    if (pos > 0){
        stcoPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ...
    } else {
        return VPAtomError;
    }

    // Find STSC
    pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsc);
    if (pos > 0){
        stscPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ...
    } else {
        return VPAtomError;
    }

    // Find STSD
    pos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsd);
    if (pos > 0){
        stsdPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name
        VideoWidth = (getUint32FromUint8Buffer(stsdPtr,32)>>16) & 0xFFFF;
        VideoHeight = getUint32FromUint8Buffer(stsdPtr,32) & 0xFFFF;
        VideoCodec[0] = *(stsdPtr+90);
        VideoCodec[1] = *(stsdPtr+91);
        VideoCodec[2] = *(stsdPtr+92);
        VideoCodec[3] = *(stsdPtr+93);
        VideoCodec[4]= '\0';
    } else {
        return VPAtomError;
    }

    /**
     * Prepare Sample list
     */
    uint32 countChunk = 0;          // Total number of chunk
    uint32 currChunk=0;             // Counter for current chunk
    uint32 currChunkInStsc=0;       // Current chunk described in stsc Atom
    uint32 ChunkAddr = 0;           // Current chunk offset
    uint32 countSample = 0;         // Counter for sample in a chunk
    uint32 currSample = 0;          // Counter for current sample (/total sample in file)
    uint32 SamplePerChunk = 0;      // Value sample per chunk
    uint32 currStscPos = 0;         // Current stsc table
    uint32 Offset = 0;              // Offset from ChunkAddr to sample data start
    int32 currSttsPos = 0;
    uint32 SameTimestampCount = 0;  // For case where n sample have the same timestamp
    uint32 temp;
    Sample* currSamplePtr = 0;      // Pointer to current Sample
    Sample* aSample = 0;            // Current Sample element
    bool initList = false;          // Boolean changed after first sample is read

    /* Get "Number of entries" field of stco atom */
    countChunk = getUint32FromUint8Buffer(stcoPtr-4,0);
    /* Init currChunk */
    currChunkInStsc = getUint32FromUint8Buffer(stscPtr,currStscPos*12);
    // NOTE(review): text was lost here in extraction -- the chunk-loop
    // condition ("currChunk<countChunk..."), the per-chunk setup and the
    // allocation of aSample are truncated; "currChunksize" below is a
    // fusion of the loop header and "aSample->size".  Restore from the
    // original 3GPVideoParser.cpp.
    for (currChunk=0;currChunksize = getUint32FromUint8Buffer(stszPtr,currSample*4);
        currSample++;
        /* Get sample addr */
        aSample->addr = ChunkAddr + Offset;
        Offset = Offset + aSample->size;
        /* Get sample timestamp */
        if (SameTimestampCount == 0){
            // Read new stts element
            SameTimestampCount = getUint32FromUint8Buffer(sttsPtr,currSttsPos*8);
            currSttsPos++;
        }
        temp = getUint32FromUint8Buffer(sttsPtr,(currSttsPos-1)*8+4);
        aSample->timestamp = (uint32)((temp*1000)/TimeScale);
        SameTimestampCount--;
        /* Set next to NULL */
        aSample->next = NULL;
        /* Update the sample list */
        if (initList == false){
            samplePtr = aSample;
            currSamplePtr = aSample;
            initList = true;
        } else {
            currSamplePtr->next = aSample;
            currSamplePtr = aSample;
            currSamplePtr->next = NULL;
        }
    }
    }
    return VPAtomSucces;
}

/**
 * Get Videoframe
 *
 * @param aOutBuffer buffer to write the videoframe
 * @param aBufferSize size of the buffer
 * @param aTimestamp timestamp
 * @return error code for overrun buffer
 */
int getFrame (uint8* aOutBuffer,uint32* aBufferSize, uint32* aTimestamp){
    // Temp sample to free data
    Sample* tmp;
    if (samplePtr != NULL){
        if (aOutBuffer == NULL || f==NULL){
            return VPAtomError;
        }
        fseek(f,samplePtr->addr,SEEK_SET);
        if (fread(aOutBuffer,1,samplePtr->size,f) != samplePtr->size){
            return VPAtomError;
        }
        *aTimestamp = samplePtr->timestamp;
        *aBufferSize = samplePtr->size;
        /* Free the sample */
        tmp = samplePtr;
        samplePtr = samplePtr->next;
        free(tmp);
        return VPAtomSucces;
    } else {
        // End of the sample list: report empty output.
        aOutBuffer = NULL;
        *aBufferSize = 0;
        *aTimestamp = 0;
        return VPAtomError;
    }
}

/**
 * Release file by closing it
 *
 * @return error code
 */
int release(){
    if (f != NULL){
        fclose(f);
    }
    return cleanupParser();
}

/**
 * Get the video duration
 *
 * @return video duration in seconds ( last 3 digits are ms)
 */
uint32 getVideoDuration (){
    uint32 retValue = 0;
    retValue = ((VideoLength/TimeScale)*1000)+(VideoLength%TimeScale);
    return retValue;
}

/**
 * Get the video codec
 *
 * @return video codec string
 */
char* getVideoCodec (){
    return VideoCodec;
}

/**
 * Get video width
 *
 * @return video width
 */
uint32 getVideoWidth (){
    return VideoWidth;
}

/**
 * Get the video height
 *
 * @return video height
 */
uint32 getVideoHeight(){
    return VideoHeight;
}


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/3GPVideoParser.h
================================================
/*
 * Copyright (C) 2009 OrangeLabs
 * 3GPVideoParser.h
 *
 * Created on: 12 August 2009
 * Author: rglt1266
 */
#ifndef _3GPVIDEOPARSER_H_
#define _3GPVIDEOPARSER_H_

/* Define new types */
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef short int16;
typedef unsigned long uint32;
typedef long int32;

// NOTE(review): the trailing semicolon makes this expand to "1;" and
// will break any expression use of DEBUG -- confirm and drop it.
#define DEBUG 1;

/* Define important atoms 4Bytes code (char)*/
#define AtomFtyp 0x66747970 /* File type compatibility atom */
#define AtomMdat 0x6D646174 /* Movie sample data atom */
#define AtomMoov 0x6D6F6F76 /* Movie ressource metadata atom */
#define AtomMdhd 0x6D646864 /* Video media information header atom */
#define AtomMvhd 0x6D766864 /* Video media information header atom */
#define AtomStts 0x73747473 /* Time-to-sample atom */
#define AtomStco 0x7374636F /* Sample-to-chunck atom */
#define AtomTrak 0x7472616B /* Trak atom */
#define AtomStsz 0x7374737A /* Sample size atom */
#define AtomStsc 0x73747363 /* Nb of sample per chunck */
#define AtomStsd 0x73747364 /* Nb of sample per chunck */
#define AtomVmhd 0x766D6864 /* Identifier of a video track */

/* Define error codes */
#define VPAtomError 0
#define VPAtomSucces 1

// Location and size of an atom within the file.
typedef struct {
    uint32 ptr;
    uint32 size;
} Atom;

// One video sample (frame): file offset, byte size, timestamp in ms,
// and the next node of the singly linked sample list.
struct sample {
    uint32 addr;
    uint32 size;
    uint32 timestamp;
    struct sample *next;
};
typedef struct sample Sample;

int Init3GPVideoParser (char *);
int release();
int getFrame (uint8*,uint32*, uint32*);
uint32 getVideoDuration();
uint32 getVideoWidth();
uint32 getVideoHeight();
char* getVideoCodec();

#endif /* 3GPVIDEOPARSER_H_ */


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/adaptive_smooth_no_mmx.cpp
================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* Description: Separated modules into one function per file and put into new template. Description: Optimizing C code and adding comments. Also changing variable names to make them more meaningful. Who: Date: Description: ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: Rec_Y = pointer to 0th position in buffer containing luminance values of type uint8. y_start = value of y coordinate of type int that specifies the first row of pixels to be used in the filter algorithm. x_start = value of x coordinate of type int that specifies the first column of pixels to be used in the filter algorithm. y_blk_start = value of the y coordinate of type int that specifies the row of pixels which contains the start of a block. The row specified by y_blk_start+BLK_SIZE is the last row of pixels that are used in the filter algorithm. x_blk_start = value of the x coordinate of type int that specifies the column of pixels which contains the start of a block. The column specified by x_blk_start+BLK_SIZE is the last column of pixels that are used in the filter algorithm. 
thr = value of type int that is compared to the elements in Rec_Y to determine if a particular value in Rec_Y will be modified by the filter or not width = value of type int that specifies the width of the display in pixels (or pels, equivalently). max_diff = value of type int that specifies the value that may be added or subtracted from the pixel in Rec_Y that is being filtered if the filter algorithm decides to change that particular pixel's luminance value. Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Outputs: None Pointers and Buffers Modified: Buffer pointed to by Rec_Y is modified with the filtered luminance values. Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This function implements a motion compensated noise filter using adaptive weighted averaging of luminance values. *Rec_Y contains the luminance values that are being filtered. The picture below depicts a 3x3 group of pixel luminance values. The "u", "c", and "l" stand for "upper", "center" and "lower", respectively. The location of pelc0 is specified by x_start and y_start in the 1-D array "Rec_Y" as follows (assuming x_start=0): location of pelc0 = [(y_start+1) * width] + x_start Moving up or down 1 row (moving from pelu2 to pelc2, for example) is done by incrementing or decrementing "width" elements within Rec_Y. The coordinates of the upper left hand corner of a block (not the group of 9 pixels depicted in the figure below) is specified by (y_blk_start, x_blk_start). The width and height of the block is BLKSIZE. (y_start,x_start) may be specified independently of (y_blk_start, x_blk_start). 
(y_start,x_start) -----------|-------------------------- | | | | | | X | pelu1 | pelu2 | | pelu0 | | | | | | | -------------------------------------- | | | | | pelc0 | pelc1 | pelc2 | | | | | | | | | -------------------------------------- | | | | | pell0 | pell1 | pell2 | | | | | | | | | -------------------------------------- The filtering of the luminance values is achieved by comparing the 9 luminance values to a threshold value ("thr") and then changing the luminance value of pelc1 if all of the values are above or all of the values are below the threshold. The amount that the luminance value is changed depends on a weighted sum of the 9 luminance values. The position of Pelc1 is then advanced to the right by one (as well as all of the surrounding pixels) and the same calculation is performed again for the luminance value of the new Pelc1. This continues row-wise until pixels in the last row of the block are filtered. ------------------------------------------------------------------------------ REQUIREMENTS None. ------------------------------------------------------------------------------ REFERENCES ..\corelibs\decoder\common\src\post_proc.c ------------------------------------------------------------------------------ PSEUDO-CODE ------------------------------------------------------------------------------ RESOURCES USED When the code is written for a specific target processor the the resources used should be documented below. 
STACK USAGE: [stack count for this module] + [variable to represent stack usage for each subroutine called] where: [stack usage variable] = stack usage for [subroutine name] (see [filename].ext) DATA MEMORY USED: x words PROGRAM MEMORY USED: x words CLOCK CYCLES: [cycle count equation for this module] + [variable used to represent cycle count for each subroutine called] where: [cycle count variable] = cycle count for [subroutine name] (see [filename].ext) ------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "post_proc.h" #include "mp4def.h" #define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA #include "osclconfig_compiler_warnings.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. 
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL STORE/BUFFER/POINTER DEFINITIONS
; Variable declaration - defined here and used outside this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL FUNCTION REFERENCES
; Declare functions defined elsewhere and referenced in this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/

#ifdef PV_POSTPROC_ON

/*----------------------------------------------------------------------------
; FUNCTION CODE
;
; Motion-compensated noise filter using adaptive weighted averaging of
; luminance values, operating in place on Rec_Y.  Each centre pixel's 3x3
; neighbourhood is classified against `thr`; only when all 9 pixels lie on
; the same side of the threshold is the centre replaced by a weighted
; average, and the change is clamped to +/- max_diff.
; NOTE(review): BLKSIZE and the INDEX() classifier come from the project
; headers (post_proc.h / mp4def.h) - not visible here.
----------------------------------------------------------------------------*/
void AdaptiveSmooth_NoMMX(
    uint8 *Rec_Y,       /* i/o: luminance plane, filtered in place */
    int y_start,        /* i: first row processed by the filter */
    int x_start,        /* i: first column processed by the filter */
    int y_blk_start,    /* i: block origin row; last row is y_blk_start+BLKSIZE */
    int x_blk_start,    /* i: block origin col; last col is x_blk_start+BLKSIZE */
    int thr,            /* i: luminance threshold for 3x3 classification */
    int width,          /* i: row stride of the display in pels */
    int max_diff        /* i: max +/- change applied to a filtered pixel */
)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int sign_v[15];     /* per-column counts of pixels >= thr (0..3 each) */
    int sum_v[15];      /* per-column weighted sums: up + 2*centre + down */
    int *sum_V_ptr;
    int *sign_V_ptr;
    uint8 pelu;         /* pixel above the current one */
    uint8 pelc;         /* current (centre) pixel */
    uint8 pell;         /* pixel below the current one */
    uint8 *pelp;        /* walks oldrow[]: saved centre row reused as "up" row */
    uint8 oldrow[15];
    int sum;
    int sum1;
    uint8 *Rec_Y_ptr;
    int32 addr_v;
    int row_cntr;
    int col_cntr;

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/
    /* ---- first row ---- */
    addr_v = (int32)(y_start + 1) * width;  /* start of the row holding pelc */
    Rec_Y_ptr = &Rec_Y[addr_v + x_start];   /* points at pelc0 */
    sum_V_ptr = &sum_v[0];
    sign_V_ptr = &sign_v[0];
    pelp = &oldrow[0];

    /* seed column 0: vertical weighted sum and threshold count */
    pelu = *(Rec_Y_ptr - width);
    *pelp++ = pelc = *Rec_Y_ptr;    /* save centre for reuse as next row's "up" */
    pell = *(Rec_Y_ptr + width);
    Rec_Y_ptr++;
    *sum_V_ptr++ = pelu + (pelc << 1) + pell;
    *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);

    /* seed column 1 */
    pelu = *(Rec_Y_ptr - width);
    *pelp++ = pelc = *Rec_Y_ptr;
    pell = *(Rec_Y_ptr + width);
    Rec_Y_ptr++;
    *sum_V_ptr++ = pelu + (pelc << 1) + pell;
    *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);

    /* slide the 3x3 window across the remaining columns of the first row;
       the centre of the window (one column back) may be altered */
    for (col_cntr = (x_blk_start + BLKSIZE - 1) - x_start; col_cntr > 0; col_cntr--)
    {
        pelu = *(Rec_Y_ptr - width);
        *pelp++ = pelc = *Rec_Y_ptr;
        pell = *(Rec_Y_ptr + width);
        *sum_V_ptr = pelu + (pelc << 1) + pell;
        *sign_V_ptr = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);

        /* sum1 counts how many of the 9 pixels are >= thr */
        sum1 = *(sign_V_ptr - 2) + *(sign_V_ptr - 1) + *sign_V_ptr;
        /* filter only when all 9 agree (all below or all >= thr) */
        if (sum1 == 0 || sum1 == 9)
        {
            /* weighted average of the 9 pixels, rounded: /16 with +8 bias */
            sum = (*(sum_V_ptr - 2) + (*(sum_V_ptr - 1) << 1) +
                   *sum_V_ptr + 8) >> 4;
            Rec_Y_ptr--;    /* step back to the window centre (pelc1) */
            /* clamp the update to +/- max_diff of the original value */
            if ((int)(*Rec_Y_ptr - sum) > max_diff)
            {
                sum = *Rec_Y_ptr - max_diff;
            }
            else if ((int)(*Rec_Y_ptr - sum) < -max_diff)
            {
                sum = *Rec_Y_ptr + max_diff;
            }
            *Rec_Y_ptr++ = sum;     /* write centre, return to window front */
        }
        Rec_Y_ptr++;
        sum_V_ptr++;
        sign_V_ptr++;
    }

    /* ---- remaining rows ---- */
    addr_v = (y_start + 2) * width;     /* row that becomes the new "below" row */
    for (row_cntr = (y_blk_start + BLKSIZE) - (y_start + 2); row_cntr > 0; row_cntr--)
    {
        Rec_Y_ptr = &Rec_Y[addr_v + x_start];   /* old pell0 is the new pelc0 */
        addr_v += width;
        sum_V_ptr = &sum_v[0];
        sign_V_ptr = &sign_v[0];
        pelp = &oldrow[0];

        /* seed columns 0 and 1; "up" values come from the saved previous row */
        pelu = *pelp;
        *pelp++ = pelc = *Rec_Y_ptr;
        pell = *(Rec_Y_ptr + width);
        Rec_Y_ptr++;
        *sum_V_ptr++ = pelu + (pelc << 1) + pell;
        *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);

        pelu = *pelp;
        *pelp++ = pelc = *Rec_Y_ptr;
        pell = *(Rec_Y_ptr + width);
        Rec_Y_ptr++;
        *sum_V_ptr++ = pelu + (pelc << 1) + pell;
        *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);

        /* inner loop: same column sweep as the first row */
        for (col_cntr = (x_blk_start + BLKSIZE - 1) - x_start; col_cntr > 0; col_cntr--)
        {
            pelu = *pelp;
            *pelp++ = pelc = *Rec_Y_ptr;
            pell = *(Rec_Y_ptr + width);
            *sum_V_ptr = pelu + (pelc << 1) + pell;
            *sign_V_ptr = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);

            sum1 = *(sign_V_ptr - 2) + *(sign_V_ptr - 1) + *sign_V_ptr;
            /* same unanimity test and clamped update as in the first loop */
            if (sum1 == 0 || sum1 == 9)
            {
                sum = (*(sum_V_ptr - 2) + (*(sum_V_ptr - 1) << 1) +
                       *sum_V_ptr + 8) >> 4;
                Rec_Y_ptr--;
                if ((int)(*Rec_Y_ptr - sum) > max_diff)
                {
                    sum = *Rec_Y_ptr - max_diff;
                }
                else if ((int)(*Rec_Y_ptr - sum) < -max_diff)
                {
                    sum = *Rec_Y_ptr + max_diff;
                }
                *Rec_Y_ptr++ = (uint8) sum;
            }
            Rec_Y_ptr++;
            sum_V_ptr++;
            sign_V_ptr++;
        }
    }

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}
#endif


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/bitstream.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "bitstream.h"
#include "mp4dec_lib.h"

#define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT
#include "osclconfig_compiler_warnings.h"

/* to mask the n least significant bits of an integer */
static const uint32 msk[33] =
{
    0x00000000, 0x00000001, 0x00000003, 0x00000007,
    0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,
    0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,
    0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff,
    0x0000ffff, 0x0001ffff, 0x0003ffff, 0x0007ffff,
    0x000fffff, 0x001fffff, 0x003fffff, 0x007fffff,
    0x00ffffff, 0x01ffffff, 0x03ffffff, 0x07ffffff,
    0x0fffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff,
    0xffffffff
};

/* ======================================================================== */
/*  Function : BitstreamFillCache()                                         */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Read more bitstream data into the 2-word (curr/next) cache.  */
/*             This function is different from BitstreamFillBuffer in       */
/*             that the buffer is the frame-based buffer provided by        */
/*             the application.                                             */
/*  In/out   :                                                              */
/*  Return   : PV_SUCCESS if succeeded, PV_FAIL if failed.
*/
/*  Modified : 4/16/01 : removed return of PV_END_OF_BUFFER */
/* ======================================================================== */
PV_STATUS BitstreamFillCache(BitstreamDecVideo *stream)
{
    uint8 *bitstreamBuffer = stream->bitstreamBuffer;
    uint8 *v;
    int num_bits, i;

    /* shift the leftover bits of next_word into curr_word */
    stream->curr_word |= (stream->next_word >> stream->incnt);  // stream->incnt cannot be 32
    /* two-step shift: a single <<32 would be undefined behavior in C */
    stream->next_word <<= (31 - stream->incnt);
    stream->next_word <<= 1;
    num_bits = stream->incnt_next + stream->incnt;  /* total bits now cached */
    if (num_bits >= 32)
    {
        /* curr_word is full; no need to touch the buffer */
        stream->incnt_next -= (32 - stream->incnt);
        stream->incnt = 32;
        return PV_SUCCESS;
    }
    /* this check can be removed if there are an additional 4 extra bytes
       at the end of the bitstream */
    v = bitstreamBuffer + stream->read_point;

    if (stream->read_point > stream->data_end_pos - 4)
    {
        /* fewer than 4 bytes remain in the buffer */
        if (stream->data_end_pos <= stream->read_point)
        {
            /* buffer exhausted: expose whatever bits are already cached */
            stream->incnt = num_bits;
            stream->incnt_next = 0;
            return PV_SUCCESS;
        }

        /* pack the 1-3 remaining bytes, MSB first, into next_word */
        stream->next_word = 0;

        for (i = 0; i < stream->data_end_pos - stream->read_point; i++)
        {
            stream->next_word |= (v[i] << ((3 - i) << 3));
        }

        stream->read_point = stream->data_end_pos;
        stream->curr_word |= (stream->next_word >> num_bits); // this is safe
        stream->next_word <<= (31 - num_bits);
        stream->next_word <<= 1;

        num_bits = i << 3;  /* bits just read from the buffer */
        stream->incnt += stream->incnt_next;
        stream->incnt_next = num_bits - (32 - stream->incnt);
        if (stream->incnt_next < 0)
        {
            /* everything fits in curr_word; nothing spills into next_word */
            stream->incnt +=  num_bits;
            stream->incnt_next = 0;
        }
        else
        {
            stream->incnt = 32;
        }
        return PV_SUCCESS;
    }

    /* common path: read a full big-endian 32-bit word from the buffer */
    stream->next_word = ((uint32)v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];
    stream->read_point += 4;

    stream->curr_word |= (stream->next_word >> num_bits); // this is safe
    stream->next_word <<= (31 - num_bits);
    stream->next_word <<= 1;
    stream->incnt_next += stream->incnt;
    stream->incnt = 32;
    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : BitstreamReset()                                             */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Initialize the bitstream buffer for frame-based decoding.
*/ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ void BitstreamReset(BitstreamDecVideo *stream, uint8 *buffer, int32 buffer_size) { /* set up frame-based bitstream buffer */ oscl_memset(stream, 0, sizeof(BitstreamDecVideo)); stream->data_end_pos = buffer_size; stream->bitstreamBuffer = buffer; } /* ======================================================================== */ /* Function : BitstreamOpen() */ /* Purpose : Initialize the bitstream data structure. */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ int BitstreamOpen(BitstreamDecVideo *stream, int layer) { OSCL_UNUSED_ARG(layer); int buffer_size = 0; /* set up linear bitstream buffer */ // stream->currentBytePos = 0; stream->data_end_pos = 0; stream->incnt = 0; stream->incnt_next = 0; stream->bitcnt = 0; stream->curr_word = stream->next_word = 0; stream->read_point = stream->data_end_pos; return buffer_size; } /* ======================================================================== */ /* Function : BitstreamClose() */ /* Purpose : Cleanup the bitstream data structure. 
*/ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ void BitstreamClose(BitstreamDecVideo * stream) { OSCL_UNUSED_ARG(stream); return; } /***********************************************************CommentBegin****** * * -- BitstreamShowBits32HC * Shows 32 bits ***********************************************************CommentEnd********/ PV_STATUS BitstreamShowBits32HC(BitstreamDecVideo *stream, uint32 *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < 32) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word; return status; } /***********************************************************CommentBegin****** * * -- BitstreamShowBits32 * Shows upto and including 31 bits ***********************************************************CommentEnd********/ PV_STATUS BitstreamShowBits32(BitstreamDecVideo *stream, int nbits, uint32 *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < nbits) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word >> (32 - nbits); return status; } #ifndef PV_BS_INLINE /*========================================================================= */ /* Function: BitstreamShowBits16() */ /* Date: 12/18/2000 */ /* Purpose: To see the next "nbits"(nbits<=16) bitstream bits */ /* without advancing the read pointer */ /* */ /* =========================================================================*/ PV_STATUS BitstreamShowBits16(BitstreamDecVideo *stream, int nbits, uint *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < nbits) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word >> (32 - nbits); return status; } /*========================================================================= */ /* Function: BitstreamShow15Bits() */ /* Date: 01/23/2001 */ /* Purpose: To see the next 15 bitstream bits */ /* without advancing the read pointer */ 
/* */ /* =========================================================================*/ PV_STATUS BitstreamShow15Bits(BitstreamDecVideo *stream, uint *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < 15) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word >> 17; return status; } /*========================================================================= */ /* Function: BitstreamShow13Bits */ /* Date: 050923 */ /* Purpose: Faciliate and speed up showing 13 bit from bitstream */ /* used in VlcTCOEFF decoding */ /* Modified: */ /* =========================================================================*/ PV_STATUS BitstreamShow13Bits(BitstreamDecVideo *stream, uint *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < 13) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word >> 19; return status; } uint BitstreamReadBits16_INLINE(BitstreamDecVideo *stream, int nbits) { uint code; PV_STATUS status; if (stream->incnt < nbits) { /* frame-based decoding */ status = BitstreamFillCache(stream); } code = stream->curr_word >> (32 - nbits); PV_BitstreamFlushBits(stream, nbits); return code; } uint BitstreamRead1Bits_INLINE(BitstreamDecVideo *stream) { PV_STATUS status = PV_SUCCESS; uint code; if (stream->incnt < 1) { /* frame-based decoding */ status = BitstreamFillCache(stream); } code = stream->curr_word >> 31; PV_BitstreamFlushBits(stream, 1); return code; } #endif /* ======================================================================== */ /* Function : BitstreamReadBits16() */ /* Purpose : Read bits (nbits <=16) from bitstream buffer. 
*/ /* In/out : */ /* Return : */ /* ======================================================================== */ uint BitstreamReadBits16(BitstreamDecVideo *stream, int nbits) { uint code; if (stream->incnt < nbits) { /* frame-based decoding */ BitstreamFillCache(stream); } code = stream->curr_word >> (32 - nbits); PV_BitstreamFlushBits(stream, nbits); return code; } /* ======================================================================== */ /* Function : BitstreamRead1Bits() */ /* Date : 10/23/2000 */ /* Purpose : Faciliate and speed up reading 1 bit from bitstream. */ /* In/out : */ /* Return : */ /* ======================================================================== */ uint BitstreamRead1Bits(BitstreamDecVideo *stream) { uint code; if (stream->incnt < 1) { /* frame-based decoding */ BitstreamFillCache(stream); } code = stream->curr_word >> 31; PV_BitstreamFlushBits(stream, 1); return code; } /* ======================================================================== */ /* Function : PV_BitstreamFlushBitsCheck() */ /* Purpose : Flush nbits bits from bitstream buffer. Check for cache */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS PV_BitstreamFlushBitsCheck(BitstreamDecVideo *stream, int nbits) { PV_STATUS status = PV_SUCCESS; stream->bitcnt += nbits; stream->incnt -= nbits; if (stream->incnt < 0) { /* frame-based decoding */ status = BitstreamFillCache(stream); if (stream->incnt < 0) { stream->bitcnt += stream->incnt; stream->incnt = 0; } } stream->curr_word <<= nbits; return status; } /* ======================================================================== */ /* Function : BitstreamReadBits32() */ /* Purpose : Read bits from bitstream buffer. 
*/ /* In/out : */ /* Return : */ /* ======================================================================== */ uint32 BitstreamReadBits32(BitstreamDecVideo *stream, int nbits) { uint32 code; if (stream->incnt < nbits) { /* frame-based decoding */ BitstreamFillCache(stream); } code = stream->curr_word >> (32 - nbits); PV_BitstreamFlushBits(stream, nbits); return code; } uint32 BitstreamReadBits32HC(BitstreamDecVideo *stream) { uint32 code; BitstreamShowBits32HC(stream, &code); stream->bitcnt += 32; stream->incnt = 0; stream->curr_word = 0; return code; } /* ======================================================================== */ /* Function : BitstreamCheckEndBuffer() */ /* Date : 03/30/2001 */ /* Purpose : Check to see if we are at the end of buffer */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS BitstreamCheckEndBuffer(BitstreamDecVideo *stream) { if (stream->read_point >= stream->data_end_pos && stream->incnt <= 0) return PV_END_OF_VOP; return PV_SUCCESS; } PV_STATUS PV_BitstreamShowBitsByteAlign(BitstreamDecVideo *stream, int nbits, uint32 *code) { PV_STATUS status = PV_SUCCESS; int n_stuffed; n_stuffed = 8 - (stream->bitcnt & 0x7); /* 07/05/01 */ if (stream->incnt < (nbits + n_stuffed)) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = (stream->curr_word << n_stuffed) >> (32 - nbits); return status; } #ifdef PV_ANNEX_IJKT_SUPPORT PV_STATUS PV_BitstreamShowBitsByteAlignNoForceStuffing(BitstreamDecVideo *stream, int nbits, uint32 *code) { PV_STATUS status = PV_SUCCESS; int n_stuffed; n_stuffed = (8 - (stream->bitcnt & 0x7)) & 7; if (stream->incnt < (nbits + n_stuffed)) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = (stream->curr_word << n_stuffed) >> (32 - nbits); return status; } #endif PV_STATUS PV_BitstreamByteAlign(BitstreamDecVideo *stream) { PV_STATUS status = PV_SUCCESS; int n_stuffed; n_stuffed = 8 - 
(stream->bitcnt & 0x7); /* 07/05/01 */ /* We have to make sure we have enough bits in the cache. 08/15/2000 */ if (stream->incnt < n_stuffed) { /* frame-based decoding */ status = BitstreamFillCache(stream); } stream->bitcnt += n_stuffed; stream->incnt -= n_stuffed; stream->curr_word <<= n_stuffed; if (stream->incnt < 0) { stream->bitcnt += stream->incnt; stream->incnt = 0; } return status; } PV_STATUS BitstreamByteAlignNoForceStuffing(BitstreamDecVideo *stream) { uint n_stuffed; n_stuffed = (8 - (stream->bitcnt & 0x7)) & 0x7; /* 07/05/01 */ stream->bitcnt += n_stuffed; stream->incnt -= n_stuffed; if (stream->incnt < 0) { stream->bitcnt += stream->incnt; stream->incnt = 0; } stream->curr_word <<= n_stuffed; return PV_SUCCESS; } /* ==================================================================== */ /* Function : getPointer() */ /* Date : 10/98 */ /* Purpose : get current position of file pointer */ /* In/out : */ /* Return : */ /* ==================================================================== */ int32 getPointer(BitstreamDecVideo *stream) { return stream->bitcnt; } /* ====================================================================== / Function : movePointerTo() Date : 05/14/2004 Purpose : move bitstream pointer to a desired position In/out : Return : Modified : / ====================================================================== */ PV_STATUS movePointerTo(BitstreamDecVideo *stream, int32 pos) { int32 byte_pos; if (pos < 0) { pos = 0; } byte_pos = pos >> 3; if (byte_pos > stream->data_end_pos) { byte_pos = stream->data_end_pos; } stream->read_point = byte_pos & -4; stream->bitcnt = stream->read_point << 3;; stream->curr_word = 0; stream->next_word = 0; stream->incnt = 0; stream->incnt_next = 0; BitstreamFillCache(stream); PV_BitstreamFlushBits(stream, ((pos & 0x7) + ((byte_pos & 0x3) << 3))); return PV_SUCCESS; } /* ======================================================================== */ /* Function : validStuffing() */ /* Date : 04/11/2000 */ 
/* Purpose : Check whether we have valid stuffing at current position. */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : 12/18/2000 : changed the pattern type to uint */ /* 04/01/2001 : removed PV_END_OF_BUFFER */ /* ======================================================================== */ Bool validStuffing(BitstreamDecVideo *stream) { uint n_stuffed; uint pattern; n_stuffed = 8 - (stream->bitcnt & 0x7); BitstreamShowBits16(stream, n_stuffed, &pattern); if (pattern == msk[n_stuffed-1]) return PV_TRUE; return PV_FALSE; } #ifdef PV_ANNEX_IJKT_SUPPORT Bool validStuffing_h263(BitstreamDecVideo *stream) { uint n_stuffed; uint pattern; n_stuffed = (8 - (stream->bitcnt & 0x7)) & 7; // stream->incnt % 8 if (n_stuffed == 0) { return PV_TRUE; } BitstreamShowBits16(stream, n_stuffed, &pattern); if (pattern == 0) return PV_TRUE; return PV_FALSE; } #endif /* ======================================================================== */ /* Function : PVSearchNextH263Frame() */ /* Date : 04/08/2005 */ /* Purpose : search for 0x00 0x00 0x80 */ /* In/out : */ /* Return : PV_SUCCESS if succeeded or PV_END_OF_VOP if failed */ /* Modified : */ /* ======================================================================== */ PV_STATUS PVSearchNextH263Frame(BitstreamDecVideo *stream) { PV_STATUS status = PV_SUCCESS; uint8 *ptr; int32 i; int32 initial_byte_aligned_position = (stream->bitcnt + 7) >> 3; ptr = stream->bitstreamBuffer + initial_byte_aligned_position; i = PVLocateH263FrameHeader(ptr, stream->data_end_pos - initial_byte_aligned_position); if (stream->data_end_pos <= initial_byte_aligned_position + i) { status = PV_END_OF_VOP; } (void)movePointerTo(stream, ((i + initial_byte_aligned_position) << 3)); /* ptr + i */ return status; } /* ======================================================================== */ /* Function : PVSearchNextM4VFrame() */ /* Date : 04/08/2005 */ /* Purpose : search for 0x00 0x00 0x01 and move the pointer to the */ /* 
beginning of the start code */ /* In/out : */ /* Return : PV_SUCCESS if succeeded or PV_END_OF_VOP if failed */ /* Modified : */ /* ======================================================================== */ PV_STATUS PVSearchNextM4VFrame(BitstreamDecVideo *stream) { PV_STATUS status = PV_SUCCESS; uint8 *ptr; int32 i; int32 initial_byte_aligned_position = (stream->bitcnt + 7) >> 3; ptr = stream->bitstreamBuffer + initial_byte_aligned_position; i = PVLocateFrameHeader(ptr, stream->data_end_pos - initial_byte_aligned_position); if (stream->data_end_pos <= initial_byte_aligned_position + i) { status = PV_END_OF_VOP; } (void)movePointerTo(stream, ((i + initial_byte_aligned_position) << 3)); /* ptr + i */ return status; } void PVLocateM4VFrameBoundary(BitstreamDecVideo *stream) { uint8 *ptr; int32 byte_pos = (stream->bitcnt >> 3); stream->searched_frame_boundary = 1; ptr = stream->bitstreamBuffer + byte_pos; stream->data_end_pos = PVLocateFrameHeader(ptr, (int32)stream->data_end_pos - byte_pos) + byte_pos; } void PVLocateH263FrameBoundary(BitstreamDecVideo *stream) { uint8 *ptr; int32 byte_pos = (stream->bitcnt >> 3); stream->searched_frame_boundary = 1; ptr = stream->bitstreamBuffer + byte_pos; stream->data_end_pos = PVLocateH263FrameHeader(ptr, (int32)stream->data_end_pos - byte_pos) + byte_pos; } /* ======================================================================== */ /* Function : quickSearchVideoPacketHeader() */ /* Date : 05/08/2000 */ /* Purpose : Quick search for the next video packet header */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* ======================================================================== */ PV_STATUS quickSearchVideoPacketHeader(BitstreamDecVideo *stream, int marker_length) { PV_STATUS status = PV_SUCCESS; uint32 tmpvar; if (stream->searched_frame_boundary == 0) { PVLocateM4VFrameBoundary(stream); } do { status = BitstreamCheckEndBuffer(stream); if (status == PV_END_OF_VOP) break; PV_BitstreamShowBitsByteAlign(stream, marker_length, &tmpvar); if (tmpvar == RESYNC_MARKER) break; PV_BitstreamFlushBits(stream, 8); } while (status == PV_SUCCESS); return status; } #ifdef PV_ANNEX_IJKT_SUPPORT PV_STATUS quickSearchH263SliceHeader(BitstreamDecVideo *stream) { PV_STATUS status = PV_SUCCESS; uint32 tmpvar; if (stream->searched_frame_boundary == 0) { PVLocateH263FrameBoundary(stream); } do { status = BitstreamCheckEndBuffer(stream); if (status == PV_END_OF_VOP) break; PV_BitstreamShowBitsByteAlignNoForceStuffing(stream, 17, &tmpvar); if (tmpvar == RESYNC_MARKER) break; PV_BitstreamFlushBits(stream, 8); } while (status == PV_SUCCESS); return status; } #endif /* ======================================================================== */ /* The following functions are for Error Concealment. */ /* ======================================================================== */ /****************************************************/ // 01/22/99 Quick search of Resync Marker // (actually the first part of it, i.e. 16 0's and a 1. /* We are not using the fastest algorithm possible. What this function does is to locate 11 consecutive 0's and then check if the 5 bits before them and the 1 bit after them are all 1's. */ // Table used for quick search of markers. Gives the last `1' in // 4 bits. The MSB is bit #1, the LSB is bit #4. const int lastOne[] = { 0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4 }; // Table used for quick search of markers. Gives the last `0' in // 4 bits. The MSB is bit #1, the LSB is bit #4. 
/*const int lastZero[]= { 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4, 0 }; */

// Table used for quick search of markers.  Gives the first `0' in
// 4 bits.  The MSB is bit #1, the LSB is bit #4.
const int firstZero[] = { 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 0 };

// Table used for quick search of markers.  Gives the first `1' in
// 4 bits.  The MSB is bit #1, the LSB is bit #4.
const int firstOne[] = { 0, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 };

/* ======================================================================== */
/* Function : quickSearchMotionMarker()                                     */
/* Date     : 01/25/99                                                      */
/* Purpose  : Quick search for the data-partitioning motion marker          */
/*            (bit pattern 11111 0000 0000 0001).  Skips ahead using the    */
/*            lastOne/firstZero tables so that runs of bits which cannot    */
/*            start the marker are flushed in one step.                     */
/* Return   : PV_SUCCESS when found, PV_END_OF_VOP at end of data,          */
/*            PV_FAIL when 17 zero bits are seen (marker cannot follow).    */
/* Modified : 12/18/2000 : 32-bit version                                   */
/* ======================================================================== */
PV_STATUS quickSearchMotionMarker(BitstreamDecVideo *stream)
// MM: (11111000000000001)
{
    PV_STATUS status;
    uint32 tmpvar, tmpvar2;

    /* bound the search to the current frame before scanning */
    if (stream->searched_frame_boundary == 0)
    {
        PVLocateM4VFrameBoundary(stream);
    }

    while (TRUE)
    {
        status = BitstreamCheckEndBuffer(stream);
        if (status == PV_END_OF_VOP) return PV_END_OF_VOP;

        BitstreamShowBits32(stream, 17, &tmpvar);
        if (!tmpvar) return PV_FAIL;

        if (tmpvar & 1)     /* the 17th bit from the current position is '1' */
        {
            if (tmpvar == MOTION_MARKER_COMB)
            {
                return PV_SUCCESS;      /* found */
            }
            else
            {
                /* not the marker: skip past the last '0' among bits 13..16 */
                tmpvar >>= 1;
                tmpvar &= 0xF;
                PV_BitstreamFlushBits(stream, (int)(12 + firstZero[tmpvar]));
            }
        }
        else
        {
            /* 01/25/99 get the first 16 bits and flush up to the last '1',
               nibble by nibble, so the marker prefix can still align */
            tmpvar >>= 1;
            tmpvar2 = tmpvar & 0xF;

            /* 01/26/99 check bits #13 ~ #16 */
            if (tmpvar2)
            {
                PV_BitstreamFlushBits(stream, (int)(7 + lastOne[tmpvar2]));
            }
            else
            {
                tmpvar >>= 4;
                tmpvar2 = tmpvar & 0xF;

                /* 01/26/99 check bits #9 ~ #12 */
                if (tmpvar2)
                {
                    PV_BitstreamFlushBits(stream, (int)(3 + lastOne[tmpvar2]));
                }
                else
                {
                    tmpvar >>= 4;
                    tmpvar2 = tmpvar & 0xF;

                    /* 01/26/99 check bits #5 ~ #8; no need to check further
                       because the first 5 bits of the marker are all 1's */
                    if (lastOne[tmpvar2] < 2)
                    {
                        /* we already have too many consecutive 0's:
                           go directly past the last of the 17 bits */
                        PV_BitstreamFlushBits(stream, 17);
                    }
                    else
                    {
                        PV_BitstreamFlushBits(stream, (int)(lastOne[tmpvar2] - 1));
                    }
                }
            }
        }
    }
}

/* ======================================================================== */
/* Function : quickSearchDCM()                                              */
/* Date     : 01/22/99                                                      */
/* Purpose  : Quick search for the DC marker (110 1011 0000 0000 0001).     */
/*            Same table-driven skip strategy as the motion-marker search:  */
/*            locate 11 consecutive 0's, then check the bits around them.   */
/* Return   : PV_SUCCESS when found, PV_END_OF_VOP at end of data.          */
/* Modified : 12/18/2000 : 32-bit version                                   */
/* ======================================================================== */
PV_STATUS quickSearchDCM(BitstreamDecVideo *stream)
// DCM: (110 1011 0000 0000 0001)
{
    PV_STATUS status;
    uint32 tmpvar, tmpvar2;

    if (stream->searched_frame_boundary == 0)
    {
        PVLocateM4VFrameBoundary(stream);
    }

    while (TRUE)
    {
        status = BitstreamCheckEndBuffer(stream);
        if (status == PV_END_OF_VOP) return PV_END_OF_VOP;

        BitstreamShowBits32(stream, 19, &tmpvar);
        if (tmpvar & 1)     /* last of the 19 bits is '1' */
        {
            if (tmpvar == DC_MARKER)
            {
                return PV_SUCCESS;      /* found */
            }
            else
            {
                /* 01/25/99 treat the last of the 19 bits as the marker's
                   7th bit (which is also a '1') and realign on it */
                PV_BitstreamFlushBits(stream, 12);
            }
        }
        else
        {
            /* flush up to the last '1' among the leading bits, nibble by
               nibble (same scheme as quickSearchMotionMarker) */
            tmpvar >>= 1;
            tmpvar2 = tmpvar & 0xF;

            if (tmpvar2)
            {
                PV_BitstreamFlushBits(stream, (int)(7 + lastOne[tmpvar2]));
            }
            else
            {
                tmpvar >>= 4;
                tmpvar2 = tmpvar & 0xF;

                if (tmpvar2)
                {
                    PV_BitstreamFlushBits(stream, (int)(3 + lastOne[tmpvar2]));
                }
                else
                {
                    tmpvar >>= 4;
                    tmpvar2 = tmpvar & 0xF;

                    if (lastOne[tmpvar2] < 2)
                    {
                        /* we already have too many consecutive 0's:
                           go directly past the last of the bits shown */
                        PV_BitstreamFlushBits(stream, 19);
                    }
                    else
                    {
                        PV_BitstreamFlushBits(stream, (int)(lastOne[tmpvar2] - 1));
                    }
                }
            }
        }
    }
}

/* ======================================================================== */
/* Function : quickSearchGOBHeader()   0000 0000 0000 0000 1                */
/* Date     : 07/06/01                                                      */
/* Purpose  : Quick search for an H.263 GOB header (16 zeros then a '1',    */
/*            not byte aligned).  Scans the cached 32-bit word a byte at a  */
/*            time, using firstOne[] to find the '1' terminating the run    */
/*            and msk[] to verify the leading zeros.                        */
/* Return   : PV_SUCCESS when found, PV_END_OF_VOP at end of data.          */
/* ======================================================================== */
PV_STATUS quickSearchGOBHeader(BitstreamDecVideo *stream)
{
    PV_STATUS status;
    int byte0, byte1, byte2, shift, tmpvar;

    /* start the scan from a byte boundary */
    BitstreamByteAlignNoForceStuffing(stream);

    if (stream->searched_frame_boundary == 0)
    {
        PVLocateH263FrameBoundary(stream);
    }

    while (TRUE)
    {
        status = BitstreamCheckEndBuffer(stream);
        if (status == PV_END_OF_VOP) return PV_END_OF_VOP;

        if (stream->incnt < 24)
        {
            status = BitstreamFillCache(stream);
        }

        /* second byte of the cache word must be zero for a match */
        byte1 = (stream->curr_word << 8) >> 24;
        if (byte1 == 0)
        {
            byte2 = (stream->curr_word << 16) >> 24;
            if (byte2)
            {
                /* locate the first '1' in the third byte */
                tmpvar = byte2 >> 4;
                if (tmpvar)
                {
                    shift = 9 - firstOne[tmpvar];
                }
                else
                {
                    shift = 5 - firstOne[byte2];
                }
                /* the tail of the first byte must be all zeros too */
                byte0 = stream->curr_word >> 24;
                if ((byte0 & msk[shift]) == 0)
                {
                    PV_BitstreamFlushBits(stream, 8 - shift);
                    return PV_SUCCESS;
                }
                PV_BitstreamFlushBits(stream, 8);       /* third_byte is not zero */
            }
        }

        PV_BitstreamFlushBits(stream, 8);
    }
}


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/bitstream.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef _BITSTREAM_D_H_ #define _BITSTREAM_D_H_ #include "mp4dec_lib.h" /* video decoder function prototypes */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #define PV_BS_INLINE /* support inline bitstream functions */ #define PV_BitstreamFlushBits(A,B) {(A)->bitcnt += (B); (A)->incnt -= (B); (A)->curr_word <<= (B);} PV_STATUS BitstreamFillBuffer(BitstreamDecVideo *stream); PV_STATUS BitstreamFillCache(BitstreamDecVideo *stream); void BitstreamReset(BitstreamDecVideo *stream, uint8 *buffer, int32 buffer_size); int BitstreamOpen(BitstreamDecVideo *stream, int layer); void BitstreamClose(BitstreamDecVideo *stream); PV_STATUS BitstreamShowBits32(BitstreamDecVideo *stream, int nbits, uint32 *code); uint32 BitstreamReadBits32(BitstreamDecVideo *stream, int nbits); uint BitstreamReadBits16(BitstreamDecVideo *stream, int nbits); uint BitstreamRead1Bits(BitstreamDecVideo *stream); #ifndef PV_BS_INLINE PV_STATUS BitstreamShowBits16(BitstreamDecVideo *stream, int nbits, uint *code); PV_STATUS BitstreamShow15Bits(BitstreamDecVideo *stream, uint *code); PV_STATUS BitstreamShow13Bits(BitstreamDecVideo *stream, uint *code); uint BitstreamReadBits16_INLINE(BitstreamDecVideo *stream, int nbits); uint BitstreamRead1Bits_INLINE(BitstreamDecVideo *stream); #else __inline PV_STATUS BitstreamShowBits16(BitstreamDecVideo *stream, int nbits, uint *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < nbits) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = 
stream->curr_word >> (32 - nbits); return status; } /* =========================================================================*/ __inline PV_STATUS BitstreamShow15Bits(BitstreamDecVideo *stream, uint *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < 15) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word >> 17; return status; } __inline PV_STATUS BitstreamShow13Bits(BitstreamDecVideo *stream, uint *code) { PV_STATUS status = PV_SUCCESS; if (stream->incnt < 13) { /* frame-based decoding */ status = BitstreamFillCache(stream); } *code = stream->curr_word >> 19; return status; } __inline uint BitstreamReadBits16_INLINE(BitstreamDecVideo *stream, int nbits) { uint code; if (stream->incnt < nbits) { /* frame-based decoding */ BitstreamFillCache(stream); } code = stream->curr_word >> (32 - nbits); PV_BitstreamFlushBits(stream, nbits); return code; } __inline uint BitstreamRead1Bits_INLINE(BitstreamDecVideo *stream) { uint code; if (stream->incnt < 1) { /* frame-based decoding */ BitstreamFillCache(stream); } code = stream->curr_word >> 31; PV_BitstreamFlushBits(stream, 1); return code; } #endif PV_STATUS PV_BitstreamFlushBitsCheck(BitstreamDecVideo *stream, int nbits); uint32 BitstreamReadBits32HC(BitstreamDecVideo *stream); PV_STATUS BitstreamShowBits32HC(BitstreamDecVideo *stream, uint32 *code); PV_STATUS BitstreamCheckEndBuffer(BitstreamDecVideo *stream); PV_STATUS PV_BitstreamShowBitsByteAlign(BitstreamDecVideo *stream, int nbits, uint32 *code); #ifdef PV_ANNEX_IJKT_SUPPORT PV_STATUS PV_BitstreamShowBitsByteAlignNoForceStuffing(BitstreamDecVideo *stream, int nbits, uint32 *code); Bool validStuffing_h263(BitstreamDecVideo *stream); PV_STATUS quickSearchH263SliceHeader(BitstreamDecVideo *stream); #endif PV_STATUS PV_BitstreamByteAlign(BitstreamDecVideo *stream); PV_STATUS BitstreamByteAlignNoForceStuffing(BitstreamDecVideo *stream); Bool validStuffing(BitstreamDecVideo *stream); PV_STATUS 
movePointerTo(BitstreamDecVideo *stream, int32 pos); PV_STATUS PVSearchNextM4VFrame(BitstreamDecVideo *stream); PV_STATUS PVSearchNextH263Frame(BitstreamDecVideo *stream); PV_STATUS quickSearchVideoPacketHeader(BitstreamDecVideo *stream, int marker_length); /* for error concealment & soft-decoding */ void PVLocateM4VFrameBoundary(BitstreamDecVideo *stream); void PVSearchH263FrameBoundary(BitstreamDecVideo *stream); PV_STATUS quickSearchMotionMarker(BitstreamDecVideo *stream); PV_STATUS quickSearchDCM(BitstreamDecVideo *stream); PV_STATUS quickSearchGOBHeader(BitstreamDecVideo *stream); void BitstreamShowBuffer(BitstreamDecVideo *stream, int32 startbit, int32 endbit, uint8 *bitBfr); /* 10/8/98 New prototyps. */ int32 getPointer(BitstreamDecVideo *stream); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _BITSTREAM_D_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/block_idct.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
 * -------------------------------------------------------------------
 */

/*
 * 8x8 inverse DCT for the MPEG-4/H.263 video decoder.
 *
 * BlockIDCT_intra()/MBlockIDCT() write reconstructed intra pixels
 * directly into the current VOP; BlockIDCT() computes the residual
 * IDCT and adds it to a prediction block of pitch 16.  When FAST_IDCT
 * is defined, reduced row/column kernels are dispatched through the
 * tables below according to the number of nonzero coefficients and
 * the column/row bitmaps produced during coefficient decoding.
 */

/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "mp4dec_lib.h"
#include "idct.h"
#include "motion_comp.h"
#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA
#include "osclconfig_compiler_warnings.h"

/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
----------------------------------------------------------------------------*/
/* private prototypes */
static void idctrow(int16 *blk, uint8 *pred, uint8 *dst, int width);
static void idctrow_intra(int16 *blk, PIXEL *, int width);
static void idctcol(int16 *blk);

#ifdef FAST_IDCT
/* Mapping from nz_coefs / bitmaps to the reduced IDCT kernels.
   ARM4 does not allow global data when they are not constant, hence an
   array of non-const function pointers cannot be used (actual addresses
   are only known when the dll is loaded).  So instead these const tables
   map a row/column number to the kernel to call.
   (idctcol0..4 / idctrow1..4[_intra] are declared in idct.h.) */
static void (*const idctcolVCA[10][4])(int16*) =
{
    {&idctcol1, &idctcol0, &idctcol0, &idctcol0},
    {&idctcol1, &idctcol1, &idctcol0, &idctcol0},
    {&idctcol2, &idctcol1, &idctcol0, &idctcol0},
    {&idctcol3, &idctcol1, &idctcol0, &idctcol0},
    {&idctcol3, &idctcol2, &idctcol0, &idctcol0},
    {&idctcol3, &idctcol2, &idctcol1, &idctcol0},
    {&idctcol3, &idctcol2, &idctcol1, &idctcol1},
    {&idctcol3, &idctcol2, &idctcol2, &idctcol1},
    {&idctcol3, &idctcol3, &idctcol2, &idctcol1},
    {&idctcol4, &idctcol3, &idctcol2, &idctcol1}
};

static void (*const idctrowVCA[10])(int16*, uint8*, uint8*, int) =
{
    &idctrow1,
    &idctrow2,
    &idctrow2,
    &idctrow2,
    &idctrow2,
    &idctrow3,
    &idctrow4,
    &idctrow4,
    &idctrow4,
    &idctrow4
};

static void (*const idctcolVCA2[16])(int16*) =
{
    &idctcol0, &idctcol4, &idctcol3, &idctcol4,
    &idctcol2, &idctcol4, &idctcol3, &idctcol4,
    &idctcol1, &idctcol4, &idctcol3, &idctcol4,
    &idctcol2, &idctcol4, &idctcol3, &idctcol4
};

static void (*const idctrowVCA2[8])(int16*, uint8*, uint8*, int) =
{
    &idctrow1, &idctrow4, &idctrow3, &idctrow4,
    &idctrow2, &idctrow4, &idctrow3, &idctrow4
};

static void (*const idctrowVCA_intra[10])(int16*, PIXEL *, int) =
{
    &idctrow1_intra,
    &idctrow2_intra,
    &idctrow2_intra,
    &idctrow2_intra,
    &idctrow2_intra,
    &idctrow3_intra,
    &idctrow4_intra,
    &idctrow4_intra,
    &idctrow4_intra,
    &idctrow4_intra
};

static void (*const idctrowVCA2_intra[8])(int16*, PIXEL *, int) =
{
    &idctrow1_intra, &idctrow4_intra, &idctrow3_intra, &idctrow4_intra,
    &idctrow2_intra, &idctrow4_intra, &idctrow3_intra, &idctrow4_intra
};
#endif

/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
/* Run the intra IDCT for all six 8x8 blocks of the current macroblock
   (four luma, two chroma), writing pixels straight into the VOP planes. */
void MBlockIDCT(VideoDecData *video)
{
    Vop *currVop = video->currVop;
    MacroBlock *mblock = video->mblock;
    PIXEL *c_comp;
    PIXEL *cu_comp;
    PIXEL *cv_comp;
    int x_pos = video->mbnum_col;
    int y_pos = video->mbnum_row;
    int width, width_uv;
    int32 offset;

    width = video->width;
    width_uv = width >> 1;
    /* byte offset of this 16x16 macroblock in the luma plane */
    offset = (int32)(y_pos << 4) * width + (x_pos << 4);

    c_comp  = currVop->yChan + offset;
    /* chroma planes are subsampled 2:1 in each direction */
    cu_comp = currVop->uChan + (offset >> 2) + (x_pos << 2);
    cv_comp = currVop->vChan + (offset >> 2) + (x_pos << 2);

    /* blocks 0..3: luma quadrants; 4,5: Cb and Cr */
    BlockIDCT_intra(mblock, c_comp, 0, width);
    BlockIDCT_intra(mblock, c_comp + 8, 1, width);
    BlockIDCT_intra(mblock, c_comp + (width << 3), 2, width);
    BlockIDCT_intra(mblock, c_comp + (width << 3) + 8, 3, width);
    BlockIDCT_intra(mblock, cu_comp, 4, width_uv);
    BlockIDCT_intra(mblock, cv_comp, 5, width_uv);
}

/* Inverse-transform block `comp` of the macroblock and store the clipped
   result at c_comp (pitch `width`).  With FAST_IDCT, sparse blocks use
   reduced kernels picked from the dispatch tables above. */
void BlockIDCT_intra(
    MacroBlock *mblock, PIXEL *c_comp, int comp, int width)
{
    int16 *coeff_in = mblock->block[comp];
#ifdef INTEGER_IDCT
#ifdef FAST_IDCT  /* VCA IDCT using nzcoefs and bitmaps */
    int i, bmapr;
    int nz_coefs = mblock->no_coeff[comp];
    uint8 *bitmapcol = mblock->bitmapcol[comp];
    uint8 bitmaprow = mblock->bitmaprow[comp];

    if (nz_coefs <= 10)
    {
        /* few coefficients: kernel choice depends only on their count */
        bmapr = (nz_coefs - 1);

        (*(idctcolVCA[bmapr]))(coeff_in);           /* column 0 kernel */
        (*(idctcolVCA[bmapr][1]))(coeff_in + 1);
        (*(idctcolVCA[bmapr][2]))(coeff_in + 2);
        (*(idctcolVCA[bmapr][3]))(coeff_in + 3);

        (*idctrowVCA_intra[nz_coefs-1])(coeff_in, c_comp, width);
    }
    else
    {
        /* dense block: pick a column kernel per nonzero column */
        i = 8;
        while (i--)
        {
            bmapr = (int)bitmapcol[i];
            if (bmapr)
            {
                if ((bmapr&0xf) == 0)   /* 07/18/01: zeros in rows 4..7 */
                {
                    (*(idctcolVCA2[bmapr>>4]))(coeff_in + i);
                }
                else
                {
                    idctcol(coeff_in + i);
                }
            }
        }
        if ((bitmapcol[4] | bitmapcol[5] | bitmapcol[6] | bitmapcol[7]) == 0)
        {
            bitmaprow >>= 4;
            (*(idctrowVCA2_intra[(int)bitmaprow]))(coeff_in, c_comp, width);
        }
        else
        {
            idctrow_intra(coeff_in, c_comp, width);
        }
    }
#else
    void idct_intra(int *block, uint8 *comp, int width);
    idct_intra(coeff_in, c_comp, width);
#endif
#else
    void idctref_intra(int *block, uint8 *comp, int width);
    idctref_intra(coeff_in, c_comp, width);
#endif
    return;
}

/* 08/04/05: no residue, just copy the 8x8 prediction block (pitch 16)
   to the output plane (pitch `width`), 4 bytes at a time.
   NOTE(review): relies on 4-byte-aligned pred/dst for the uint32 loads
   and stores — presumably guaranteed by the callers; confirm. */
void Copy_Blk_to_Vop(uint8 *dst, uint8 *pred, int width)
{
    /* copy 4 bytes at a time */
    width -= 4;
    *((uint32*)dst) = *((uint32*)pred);
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));
    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));
    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));

    return ;
}

/* 08/04/05: compute the IDCT of coeff_in and add the prediction block
   (pitch 16) at the end, storing the clipped sum into dst (pitch width). */
void BlockIDCT(
    uint8 *dst,         /* destination */
    uint8 *pred,        /* prediction block, pitch 16 */
    int16 *coeff_in,    /* DCT data, size 64 */
    int width,          /* width of dst */
    int nz_coefs,
    uint8 *bitmapcol,
    uint8 bitmaprow
)
{
#ifdef INTEGER_IDCT
#ifdef FAST_IDCT  /* VCA IDCT using nzcoefs and bitmaps */
    int i, bmapr;

    if (nz_coefs <= 10)
    {
        bmapr = (nz_coefs - 1);
        (*(idctcolVCA[bmapr]))(coeff_in);           /* column 0 kernel */
        (*(idctcolVCA[bmapr][1]))(coeff_in + 1);
        (*(idctcolVCA[bmapr][2]))(coeff_in + 2);
        (*(idctcolVCA[bmapr][3]))(coeff_in + 3);

        (*idctrowVCA[nz_coefs-1])(coeff_in, pred, dst, width);
        return ;
    }
    else
    {
        i = 8;
        while (i--)
        {
            bmapr = (int)bitmapcol[i];
            if (bmapr)
            {
                if ((bmapr&0xf) == 0)   /* 07/18/01: zeros in rows 4..7 */
                {
                    (*(idctcolVCA2[bmapr>>4]))(coeff_in + i);
                }
                else
                {
                    idctcol(coeff_in + i);
                }
            }
        }
        if ((bitmapcol[4] | bitmapcol[5] | bitmapcol[6] | bitmapcol[7]) == 0)
        {
            (*(idctrowVCA2[bitmaprow>>4]))(coeff_in, pred, dst, width);
        }
        else
        {
            idctrow(coeff_in, pred, dst, width);
        }
        return ;
    }
#else // FAST_IDCT
    void idct(int *block, uint8 *pred, uint8 *dst, int width);
    idct(coeff_in, pred, dst, width);
    return;
#endif // FAST_IDCT
#else // INTEGER_IDCT
    void idctref(int *block, uint8 *pred, uint8 *dst, int width);
    idctref(coeff_in, pred, dst, width);
    return;
#endif // INTEGER_IDCT
}
/*----------------------------------------------------------------------------
; End Function: block_idct
----------------------------------------------------------------------------*/

/****************************************************************************/

/*----------------------------------------------------------------------------
; Function Code FOR idctrow
;
; Fixed-point row (horizontal) IDCT over a fully transposed 8x8 block,
; adding the 16-pitch prediction and clipping to dst:
;
;           7                       pi         1
; dst[k] = sum c[l] * src[l] * cos( -- * ( k + - ) * l )
;          l=0                       8         2
;
; where: c[0] = 128, c[1..7] = 128*sqrt(2)
;
; W1..W7 are the cosine constants and ADD_AND_CLIP1..4 combine each result
; with the matching byte of pred_word — both come from idct.h /
; motion_comp.h; presumably the macros add and clamp to 0..255 (TODO
; confirm in those headers).  Coefficients are zeroed as they are
; consumed so the block is clean for the next macroblock.
----------------------------------------------------------------------------*/
void idctrow(
    int16 *blk, uint8 *pred, uint8 *dst, int width)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offsets, such that we can take advantage of the
       pre-offset addressing mode in the loop below */
    width -= 4;
    dst -= width;
    pred -= 12;
    blk -= 8;

    while (i--)
    {
        /* load one transposed row (stride 8) and clear it */
        x1 = (int32)blk[12] << 8;
        blk[12] = 0;
        x2 = blk[14];
        blk[14] = 0;
        x3 = blk[10];
        blk[10] = 0;
        x4 = blk[9];
        blk[9] = 0;
        x5 = blk[15];
        blk[15] = 0;
        x6 = blk[13];
        blk[13] = 0;
        x7 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;   /* +8192: proper rounding in the fourth stage */
        blk[0] = 0;

        /* first stage */
        x8 = W7 * (x4 + x5) + 4;
        x4 = (x8 + (W1 - W7) * x4) >> 3;
        x5 = (x8 - (W1 + W7) * x5) >> 3;
        x8 = W3 * (x6 + x7) + 4;
        x6 = (x8 - (W3 - W5) * x6) >> 3;
        x7 = (x8 - (W3 + W5) * x7) >> 3;

        /* second stage */
        x8 = x0 + x1;
        x0 -= x1;
        x1 = W6 * (x3 + x2) + 4;
        x2 = (x1 - (W2 + W6) * x2) >> 3;
        x3 = (x1 + (W2 - W6) * x3) >> 3;
        x1 = x4 + x6;
        x4 -= x6;
        x6 = x5 + x7;
        x5 -= x7;

        /* third stage */
        x7 = x8 + x3;
        x8 -= x3;
        x3 = x0 + x2;
        x0 -= x2;
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x4 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage: combine with prediction, pack 4 pixels per word */
        pred_word = *((uint32*)(pred += 12));   /* read 4 bytes from pred */
        res = (x7 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x3 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 + x4) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x8 + x6) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += width)) = dst_word;  /* save 4 bytes to dst */

        pred_word = *((uint32*)(pred += 4));    /* read 4 bytes from pred */
        res = (x8 - x6) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 - x4) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x3 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x7 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += 4)) = dst_word;      /* save 4 bytes to dst */
    }

    return;
}

/* Intra variant of the row IDCT: same butterfly as idctrow() but the
   clipped result is stored directly into the pixel plane `comp` (pitch
   `width`) with no prediction added; CLIP_RESULT comes from idct.h. */
void idctrow_intra(
    int16 *blk, PIXEL *comp, int width)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8, temp;
    int i = 8;
    int offset = width;
    int32 word;

    while (i--)
    {
        /* load one row (the 8x8 block is walked in row-major order here)
           and clear the coefficients as they are consumed */
        x1 = (int32)blk[4] << 8;
        blk[4] = 0;
        x2 = blk[6];
        blk[6] = 0;
        x3 = blk[2];
        blk[2] = 0;
        x4 = blk[1];
        blk[1] = 0;
        x5 = blk[7];
        blk[7] = 0;
        x6 = blk[5];
        blk[5] = 0;
        x7 = blk[3];
        blk[3] = 0;
#ifndef FAST_IDCT
        /* shortcut for an all-zero AC row */
        /* covered by idctrow1  01/9/2001 */
        if (!(x1 | x2 | x3 | x4 | x5 | x6 | x7))
        {
            blk[0] = blk[1] = blk[2] = blk[3] = blk[4] = blk[5] = blk[6] = blk[7] = (blk[0] + 32) >> 6;
            return;
        }
#endif
        x0 = ((int32)blk[0] << 8) + 8192;   /* +8192: proper rounding in the fourth stage */
        blk[0] = 0;

        /* first stage */
        x8 = W7 * (x4 + x5) + 4;
        x4 = (x8 + (W1 - W7) * x4) >> 3;
        x5 = (x8 - (W1 + W7) * x5) >> 3;
        x8 = W3 * (x6 + x7) + 4;
        x6 = (x8 - (W3 - W5) * x6) >> 3;
        x7 = (x8 - (W3 + W5) * x7) >> 3;

        /* second stage */
        x8 = x0 + x1;
        x0 -= x1;
        x1 = W6 * (x3 + x2) + 4;
        x2 = (x1 - (W2 + W6) * x2) >> 3;
        x3 = (x1 + (W2 - W6) * x3) >> 3;
        x1 = x4 + x6;
        x4 -= x6;
        x6 = x5 + x7;
        x5 -= x7;

        /* third stage */
        x7 = x8 + x3;
        x8 -= x3;
        x3 = x0 + x2;
        x0 -= x2;
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x4 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage: clip and pack 4 pixels per 32-bit store */
        word = ((x7 + x1) >> 14);
        CLIP_RESULT(word)
        temp = ((x3 + x2) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x0 + x4) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x8 + x6) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp)) = word;

        word = ((x8 - x6) >> 14);
        CLIP_RESULT(word)
        temp = ((x0 - x4) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x3 - x2) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x7 - x1) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp + 4)) = word;

        comp += offset;
        blk += B_SIZE;
    }

    return;
}
/*----------------------------------------------------------------------------
; End Function: idctrow
----------------------------------------------------------------------------*/
/****************************************************************************/
/*
------------------------------------------------------------------------------
 FUNCTION NAME: idctcol
------------------------------------------------------------------------------
 Vertical (column) pass of the fixed-point 8x8 inverse DCT.  Operates in
 place on one column of the coefficient block: elements blk[0], blk[8], ...,
 blk[56] (stride 8).  The upstream requirements / references / pseudo-code /
 target resource-usage template sections were never filled in.
------------------------------------------------------------------------------
*/

/*----------------------------------------------------------------------------
; Function Code FOR idctcol
----------------------------------------------------------------------------*/
/*
 * column (vertical) IDCT:
 *
 *              7                          pi         1
 *   dst[8*k] = sum c[l] * src[8*l] * cos( -- * ( k + - ) * l )
 *             l=0                          8         2
 *
 *   where: c[0] = 1/1024, c[1..7] = (1/1024)*sqrt(2)
 */
void idctcol(
    int16 *blk)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/
    x1 = (int32)blk[32] << 11;
    x2 = blk[48];
    x3 = blk[16];
    x4 = blk[8];
    x5 = blk[56];
    x6 = blk[40];
    x7 = blk[24];
#ifndef FAST_IDCT
    /* shortcut */
    /* covered by idctcolumn1  01/9/2001 */
    if (!(x1 | x2 | x3 | x4 | x5 | x6 | x7))
    {
        /* DC-only column: every element becomes the scaled DC value */
        blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56] = blk[0] << 3;
        return;
    }
#endif
    x0 = ((int32)blk[0] << 11) + 128; /* +128: rounding bias for the >>8 */

    /* first stage */
    x8 = W7 * (x4 + x5);
    x4 = x8 + (W1 - W7) * x4;
    x5 = x8 - (W1 + W7) * x5;
    x8 = W3 * (x6 + x7);
    x6 = x8 - (W3 - W5) * x6;
    x7 = x8 - (W3 + W5) * x7;

    /* second stage */
    x8 = x0 + x1;
    x0 -= x1;
    x1 = W6 * (x3 + x2);
    x2 = x1 - (W2 + W6) * x2;
    x3 = x1 + (W2 - W6) * x3;
    x1 = x4 + x6;
    x4 -= x6;
    x6 = x5 + x7;
    x5 -= x7;

    /* third stage */
    x7 = x8 + x3;
    x8 -= x3;
    x3 = x0 + x2;
    x0 -= x2;
    x2 = (181 * (x4 + x5) + 128) >> 8;
    x4 = (181 * (x4 - x5) + 128) >> 8;

    /* fourth stage: write the transformed column back in place */
    blk[0] = (x7 + x1) >> 8;
    blk[8] = (x3 + x2) >> 8;
    blk[16] = (x0 + x4) >> 8;
    blk[24] = (x8 + x6) >> 8;
    blk[32] = (x8 - x6) >> 8;
    blk[40] = (x0 - x4) >> 8;
    blk[48] = (x3 - x2) >> 8;
    blk[56] = (x7 - x1) >> 8;

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}
/*----------------------------------------------------------------------------
; End Function: idctcol
----------------------------------------------------------------------------*/



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/cal_dc_scaler.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: [input_variable_name] = [description of the input to module, its type definition, and length (when applicable)] Local Stores/Buffers/Pointers Needed: [local_store_name] = [description of the local store, its type definition, and length (when applicable)] [local_buffer_name] = [description of the local buffer, its type definition, and length (when applicable)] [local_ptr_name] = [description of the local pointer, its type definition, and length (when applicable)] Global Stores/Buffers/Pointers Needed: [global_store_name] = [description of the global store, its type definition, and length (when applicable)] [global_buffer_name] = [description of the global buffer, its type definition, and length (when applicable)] [global_ptr_name] = [description of the global pointer, its type definition, and length (when applicable)] Outputs: [return_variable_name] = [description of data/pointer returned by module, its type definition, and length (when applicable)] Pointers and Buffers Modified: [variable_bfr_ptr] points to the [describe where the variable_bfr_ptr points to, its type definition, and length (when applicable)] [variable_bfr] contents are [describe the new contents of variable_bfr] Local Stores Modified: [local_store_name] = [describe new contents, its type definition, and length (when applicable)] Global Stores Modified: [global_store_name] = [describe new contents, its type 
definition, and length (when applicable)] ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This module calculates the DC quantization scale according to the incoming Q and type. ------------------------------------------------------------------------------ REQUIREMENTS [List requirements to be satisfied by this module.] ------------------------------------------------------------------------------ REFERENCES [List all references used in designing this module.] ------------------------------------------------------------------------------ PSEUDO-CODE ------------------------------------------------------------------------------ RESOURCES USED When the code is written for a specific target processor the the resources used should be documented below. STACK USAGE: [stack count for this module] + [variable to represent stack usage for each subroutine called] where: [stack usage variable] = stack usage for [subroutine name] (see [filename].ext) DATA MEMORY USED: x words PROGRAM MEMORY USED: x words CLOCK CYCLES: [cycle count equation for this module] + [variable used to represent cycle count for each subroutine called] where: [cycle count variable] = cycle count for [subroutine name] (see [filename].ext) ------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "vlc_decode.h" #include "bitstream.h" #include "zigzag.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. 
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS / LOCAL STORES / EXTERNAL REFERENCES
; (none for this module)
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
/* cal_dc_scaler: derive the DC quantization scale from the quantization
   parameter and the block type.

   QP   - quantization parameter of the current macroblock
   type - LUMINANCE_DC_TYPE selects the luma mapping; any other value is
          treated as chrominance (there are no other types)

   Returns the dc_scaler value.  Both mappings are piecewise in QP:
   luma uses the ranges [1,4], [5,8], [9,24] and a linear tail, chroma uses
   [1,4], [5,24] and a linear tail. */
int cal_dc_scaler(int QP, int type)
{
    if (type == LUMINANCE_DC_TYPE)
    {
        if (QP >= 1 && QP <= 4)
        {
            return 8;
        }
        if (QP >= 5 && QP <= 8)
        {
            return 2 * QP;
        }
        if (QP >= 9 && QP <= 24)
        {
            return QP + 8;
        }
        return 2 * QP - 16;
    }

    /* chrominance mapping */
    if (QP >= 1 && QP <= 4)
    {
        return 8;
    }
    if (QP >= 5 && QP <= 24)
    {
        return (QP + 13) >> 1;
    }
    return QP - 6;
}



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/chv_filter.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: [input_variable_name] = [description of the input to module, its type definition, and length (when applicable)] Local Stores/Buffers/Pointers Needed: [local_store_name] = [description of the local store, its type definition, and length (when applicable)] [local_buffer_name] = [description of the local buffer, its type definition, and length (when applicable)] [local_ptr_name] = [description of the local pointer, its type definition, and length (when applicable)] Global Stores/Buffers/Pointers Needed: [global_store_name] = [description of the global store, its type definition, and length (when applicable)] [global_buffer_name] = [description of the global buffer, its type definition, and length (when applicable)] [global_ptr_name] = [description of the global pointer, its type definition, and length (when applicable)] Outputs: [return_variable_name] = [description of data/pointer returned by module, its type definition, and length (when applicable)] Pointers and Buffers Modified: [variable_bfr_ptr] points to the [describe where the variable_bfr_ptr points to, its type definition, and length (when applicable)] [variable_bfr] contents are [describe the new contents of variable_bfr] Local Stores Modified: [local_store_name] = [describe new contents, its type definition, and length (when applicable)] Global Stores Modified: [global_store_name] = [describe new contents, its type definition, and length (when applicable)] ------------------------------------------------------------------------------ FUNCTION DESCRIPTION For fast Deblock filtering Newer version (macroblock based processing) ------------------------------------------------------------------------------ REQUIREMENTS [List requirements to be satisfied by this module.] 
------------------------------------------------------------------------------ REFERENCES [List all references used in designing this module.] ------------------------------------------------------------------------------ PSEUDO-CODE ------------------------------------------------------------------------------ RESOURCES USED When the code is written for a specific target processor the the resources used should be documented below. STACK USAGE: [stack count for this module] + [variable to represent stack usage for each subroutine called] where: [stack usage variable] = stack usage for [subroutine name] (see [filename].ext) DATA MEMORY USED: x words PROGRAM MEMORY USED: x words CLOCK CYCLES: [cycle count equation for this module] + [variable used to represent cycle count for each subroutine called] where: [cycle count variable] = cycle count for [subroutine name] (see [filename].ext) ------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "post_proc.h" #define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA #include "osclconfig_compiler_warnings.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ //#define FILTER_LEN_8 /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. 
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------
; LOCAL STORE/BUFFER/POINTER DEFINITIONS
; Variable declaration - defined here and used outside this module
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; EXTERNAL FUNCTION REFERENCES
; Declare functions defined elsewhere and referenced in this module
----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/
#ifdef PV_POSTPROC_ON
/*************************************************************************
    Function prototype : void CombinedHorzVertFilter(
                                uint8 *rec,
                                int width,
                                int height,
                                int16 *QP_store,
                                int chr,
                                uint8 *pp_mod)
    Parameters :
        rec     : pointer to the decoded frame buffer.
        width   : width of decoded frame.
        height  : height of decoded frame
        QP_store: pointer to the array of QP corresponding to the
                  decoded frame. It has only one value for each MB.
        chr     : luma or color indication
                    == 0 luma
                    == 1 color
        pp_mod  : the semaphore used for deblocking

    Remark : The function does the deblocking on decoded frames.
             First, based on the semaphore info., it is divided into hard
             and soft filtering. To differentiate real and fake edges, it
             then checks the difference against QP to decide whether to do
             the filtering or not.
*************************************************************************/

/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
/* CombinedHorzVertFilter: macroblock-based deblocking pass over a decoded
   frame.  For each 2x2 group of 8x8 blocks it runs a horizontal pass over
   the two block rows below the group's top edge, then a vertical pass over
   the two block columns right of the group's left edge.  Per edge, the
   pp_mod semaphore bits select a hard filter (bit set on both neighbours)
   or a soft filter; in both, the pixel-difference across the edge is
   compared with (a multiple of) QP to distinguish real edges (left alone)
   from blocking artifacts (smoothed).  QP is per-MB for luma (chr == 0) and
   per-block for chroma (chr != 0). */
void CombinedHorzVertFilter(
    uint8 *rec,
    int width,
    int height,
    int16 *QP_store,
    int chr,
    uint8 *pp_mod)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int br, bc, mbr, mbc;
    int QP = 1;
    uint8 *ptr, *ptr_e;
    int pp_w, pp_h;
    int brwidth;
    int jVal0, jVal1, jVal2;

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/
    pp_w = (width >> 3);    /* frame size in 8x8 blocks */
    pp_h = (height >> 3);

    for (mbr = 0; mbr < pp_h; mbr += 2) /* row of blocks */
    {
        brwidth = mbr * pp_w; /* number of blocks above current block row */
        for (mbc = 0; mbc < pp_w; mbc += 2) /* col of blocks */
        {
            if (!chr)
                QP = QP_store[(brwidth>>2) + (mbc>>1)]; /* QP is a per-MB value */

            /********* for each block **************/
            /****************** Horiz. Filtering ********************/
            for (br = mbr + 1; br < mbr + 3; br++)  /* 2x2 blocks */
            {
                brwidth += pp_w; /* number of blocks above & left of current row */
                /* the profile on ARM920T shows keeping these two boundary
                   checks separate is faster than combining them */
                if (br < pp_h) /* boundary: skip the lowest row of blocks */
                    for (bc = mbc; bc < mbc + 2; bc++)
                    {
                        /****** check boundary for deblocking ************/
                        if (bc < pp_w) /* boundary: skip the rightmost column */
                        {
                            ptr = rec + (brwidth << 6) + (bc << 3);
                            jVal0 = brwidth + bc;
                            if (chr)
                                QP = QP_store[jVal0]; /* per-block QP for chroma */
                            ptr_e = ptr + 8; /* pointer to where the loop ends */

                            if (((pp_mod[jVal0]&0x02)) && ((pp_mod[jVal0-pp_w]&0x02)))
                            {
                                /* Horiz Hard filter: smooth pixels A..F across
                                   the edge when |D-C| < 2*QP */
                                do
                                {
                                    jVal0 = *(ptr - width); /* C */
                                    jVal1 = *ptr;           /* D */
                                    jVal2 = jVal1 - jVal0;
                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))
                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1))))
                                    {
                                        /* differentiate between real and fake edge */
                                        jVal0 = ((jVal0 + jVal1) >> 1); /* (D+C)/2 */
                                        *(ptr - width) = (uint8)(jVal0); /* C */
                                        *ptr = (uint8)(jVal0);           /* D */

                                        jVal0 = *(ptr - (width << 1)); /* B */
                                        jVal1 = *(ptr + width);        /* E */
                                        jVal2 = jVal1 - jVal0;         /* E-B */
                                        if (jVal2 > 0)
                                        {
                                            jVal0 += ((jVal2 + 3) >> 2);
                                            jVal1 -= ((jVal2 + 3) >> 2);
                                            *(ptr - (width << 1)) = (uint8)jVal0; /* store B */
                                            *(ptr + width) = (uint8)jVal1;        /* store E */
                                        }
                                        else if (jVal2)
                                        {
                                            jVal0 -= ((3 - jVal2) >> 2);
                                            jVal1 += ((3 - jVal2) >> 2);
                                            *(ptr - (width << 1)) = (uint8)jVal0; /* store B */
                                            *(ptr + width) = (uint8)jVal1;        /* store E */
                                        }

                                        jVal0 = *(ptr - (width << 1) - width); /* A */
                                        jVal1 = *(ptr + (width << 1));         /* F */
                                        jVal2 = jVal1 - jVal0;                 /* (F-A) */
                                        if (jVal2 > 0)
                                        {
                                            jVal0 += ((jVal2 + 7) >> 3);
                                            jVal1 -= ((jVal2 + 7) >> 3);
                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);
                                            *(ptr + (width << 1)) = (uint8)(jVal1);
                                        }
                                        else if (jVal2)
                                        {
                                            jVal0 -= ((7 - jVal2) >> 3);
                                            jVal1 += ((7 - jVal2) >> 3);
                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);
                                            *(ptr + (width << 1)) = (uint8)(jVal1);
                                        }
                                    }/* a3_0 > 2QP */
                                }
                                while (++ptr < ptr_e);
                            }
                            else
                            {
                                /* Horiz soft filter: smaller support (A..D)
                                   and threshold QP instead of 2*QP */
                                do
                                {
                                    jVal0 = *(ptr - width); /* B */
                                    jVal1 = *ptr;           /* C */
                                    jVal2 = jVal1 - jVal0;  /* C-B */
                                    if (((jVal2 > 0) && (jVal2 < (QP)))
                                            || ((jVal2 < 0) && (jVal2 > -(QP))))
                                    {
                                        jVal0 = ((jVal0 + jVal1) >> 1); /* (B+C)/2 cannot overflow; ceil() */
                                        *(ptr - width) = (uint8)(jVal0); /* B = (B+C)/2 */
                                        *ptr = (uint8)jVal0;             /* C = (B+C)/2 */

                                        jVal0 = *(ptr - (width << 1)); /* A */
                                        jVal1 = *(ptr + width);        /* D */
                                        jVal2 = jVal1 - jVal0;         /* D-A */
                                        if (jVal2 > 0)
                                        {
                                            jVal1 -= ((jVal2 + 7) >> 3);
                                            jVal0 += ((jVal2 + 7) >> 3);
                                            *(ptr - (width << 1)) = (uint8)jVal0; /* A */
                                            *(ptr + width) = (uint8)jVal1;        /* D */
                                        }
                                        else if (jVal2)
                                        {
                                            jVal1 += ((7 - jVal2) >> 3);
                                            jVal0 -= ((7 - jVal2) >> 3);
                                            *(ptr - (width << 1)) = (uint8)jVal0; /* A */
                                            *(ptr + width) = (uint8)jVal1;        /* D */
                                        }
                                    }
                                }
                                while (++ptr < ptr_e);
                            } /* Soft filter */
                        }/* boundary checking */
                    }/*bc*/
            }/*br*/
            brwidth -= (pp_w << 1);

            /****************** Vert. Filtering ********************/
            for (br = mbr; br < mbr + 2; br++)
            {
                if (br < pp_h)
                    for (bc = mbc + 1; bc < mbc + 3; bc++)
                    {
                        /****** check boundary for deblocking ************/
                        if (bc < pp_w)
                        {
                            ptr = rec + (brwidth << 6) + (bc << 3);
                            jVal0 = brwidth + bc;
                            if (chr)
                                QP = QP_store[jVal0];
                            ptr_e = ptr + (width << 3); /* 8 rows down */

                            if (((pp_mod[jVal0-1]&0x01)) && ((pp_mod[jVal0]&0x01)))
                            {
                                /* Vert Hard filter */
                                do
                                {
                                    jVal1 = *ptr;       /* D */
                                    jVal0 = *(ptr - 1); /* C */
                                    jVal2 = jVal1 - jVal0; /* D-C */
                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))
                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1))))
                                    {
                                        jVal1 = (jVal0 + jVal1) >> 1; /* (C+D)/2 */
                                        *ptr = jVal1;
                                        *(ptr - 1) = jVal1;

                                        jVal1 = *(ptr + 1); /* E */
                                        jVal0 = *(ptr - 2); /* B */
                                        jVal2 = jVal1 - jVal0; /* E-B */
                                        if (jVal2 > 0)
                                        {
                                            jVal1 -= ((jVal2 + 3) >> 2); /* E = E - (E-B)/4 */
                                            jVal0 += ((jVal2 + 3) >> 2); /* B = B + (E-B)/4 */
                                            *(ptr + 1) = jVal1;
                                            *(ptr - 2) = jVal0;
                                        }
                                        else if (jVal2)
                                        {
                                            jVal1 += ((3 - jVal2) >> 2); /* E = E - (E-B)/4 */
                                            jVal0 -= ((3 - jVal2) >> 2); /* B = B + (E-B)/4 */
                                            *(ptr + 1) = jVal1;
                                            *(ptr - 2) = jVal0;
                                        }

                                        jVal1 = *(ptr + 2); /* F */
                                        jVal0 = *(ptr - 3); /* A */
                                        jVal2 = jVal1 - jVal0; /* (F-A) */
                                        if (jVal2 > 0)
                                        {
                                            jVal1 -= ((jVal2 + 7) >> 3); /* F -= (F-A)/8 */
                                            jVal0 += ((jVal2 + 7) >> 3); /* A += (F-A)/8 */
                                            *(ptr + 2) = jVal1;
                                            *(ptr - 3) = jVal0;
                                        }
                                        else if (jVal2)
                                        {
                                            jVal1 -= ((jVal2 - 7) >> 3); /* F -= (F-A)/8 */
                                            jVal0 += ((jVal2 - 7) >> 3); /* A += (F-A)/8 */
                                            *(ptr + 2) = jVal1;
                                            *(ptr - 3) = jVal0;
                                        }
                                    } /* end of vert hard filtering */
                                }
                                while ((ptr += width) < ptr_e);
                            }
                            else
                            {
                                /* Vert soft filter */
                                do
                                {
                                    jVal1 = *ptr;       /* C */
                                    jVal0 = *(ptr - 1); /* B */
                                    jVal2 = jVal1 - jVal0;
                                    if (((jVal2 > 0) && (jVal2 < (QP)))
                                            || ((jVal2 < 0) && (jVal2 > -(QP))))
                                    {
                                        jVal1 = (jVal0 + jVal1 + 1) >> 1;
                                        *ptr = jVal1;       /* C */
                                        *(ptr - 1) = jVal1; /* B */

                                        jVal1 = *(ptr + 1); /* D */
                                        jVal0 = *(ptr - 2); /* A */
                                        jVal2 = (jVal1 - jVal0); /* D-A */
                                        if (jVal2 > 0)
                                        {
                                            jVal1 -= (((jVal2) + 7) >> 3); /* D -= (D-A)/8 */
                                            jVal0 += (((jVal2) + 7) >> 3); /* A += (D-A)/8 */
                                            *(ptr + 1) = jVal1;
                                            *(ptr - 2) = jVal0;
                                        }
                                        else if (jVal2)
                                        {
                                            jVal1 += ((7 - (jVal2)) >> 3); /* D -= (D-A)/8 */
                                            jVal0 -= ((7 - (jVal2)) >> 3); /* A += (D-A)/8 */
                                            *(ptr + 1) = jVal1;
                                            *(ptr - 2) = jVal0;
                                        }
                                    }
                                }
                                while ((ptr += width) < ptr_e);
                            } /* Soft filter */
                        } /* boundary */
                    } /*bc*/
                brwidth += pp_w;
            }/*br*/
            brwidth -= (pp_w << 1);
        }/*mbc*/
        brwidth += (pp_w << 1);
    }/*mbr*/

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}

/* CombinedHorzVertFilter_NoSoftDeblocking: identical traversal and hard
   filters as CombinedHorzVertFilter above, but edges whose semaphore bits
   do not request hard filtering are left untouched (the soft-filter
   branches are omitted). */
void CombinedHorzVertFilter_NoSoftDeblocking(
    uint8 *rec,
    int width,
    int height,
    int16 *QP_store,
    int chr,
    uint8 *pp_mod)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int br, bc, mbr, mbc;
    int QP = 1;
    uint8 *ptr, *ptr_e;
    int pp_w, pp_h;
    int brwidth;
    int jVal0, jVal1, jVal2;

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/
    pp_w = (width >> 3);
    pp_h = (height >> 3);

    for (mbr = 0; mbr < pp_h; mbr += 2) /* row of blocks */
    {
        brwidth = mbr * pp_w; /* number of blocks above current block row */
        for (mbc = 0; mbc < pp_w; mbc += 2) /* col of blocks */
        {
            if (!chr)
                QP = QP_store[(brwidth>>2) + (mbc>>1)]; /* QP is a per-MB value */

            /********* for each block **************/
            /****************** Horiz. Filtering ********************/
            for (br = mbr + 1; br < mbr + 3; br++)  /* 2x2 blocks */
            {
                brwidth += pp_w; /* number of blocks above & left of current row */
                /* the profile on ARM920T shows keeping these two boundary
                   checks separate is faster than combining them */
                if (br < pp_h) /* boundary: skip the lowest row of blocks */
                    for (bc = mbc; bc < mbc + 2; bc++)
                    {
                        /****** check boundary for deblocking ************/
                        if (bc < pp_w) /* boundary: skip the rightmost column */
                        {
                            ptr = rec + (brwidth << 6) + (bc << 3);
                            jVal0 = brwidth + bc;
                            if (chr)
                                QP = QP_store[jVal0];
                            ptr_e = ptr + 8; /* pointer to where the loop ends */

                            if (((pp_mod[jVal0]&0x02)) && ((pp_mod[jVal0-pp_w]&0x02)))
                            {
                                /* Horiz Hard filter */
                                do
                                {
                                    jVal0 = *(ptr - width); /* C */
                                    jVal1 = *ptr;           /* D */
                                    jVal2 = jVal1 - jVal0;
                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))
                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1))))
                                    {
                                        /* differentiate between real and fake edge */
                                        jVal0 = ((jVal0 + jVal1) >> 1); /* (D+C)/2 */
                                        *(ptr - width) = (uint8)(jVal0); /* C */
                                        *ptr = (uint8)(jVal0);           /* D */

                                        jVal0 = *(ptr - (width << 1)); /* B */
                                        jVal1 = *(ptr + width);        /* E */
                                        jVal2 = jVal1 - jVal0;         /* E-B */
                                        if (jVal2 > 0)
                                        {
                                            jVal0 += ((jVal2 + 3) >> 2);
                                            jVal1 -= ((jVal2 + 3) >> 2);
                                            *(ptr - (width << 1)) = (uint8)jVal0; /* store B */
                                            *(ptr + width) = (uint8)jVal1;        /* store E */
                                        }
                                        else if (jVal2)
                                        {
                                            jVal0 -= ((3 - jVal2) >> 2);
                                            jVal1 += ((3 - jVal2) >> 2);
                                            *(ptr - (width << 1)) = (uint8)jVal0; /* store B */
                                            *(ptr + width) = (uint8)jVal1;        /* store E */
                                        }

                                        jVal0 = *(ptr - (width << 1) - width); /* A */
                                        jVal1 = *(ptr + (width << 1));         /* F */
                                        jVal2 = jVal1 - jVal0;                 /* (F-A) */
                                        if (jVal2 > 0)
                                        {
                                            jVal0 += ((jVal2 + 7) >> 3);
                                            jVal1 -= ((jVal2 + 7) >> 3);
                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);
                                            *(ptr + (width << 1)) = (uint8)(jVal1);
                                        }
                                        else if (jVal2)
                                        {
                                            jVal0 -= ((7 - jVal2) >> 3);
                                            jVal1 += ((7 - jVal2) >> 3);
                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);
                                            *(ptr + (width << 1)) = (uint8)(jVal1);
                                        }
                                    }/* a3_0 > 2QP */
                                }
                                while (++ptr < ptr_e);
                            }
                        }/* boundary checking */
                    }/*bc*/
            }/*br*/
            brwidth -= (pp_w << 1);

            /****************** Vert. Filtering ********************/
            for (br = mbr; br < mbr + 2; br++)
            {
                if (br < pp_h)
                    for (bc = mbc + 1; bc < mbc + 3; bc++)
                    {
                        /****** check boundary for deblocking ************/
                        if (bc < pp_w)
                        {
                            ptr = rec + (brwidth << 6) + (bc << 3);
                            jVal0 = brwidth + bc;
                            if (chr)
                                QP = QP_store[jVal0];
                            ptr_e = ptr + (width << 3);

                            if (((pp_mod[jVal0-1]&0x01)) && ((pp_mod[jVal0]&0x01)))
                            {
                                /* Vert Hard filter */
                                do
                                {
                                    jVal1 = *ptr;       /* D */
                                    jVal0 = *(ptr - 1); /* C */
                                    jVal2 = jVal1 - jVal0; /* D-C */
                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))
                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1))))
                                    {
                                        jVal1 = (jVal0 + jVal1) >> 1; /* (C+D)/2 */
                                        *ptr = jVal1;
                                        *(ptr - 1) = jVal1;

                                        jVal1 = *(ptr + 1); /* E */
                                        jVal0 = *(ptr - 2); /* B */
                                        jVal2 = jVal1 - jVal0; /* E-B */
                                        if (jVal2 > 0)
                                        {
                                            jVal1 -= ((jVal2 + 3) >> 2); /* E = E - (E-B)/4 */
                                            jVal0 += ((jVal2 + 3) >> 2); /* B = B + (E-B)/4 */
                                            *(ptr + 1) = jVal1;
                                            *(ptr - 2) = jVal0;
                                        }
                                        else if (jVal2)
                                        {
                                            jVal1 += ((3 - jVal2) >> 2); /* E = E - (E-B)/4 */
                                            jVal0 -= ((3 - jVal2) >> 2); /* B = B + (E-B)/4 */
                                            *(ptr + 1) = jVal1;
                                            *(ptr - 2) = jVal0;
                                        }

                                        jVal1 = *(ptr + 2); /* F */
                                        jVal0 = *(ptr - 3); /* A */
                                        jVal2 = jVal1 - jVal0; /* (F-A) */
                                        if (jVal2 > 0)
                                        {
                                            jVal1 -= ((jVal2 + 7) >> 3); /* F -= (F-A)/8 */
                                            jVal0 += ((jVal2 + 7) >> 3); /* A += (F-A)/8 */
                                            *(ptr + 2) = jVal1;
                                            *(ptr - 3) = jVal0;
                                        }
                                        else if (jVal2)
                                        {
                                            jVal1 -= ((jVal2 - 7) >> 3); /* F -= (F-A)/8 */
                                            jVal0 += ((jVal2 - 7) >> 3); /* A += (F-A)/8 */
                                            *(ptr + 2) = jVal1;
                                            *(ptr - 3) = jVal0;
                                        }
                                    } /* end of vert hard filtering */
                                }
                                while ((ptr += width) < ptr_e);
                            }
                        } /* boundary */
                    } /*bc*/
                brwidth += pp_w;
            }/*br*/
            brwidth -= (pp_w << 1);
        }/*mbc*/
        brwidth += (pp_w << 1);
    }/*mbr*/

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}
#endif



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/chvr_filter.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "post_proc.h" #ifdef PV_POSTPROC_ON void CombinedHorzVertRingFilter( uint8 *rec, int width, int height, int16 *QP_store, int chr, uint8 *pp_mod) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int index, counter; int br, bc, incr, mbr, mbc; int QP = 1; int v[5]; uint8 *ptr, *ptr_c, *ptr_n; int w1, w2, w3, w4; int pp_w, pp_h, brwidth; int sum, delta; int a3_0, a3_1, a3_2, A3_0; /* for Deringing Threshold approach (MPEG4)*/ int max_diff, thres, v0, h0, min_blk, max_blk; int cnthflag; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* Calculate the width and height of the area in blocks (divide by 8) */ pp_w = (width >> 3); pp_h = (height >> 3); /* Set up various values needed for updating pointers into rec */ w1 = width; /* Offset to next row in pixels */ w2 = width << 1; /* Offset to two rows in pixels */ w3 = w1 + w2; /* Offset to three rows in pixels */ w4 = w2 << 1; /* Offset to four rows in pixels */ incr = width - BLKSIZE; /* Offset to next row after processing block */ /* Work through the area hortizontally by two rows per step */ for (mbr = 0; mbr < pp_h; mbr += 2) { /* brwidth contains the block number of the leftmost block * of the current row */ brwidth = mbr * pp_w; /* Work through the area vertically by two columns per step */ for (mbc = 0; mbc < pp_w; mbc += 2) { /* if the data is luminance info, get the correct * quantization paramenter. One parameter per macroblock */ if (!chr) { /* brwidth/4 is the macroblock number and mbc/2 is the macroblock col number*/ QP = QP_store[(brwidth>>2) + (mbc>>1)]; } /****************** Horiz. 
Filtering ********************/ /* Process four blocks for the filtering */ /********************************************************/ /* Loop over two rows of blocks */ for (br = mbr + 1; br < mbr + 3; br++) /* br is the row counter in blocks */ { /* Set brwidth to the first (leftmost) block number of the next row */ /* brwidth is used as an index when counting blocks */ brwidth += pp_w; /* Loop over two columns of blocks in the row */ for (bc = mbc; bc < mbc + 2; bc++) /* bc is the column counter in blocks */ { /****** check boundary for deblocking ************/ /* Execute if the row and column counters are within the area */ if (br < pp_h && bc < pp_w) { /* Set the ptr to the first pixel of the first block of the second row * brwidth * 64 is the pixel row offset * bc * 8 is the pixel column offset */ ptr = rec + (brwidth << 6) + (bc << 3); /* Set the index to the current block of the second row counting in blocks */ index = brwidth + bc; /* if the data is chrominance info, get the correct * quantization paramenter. One parameter per block. 
*/ if (chr) { QP = QP_store[index]; } /* Execute hard horizontal filter if semaphore for horizontal deblocking * is set for the current block and block immediately above it */ if (((pp_mod[index]&0x02) != 0) && ((pp_mod[index-pp_w]&0x02) != 0)) { /* Hard filter */ /* Set HorzHflag (bit 4) in the pp_mod location */ pp_mod[index-pp_w] |= 0x10; /* 4/26/00 reuse pp_mod for HorzHflag*/ /* Filter across the 8 pixels of the block */ for (index = BLKSIZE; index > 0; index--) { /* Difference between the current pixel and the pixel above it */ a3_0 = *ptr - *(ptr - w1); /* if the magnitude of the difference is greater than the KThH threshold * and within the quantization parameter, apply hard filter */ if ((a3_0 > KThH || a3_0 < -KThH) && a3_0 -QP) { ptr_c = ptr - w3; /* Points to pixel three rows above */ ptr_n = ptr + w1; /* Points to pixel one row below */ v[0] = (int)(*(ptr_c - w3)); v[1] = (int)(*(ptr_c - w2)); v[2] = (int)(*(ptr_c - w1)); v[3] = (int)(*ptr_c); v[4] = (int)(*(ptr_c + w1)); sum = v[0] + v[1] + v[2] + *ptr_c + v[4] + (*(ptr_c + w2)) + (*(ptr_c + w3)); /* Current pixel */ delta = (sum + *ptr_c + 4) >> 3; /* Average pixel values with rounding */ *(ptr_c) = (uint8) delta; /* Move pointer down one row of pixels (points to pixel two rows * above current pixel) */ ptr_c += w1; for (counter = 0; counter < 5; counter++) { /* Subtract off highest pixel and add in pixel below */ sum = sum - v[counter] + *ptr_n; /* Average the pixel values with rounding */ delta = (sum + *ptr_c + 4) >> 3; *ptr_c = (uint8)(delta); /* Increment pointers to next pixel row */ ptr_c += w1; ptr_n += w1; } } /* Increment pointer to next pixel */ ++ptr; } /* index*/ } else { /* soft filter*/ /* Clear HorzHflag (bit 4) in the pp_mod location */ pp_mod[index-pp_w] &= 0xef; /* reset 1110,1111 */ for (index = BLKSIZE; index > 0; index--) { /* Difference between the current pixel and the pixel above it */ a3_0 = *(ptr) - *(ptr - w1); /* if the magnitude of the difference is greater than the KTh 
threshold, * apply soft filter */ if ((a3_0 > KTh || a3_0 < -KTh)) { /* Sum of weighted differences */ a3_0 += ((*(ptr - w2) - *(ptr + w1)) << 1) + (a3_0 << 2); /* Check if sum is less than the quantization parameter */ if (PV_ABS(a3_0) < (QP << 3)) { a3_1 = *(ptr - w2) - *(ptr - w3); a3_1 += ((*(ptr - w4) - *(ptr - w1)) << 1) + (a3_1 << 2); a3_2 = *(ptr + w2) - *(ptr + w1); a3_2 += ((*(ptr) - *(ptr + w3)) << 1) + (a3_2 << 2); A3_0 = PV_ABS(a3_0) - PV_MIN(PV_ABS(a3_1), PV_ABS(a3_2)); if (A3_0 > 0) { A3_0 += A3_0 << 2; A3_0 = (A3_0 + 32) >> 6; if (a3_0 > 0) { A3_0 = -A3_0; } delta = (*(ptr - w1) - *(ptr)) >> 1; if (delta >= 0) { if (delta >= A3_0) { delta = PV_MAX(A3_0, 0); } } else { if (A3_0 > 0) { delta = 0; } else { delta = PV_MAX(A3_0, delta); } } *(ptr - w1) = (uint8)(*(ptr - w1) - delta); *(ptr) = (uint8)(*(ptr) + delta); } } /*threshold*/ } /* Increment pointer to next pixel */ ++ptr; } /*index*/ } /* Soft filter*/ }/* boundary checking*/ }/*bc*/ }/*br*/ brwidth -= (pp_w << 1); /****************** Vert. Filtering *********************/ /* Process four blocks for the filtering */ /********************************************************/ /* Loop over two rows of blocks */ for (br = mbr; br < mbr + 2; br++) /* br is the row counter in blocks */ { for (bc = mbc + 1; bc < mbc + 3; bc++) /* bc is the column counter in blocks */ { /****** check boundary for deblocking ************/ /* Execute if the row and column counters are within the area */ if (br < pp_h && bc < pp_w) { /* Set the ptr to the first pixel of the first block of the second row * brwidth * 64 is the pixel row offset * bc * 8 is the pixel column offset */ ptr = rec + (brwidth << 6) + (bc << 3); /* Set the index to the current block of the second row counting in blocks */ index = brwidth + bc; /* if the data is chrominance info, get the correct * quantization paramenter. One parameter per block. 
*/ if (chr) { QP = QP_store[index]; } /* Execute hard vertical filter if semaphore for vertical deblocking * is set for the current block and block immediately left of it */ if (((pp_mod[index-1]&0x01) != 0) && ((pp_mod[index]&0x01) != 0)) { /* Hard filter */ /* Set VertHflag (bit 5) in the pp_mod location of previous block*/ pp_mod[index-1] |= 0x20; /* 4/26/00 reuse pp_mod for VertHflag*/ /* Filter across the 8 pixels of the block */ for (index = BLKSIZE; index > 0; index--) { /* Difference between the current pixel * and the pixel to left of it */ a3_0 = *ptr - *(ptr - 1); /* if the magnitude of the difference is greater than the KThH threshold * and within the quantization parameter, apply hard filter */ if ((a3_0 > KThH || a3_0 < -KThH) && a3_0 -QP) { ptr_c = ptr - 3; ptr_n = ptr + 1; v[0] = (int)(*(ptr_c - 3)); v[1] = (int)(*(ptr_c - 2)); v[2] = (int)(*(ptr_c - 1)); v[3] = (int)(*ptr_c); v[4] = (int)(*(ptr_c + 1)); sum = v[0] + v[1] + v[2] + *ptr_c + v[4] + (*(ptr_c + 2)) + (*(ptr_c + 3)); delta = (sum + *ptr_c + 4) >> 3; *(ptr_c) = (uint8) delta; /* Move pointer down one pixel to the right */ ptr_c += 1; for (counter = 0; counter < 5; counter++) { /* Subtract off highest pixel and add in pixel below */ sum = sum - v[counter] + *ptr_n; /* Average the pixel values with rounding */ delta = (sum + *ptr_c + 4) >> 3; *ptr_c = (uint8)(delta); /* Increment pointers to next pixel */ ptr_c += 1; ptr_n += 1; } } /* Increment pointers to next pixel row */ ptr += w1; } /* index*/ } else { /* soft filter*/ /* Clear VertHflag (bit 5) in the pp_mod location */ pp_mod[index-1] &= 0xdf; /* reset 1101,1111 */ for (index = BLKSIZE; index > 0; index--) { /* Difference between the current pixel and the pixel above it */ a3_0 = *(ptr) - *(ptr - 1); /* if the magnitude of the difference is greater than the KTh threshold, * apply soft filter */ if ((a3_0 > KTh || a3_0 < -KTh)) { /* Sum of weighted differences */ a3_0 += ((*(ptr - 2) - *(ptr + 1)) << 1) + (a3_0 << 2); /* Check if sum 
is less than the quantization parameter */ if (PV_ABS(a3_0) < (QP << 3)) { a3_1 = *(ptr - 2) - *(ptr - 3); a3_1 += ((*(ptr - 4) - *(ptr - 1)) << 1) + (a3_1 << 2); a3_2 = *(ptr + 2) - *(ptr + 1); a3_2 += ((*(ptr) - *(ptr + 3)) << 1) + (a3_2 << 2); A3_0 = PV_ABS(a3_0) - PV_MIN(PV_ABS(a3_1), PV_ABS(a3_2)); if (A3_0 > 0) { A3_0 += A3_0 << 2; A3_0 = (A3_0 + 32) >> 6; if (a3_0 > 0) { A3_0 = -A3_0; } delta = (*(ptr - 1) - *(ptr)) >> 1; if (delta >= 0) { if (delta >= A3_0) { delta = PV_MAX(A3_0, 0); } } else { if (A3_0 > 0) { delta = 0; } else { delta = PV_MAX(A3_0, delta); } } *(ptr - 1) = (uint8)(*(ptr - 1) - delta); *(ptr) = (uint8)(*(ptr) + delta); } } /*threshold*/ } ptr += w1; } /*index*/ } /* Soft filter*/ } /* boundary*/ } /*bc*/ /* Increment pointer to next row of pixels */ brwidth += pp_w; }/*br*/ brwidth -= (pp_w << 1); /****************** Deringing ***************************/ /* Process four blocks for the filtering */ /********************************************************/ /* Loop over two rows of blocks */ for (br = mbr; br < mbr + 2; br++) { /* Loop over two columns of blocks in the row */ for (bc = mbc; bc < mbc + 2; bc++) { /* Execute if the row and column counters are within the area */ if (br < pp_h && bc < pp_w) { /* Set the index to the current block */ index = brwidth + bc; /* Execute deringing if semaphore for deringing (bit-3 of pp_mod) * is set for the current block */ if ((pp_mod[index]&0x04) != 0) { /* Don't process deringing if on an edge block */ if (br > 0 && bc > 0 && br < pp_h - 1 && bc < pp_w - 1) { /* cnthflag = weighted average of HorzHflag of current, * one above, previous blocks*/ cnthflag = ((pp_mod[index] & 0x10) + (pp_mod[index-pp_w] & 0x10) + ((pp_mod[index-1] >> 1) & 0x10) + ((pp_mod[index] >> 1) & 0x10)) >> 4; /* 4/26/00*/ /* Do the deringing if decision flags indicate it's necessary */ if (cnthflag < 3) { /* if the data is chrominance info, get the correct * quantization paramenter. One parameter per block. 
*/ if (chr) { QP = QP_store[index]; } /* Set amount to change luminance if it needs to be changed * based on quantization parameter */ max_diff = (QP >> 2) + 4; /* Set pointer to first pixel of current block */ ptr = rec + (brwidth << 6) + (bc << 3); /* Find minimum and maximum value of pixel block */ FindMaxMin(ptr, &min_blk, &max_blk, incr); /* threshold determination */ thres = (max_blk + min_blk + 1) >> 1; /* If pixel range is greater or equal than DERING_THR, smooth the region */ if ((max_blk - min_blk) >= DERING_THR) /*smooth 8x8 region*/ #ifndef NoMMX { /* smooth all pixels in the block*/ DeringAdaptiveSmoothMMX(ptr, width, thres, max_diff); } #else { /* Setup the starting point of the region to smooth */ v0 = (br << 3) - 1; h0 = (bc << 3) - 1; /*smooth 8x8 region*/ AdaptiveSmooth_NoMMX(rec, v0, h0, v0 + 1, h0 + 1, thres, width, max_diff); } #endif }/*cnthflag*/ } /*dering br==1 or bc==1 (boundary block)*/ else /* Process the boundary blocks */ { /* Decide to perform deblocking based on the semaphore flags * of the neighboring blocks in each case. A certain number of * hard filtering flags have to be set in order to signal need * for smoothing */ if (br > 0 && br < pp_h - 1) { if (bc > 0) { cnthflag = ((pp_mod[index-pp_w] & 0x10) + (pp_mod[index] & 0x10) + ((pp_mod[index-1] >> 1) & 0x10)) >> 4; } else { cnthflag = ((pp_mod[index] & 0x10) + (pp_mod[index-pp_w] & 0x10) + ((pp_mod[index] >> 1) & 0x10)) >> 4; } } else if (bc > 0 && bc < pp_w - 1) { if (br > 0) { cnthflag = ((pp_mod[index-pp_w] & 0x10) + ((pp_mod[index-1] >> 1) & 0x10) + ((pp_mod[index] >> 1) & 0x10)) >> 4; } else { cnthflag = ((pp_mod[index] & 0x10) + ((pp_mod[index-1] >> 1) & 0x10) + ((pp_mod[index] >> 1) & 0x10)) >> 4; } } else /* at the corner do default*/ { cnthflag = 0; } /* Do the deringing if decision flags indicate it's necessary */ if (cnthflag < 2) { /* if the data is chrominance info, get the correct * quantization paramenter. One parameter per block. 
*/ if (chr) { QP = QP_store[index]; } /* Set amount to change luminance if it needs to be changed * based on quantization parameter */ max_diff = (QP >> 2) + 4; /* Set pointer to first pixel of current block */ ptr = rec + (brwidth << 6) + (bc << 3); /* Find minimum and maximum value of pixel block */ FindMaxMin(ptr, &min_blk, &max_blk, incr); /* threshold determination */ thres = (max_blk + min_blk + 1) >> 1; /* Setup the starting point of the region to smooth * This is going to be a 4x4 region */ v0 = (br << 3) + 1; h0 = (bc << 3) + 1; /* If pixel range is greater or equal than DERING_THR, smooth the region */ if ((max_blk - min_blk) >= DERING_THR) { /* Smooth 4x4 region */ AdaptiveSmooth_NoMMX(rec, v0, h0, v0 - 3, h0 - 3, thres, width, max_diff); } }/*cnthflag*/ } /* br==0, bc==0*/ } /* dering*/ } /*boundary condition*/ }/*bc*/ brwidth += pp_w; }/*br*/ brwidth -= (pp_w << 1); }/*mbc*/ brwidth += (pp_w << 1); }/*mbr*/ /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return ; } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 2009 OrangeLabs * * Author: Alexis Gilabert Senar * Date: 2009-07-01 * ------------------------------------------------------------------- */ #define LOG_TAG "NativeDec" #include #include #include "com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.h" #include "mp4dec_api.h" #include "3GPVideoParser.h" #include "yuv2rgb.h" /* * Global variables * */ VideoDecControls iDecoderControl; uint8* pFrame0,*pFrame1; int32 nLayers = 1; uint8** volbuf; int32 volbuf_size[]= {0}; int32 iHeight = 0; int32 iWidth = 
0; uint32 FrameSize = 0; uint32 VideoDecOutputSize = 0; MP4DecodingMode mode= H263_MODE; /* Parser */ uint8* aOutBuffer; uint32 aOutBufferSize = 0; uint32 aOutTimestamp = 0; int parserInitialized = 0; int decoderInitialized = 0; bool Status = false; /** * De-Init decoder */ int deinitDecoder(){ if (PVCleanUpVideoDecoder(&iDecoderControl)== 1) { if (pFrame0) free(pFrame0); if (pFrame1) free(pFrame1); decoderInitialized = 0; return 1; } else { if (pFrame0) free(pFrame0); if (pFrame1) free(pFrame1); decoderInitialized = 0; return 0; } } /** * Init decoder * @param srcWidth video width * @param srcHeight video height */ int initDecoder(int srcWidth, int srcHeight){ if (decoderInitialized == 1) deinitDecoder(); iWidth = srcWidth; iHeight = srcHeight; FrameSize = (srcWidth * srcHeight); VideoDecOutputSize = (FrameSize * 3) >> 1; volbuf_size[0]= VideoDecOutputSize; volbuf = (uint8**)malloc(nLayers * sizeof(uint8*)); if (volbuf == NULL) return 0; volbuf[0] = (uint8*)malloc(volbuf_size[0]); if (volbuf[0] == NULL) return 0; memset(*volbuf,0,volbuf_size[0]); if (!PVInitVideoDecoder(&iDecoderControl,volbuf, volbuf_size, nLayers, iWidth, iHeight, mode)) return 0; PVSetPostProcType(&iDecoderControl,2); pFrame0 = (uint8*) malloc(VideoDecOutputSize); if (pFrame0 == NULL) return 0; pFrame1 = (uint8*) malloc(VideoDecOutputSize); if (pFrame1 == NULL) return 0; memset(pFrame1,0,VideoDecOutputSize); PVSetReferenceYUV(&iDecoderControl, pFrame1); decoderInitialized = 1; return decoderInitialized; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: InitDecoder * Signature: (II)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitDecoder (JNIEnv * env, jclass clazz, jint srcWidth, jint srcHeight){ return initDecoder(srcWidth,srcHeight); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: DeinitDecoder * 
Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitDecoder (JNIEnv * env, jclass clazz){ return deinitDecoder(); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: DecodeAndConvert * Signature: ([B[IJ)[I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DecodeAndConvert (JNIEnv *env, jclass clazz, jbyteArray h263Frame, jintArray decoded, jlong jtimestamp){ /* Return if decoder is not initialized */ if (!decoderInitialized){ return 0; } /* Set volbuf with h263Frame data*/ jint len = env->GetArrayLength(h263Frame); jbyte data[len]; env->GetByteArrayRegion(h263Frame, 0, len, data); /* Decode */ uint32 timestamp[]={(uint32)(jtimestamp & 0xFFFFFFFF)}; uint usetimestamp[]={0}; volbuf[0] = (uint8*)data; volbuf_size[0]=len; if (PVDecodeVideoFrame(&iDecoderControl, volbuf,timestamp,volbuf_size,usetimestamp,pFrame0) == 0){ return 0; } /* Copy result to YUV array ! 
*/ uint8* decodedFrame = iDecoderControl.outputFrame; uint8* pTempFrame; pTempFrame = (uint8*) pFrame0; pFrame0 = (uint8*) pFrame1; pFrame1 = (uint8*) pTempFrame; /* Create the output buffer */ uint32* resultBuffer= (uint32*) malloc(iWidth*iHeight*sizeof(uint32)); if (resultBuffer == NULL) return 0; /*********** Convert to rgb ***************/ if (convert(iWidth,iHeight,decodedFrame,resultBuffer) == 0){ return 0; } /* Return Bitmap image */ (env)->SetIntArrayRegion(decoded, 0, iWidth*iHeight, (const jint*)resultBuffer); free(resultBuffer); return 1; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: InitParser * Signature: (Ljava/lang/String;)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitParser (JNIEnv *env, jclass clazz, jstring pathToFile){ char *str; str = (char*)env->GetStringUTFChars(pathToFile, NULL); if (str == NULL) return 0; /* Init parser */ if (parserInitialized == 1){ parserInitialized = 0; release(); } if(Init3GPVideoParser(str) == 1){ env->ReleaseStringUTFChars(pathToFile, str); iWidth = getVideoWidth(); iHeight = getVideoHeight(); FrameSize = iWidth * iHeight; VideoDecOutputSize = (FrameSize * 3)>>1; aOutBuffer = (uint8*)malloc(VideoDecOutputSize); parserInitialized = 1; } else { env->ReleaseStringUTFChars(pathToFile, str); return 0; } /* Init decoder */ if (decoderInitialized == 1){ deinitDecoder(); } if(initDecoder(iWidth,iHeight)== 0) return 0; return 1; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: DeinitParser * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitParser (JNIEnv *env, jclass clazz){ if (decoderInitialized == 1) deinitDecoder(); parserInitialized = 0; return release(); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: 
getVideoLength * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoLength (JNIEnv *env, jclass clazz){ jint videoLength = getVideoDuration(); return videoLength; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoWidth * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoWidth (JNIEnv *env, jclass clazz) { return getVideoWidth(); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoHeight * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoHeight (JNIEnv *env, jclass clazz) { return getVideoHeight(); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoCoding * Signature: ()Ljava/lang/String; */ JNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoCoding (JNIEnv *env, jclass clazz) { jstring stringVideoCoding; char* charVideoCoding = getVideoCodec(); stringVideoCoding = (env)->NewStringUTF(charVideoCoding); return stringVideoCoding; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser * Method: getVideoSample * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/VideoSample */ JNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoSample (JNIEnv *env, jclass clazz, jintArray Decoded) { jobject object = NULL; /* Error return */ if (parserInitialized == 0){ return object; } // Get the new frame if (getFrame(aOutBuffer,&aOutBufferSize,&aOutTimestamp)!= VPAtomSucces){ return object; } /* Set frame with aOutBuffer data and timestamp*/ jbyteArray H263Frame = 
(env)->NewByteArray(aOutBufferSize); (env)->SetByteArrayRegion(H263Frame, 0, aOutBufferSize, (const jbyte*)aOutBuffer); /* Decode */ uint32 timestamp[]={aOutTimestamp}; uint usetimestamp[]={0}; volbuf[0] = aOutBuffer; volbuf_size[0]=aOutBufferSize; if (!PVDecodeVideoFrame(&iDecoderControl, volbuf,timestamp,volbuf_size,usetimestamp,pFrame0)){ return object; } /* Copy result to YUV array ! */ uint8* pTempFrame; uint8* decodedFrame = iDecoderControl.outputFrame; pTempFrame = (uint8*) pFrame0; pFrame0 = (uint8*) pFrame1; pFrame1 = (uint8*) pTempFrame; /* Create the output buffer */ uint32* resultBuffer = (uint32*)malloc(FrameSize*sizeof(uint32)); /* Convert YUV to RGB */ convert(iWidth,iHeight,decodedFrame,resultBuffer); /* Set Decoded */ (env)->SetIntArrayRegion(Decoded, 0, FrameSize, (const jint*)resultBuffer); free(resultBuffer); // Create new object /* Find class and method to return VideoSample*/ jclass classe = (env)->FindClass("com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/VideoSample"); if (classe == 0) { return object; } jmethodID mid = (env)->GetMethodID(classe,"","([BI)V"); if (mid == 0) { return object; } object = (env)->NewObject(classe,mid,H263Frame,aOutTimestamp); if (object == 0) { return object; } // Return created object return object; } /* * This is called by the VM when the shared library is first loaded. 
*/ jint JNI_OnLoad(JavaVM* vm, void* reserved) { JNIEnv* env = NULL; jint result = -1; if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) { goto bail; } /* success -- return valid version number */ result = JNI_VERSION_1_4; bail: return result; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.h ================================================ /* DO NOT EDIT THIS FILE - it is machine generated */ #include /* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder */ #ifndef _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder #define _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder #ifdef __cplusplus extern "C" { #endif /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: InitDecoder * Signature: (II)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitDecoder (JNIEnv *, jclass, jint, jint); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: DeinitDecoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitDecoder (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: DecodeAndConvert * Signature: ([B[IJ)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DecodeAndConvert (JNIEnv *, jclass, jbyteArray, jintArray, jlong); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: InitParser * Signature: (Ljava/lang/String;)I */ JNIEXPORT jint JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitParser (JNIEnv *, jclass, jstring); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: DeinitParser * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitParser (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: getVideoLength * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoLength (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: getVideoWidth * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoWidth (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: getVideoHeight * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoHeight (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: getVideoCoding * Signature: ()Ljava/lang/String; */ JNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoCoding (JNIEnv *, jclass); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder * Method: getVideoSample * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/VideoSample; */ JNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoSample (JNIEnv *, jclass, jintArray); #ifdef __cplusplus } #endif #endif 
================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/combined_decode.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_lib.h" /* video decoder function prototypes */ #include "vlc_decode.h" #include "bitstream.h" #include "scaling.h" #include "mbtype_mode.h" #define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT #include "osclconfig_compiler_warnings.h" /* ======================================================================== */ /* Function : DecodeFrameCombinedMode() */ /* Purpose : Decode a frame of MPEG4 bitstream in combined mode. */ /* In/out : */ /* Return : */ /* Modified : */ /* */ /* 03/30/2000 : Cleaned up and optimized the code. */ /* 03/31/2000 : Added proper handling of MB stuffing. */ /* 04/13/2000 : Rewrote this combined mode path completely */ /* so that it handles "Combined Mode With Error */ /* Resilience." Now the code resembles the */ /* pseudo codes in MPEG-4 standard better. 
*/
/* 10/13/2000 : Add fast VLC+dequant */
/* 04/13/2001 : fix MB_stuffing */
/* 08/07/2001 : remove MBzero */
/* ======================================================================== */

/* Decode every macroblock of the current VOP in combined (non-data-partitioned)
 * mode.  Outer loop = one video packet (or GOB/slice in short-header mode) per
 * iteration; inner loop = one macroblock per iteration.  On a macroblock decode
 * error the stream is rewound to the start of the packet, the resync marker is
 * searched for, and the damaged span is concealed via ConcealPacket(). */
PV_STATUS DecodeFrameCombinedMode(VideoDecData *video)
{
    PV_STATUS status;
    int mbnum;
    Vop *currVop = video->currVop;
    BitstreamDecVideo *stream = video->bitstream;
    int shortVideoHeader = video->shortVideoHeader;
    int16 QP, *QPMB = video->QPMB;
    uint8 *Mode = video->headerInfo.Mode;
    int nTotalMB = video->nTotalMB;
    int nMBPerRow = video->nMBPerRow;
    int slice_counter;
    uint32 tmpvar, long_zero_bits;
    uint code;
    int valid_stuffing;
    int resync_marker_length;
    int stuffing_length;

    /* add this for error resilient, 05/18/2000 */
    int32 startPacket;
    int mb_start;

    /* copy and pad to prev_Vop for INTER coding */
    /* Resync marker and stuffing-code lengths depend on the VOP type
       (P-VOP markers also depend on fcodeForward, per MPEG-4). */
    switch (currVop->predictionType)
    {
        case I_VOP :
            //      oscl_memset(Mode, MODE_INTRA, sizeof(uint8)*nTotalMB);
            resync_marker_length = 17;
            stuffing_length = 9;
            break;
        case P_VOP :
            oscl_memset(video->motX, 0, sizeof(MOT)*4*nTotalMB);
            oscl_memset(video->motY, 0, sizeof(MOT)*4*nTotalMB);
            //      oscl_memset(Mode, MODE_INTER, sizeof(uint8)*nTotalMB);
            resync_marker_length = 16 + currVop->fcodeForward;
            stuffing_length = 10;
            break;
        default :
            mp4dec_log("DecodeFrameCombinedMode(): Vop type not supported.\n");
            return PV_FAIL;
    }

#ifdef PV_ANNEX_IJKT_SUPPORT
    /* Select the short-header TCOEF VLC tables according to the H.263
       Annex I (advanced INTRA) and Annex T (modified quant) flags. */
    if (video->shortVideoHeader & PV_H263)
    {
        if (video->advanced_INTRA)
        {
            if (video->modified_quant)
            {
                video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader_AnnexIT;
                video->vlcDecCoeffInter = &VlcDecTCOEFShortHeader_AnnexT;
            }
            else
            {
                video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader_AnnexI;
                video->vlcDecCoeffInter = &VlcDecTCOEFShortHeader;
            }
        }
        else
        {
            if (video->modified_quant)
            {
                video->vlcDecCoeffInter = video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader_AnnexT;
            }
            else
            {
                video->vlcDecCoeffInter = video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader;
            }
        }
    }
#endif

    /** Initialize sliceNo ***/
    mbnum = slice_counter = 0;
    //  oscl_memset(video->sliceNo, 0, sizeof(uint8)*nTotalMB);
    QP = video->currVop->quantizer;

    do
    {
        /* This section is equivalent to motion_shape_texture() */
        /* in the MPEG-4 standard.            04/13/2000        */
        mb_start = mbnum;
        video->usePrevQP = 0;            /*  04/27/01 */
        /* Remember the packet start so a decode error can rewind here. */
        startPacket = getPointer(stream);

#ifdef PV_ANNEX_IJKT_SUPPORT
        if (video->modified_quant)
        {
            video->QP_CHR = MQ_chroma_QP_table[QP];
        }
        else
        {
            video->QP_CHR = QP;  /* ANNEX_T */
        }
#endif
        /* remove any stuffing bits */
        BitstreamShowBits16(stream, stuffing_length, &code);
        while (code == 1)
        {
            PV_BitstreamFlushBits(stream, stuffing_length);
            BitstreamShowBits16(stream, stuffing_length, &code);
        }

        do
        {
            /* we need video->mbnum in lower level functions */
            video->mbnum = mbnum;
            video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);
            video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;
            /* assign slice number for each macroblocks */
            video->sliceNo[mbnum] = (uint8) slice_counter;

            /* decode COD, MCBPC, ACpred_flag, CPBY and DQUANT */
            /* We have to discard stuffed MB header */
            status = GetMBheader(video, &QP);

            if (status != PV_SUCCESS)
            {
                VideoDecoderErrorDetected(video);
                video->mbnum = mb_start;
                /* NOTE(review): (startPacket & -8) clears the low 3 bits —
                   presumably getPointer() returns a bit position and this
                   rewinds to the byte boundary of the packet start; confirm. */
                movePointerTo(stream, (startPacket & -8));
                break;
            }

            /* Store the QP value for later use in AC prediction */
            QPMB[mbnum] = QP;

            if (Mode[mbnum] != MODE_SKIPPED)
            {
                /* decode the DCT coeficients for the MB */
                status = GetMBData(video);
                if (status != PV_SUCCESS)
                {
                    VideoDecoderErrorDetected(video);
                    video->mbnum = mb_start;
                    movePointerTo(stream, (startPacket & -8));
                    break;
                }
            }
            else /* MODE_SKIPPED */
            {
                SkippedMBMotionComp(video); /*  08/04/05 */
            }

            // Motion compensation and put video->mblock->pred_block
            mbnum++;

            /* remove any stuffing bits */
            BitstreamShowBits16(stream, stuffing_length, &code);
            while (code == 1)
            {
                PV_BitstreamFlushBits(stream, stuffing_length);
                BitstreamShowBits16(stream, stuffing_length, &code);
            }

            /* have we reached the end of the video packet or vop? */
            if (shortVideoHeader)
            {
#ifdef PV_ANNEX_IJKT_SUPPORT
                if (!video->slice_structure)
                {
#endif
                    /* GOB-structured short header: look for a GOB resync
                       marker at the end of each GOB row. */
                    if (mbnum >= (int)(video->mbnum_row + 1)*video->nMBinGOB)  /* 10/11/01 */
                    {
                        if (mbnum >= nTotalMB) return PV_SUCCESS;
                        status = BitstreamShowBits32(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);
                        if (tmpvar == GOB_RESYNC_MARKER)
                        {
                            break;
                        }
                        else
                        {
                            status = PV_BitstreamShowBitsByteAlign(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);
                            if (tmpvar == GOB_RESYNC_MARKER) break;
                        }
                    }
#ifdef PV_ANNEX_IJKT_SUPPORT
                }
                else
                {
                    if (mbnum >= nTotalMB)  /* in case no valid stuffing  06/23/01 */
                    {
                        valid_stuffing = validStuffing_h263(stream);
                        if (valid_stuffing == 0)
                        {
                            VideoDecoderErrorDetected(video);
                            ConcealPacket(video, mb_start, nTotalMB, slice_counter);
                        }
                        return PV_SUCCESS;
                    }
                    /* ANNEX_K */
                    PV_BitstreamShowBitsByteAlignNoForceStuffing(stream, 17, &tmpvar);
                    if (tmpvar == RESYNC_MARKER)
                    {
                        valid_stuffing = validStuffing_h263(stream);
                        if (valid_stuffing) break; /*  06/21/01 */
                    }
                }
#endif
            }
            else
            {
                if (mbnum >= nTotalMB)  /* in case no valid stuffing  06/23/01 */
                {
                    /* 11/01/2002 if we are at the end of the frame and
                       there is some garbage data at the end of the frame
                       (i.e. no next startcode) break if the stuffing is valid */
                    valid_stuffing = validStuffing(stream);
                    if (valid_stuffing == 0)
                    {
                        /* end 11/01/2002 */
                        VideoDecoderErrorDetected(video);
                        ConcealPacket(video, mb_start, nTotalMB, slice_counter);
                    }
                    PV_BitstreamByteAlign(stream);
                    return PV_SUCCESS;
                }

                status = PV_BitstreamShowBitsByteAlign(stream, 23, &tmpvar); /* this call is valid for f_code < 8 */
                long_zero_bits = !tmpvar;

                if ((tmpvar >> (23 - resync_marker_length)) == RESYNC_MARKER || long_zero_bits)
                {
                    valid_stuffing = validStuffing(stream);
                    if (valid_stuffing) break; /*  06/21/01 */
                }
            }
        }
        while (TRUE);

        if (shortVideoHeader)
        {
            /* We need to check newgob to refresh quantizer */
#ifdef PV_ANNEX_IJKT_SUPPORT
            if (!video->slice_structure)
            {
#endif
                while ((status = PV_GobHeader(video)) == PV_FAIL)
                {
                    if ((status = quickSearchGOBHeader(stream)) != PV_SUCCESS)
                    {
                        break;
                    }
                }
                mbnum = currVop->gobNumber * video->nMBinGOB;
#ifdef PV_ANNEX_IJKT_SUPPORT
            }
            else
            {
                while ((status = PV_H263SliceHeader(video, &mbnum)) == PV_FAIL)
                {
                    if ((status = quickSearchH263SliceHeader(stream)) != PV_SUCCESS)
                    {
                        break;
                    }
                }
            }
#endif
        }
        else
        {
            while ((status = PV_ReadVideoPacketHeader(video, &mbnum)) == PV_FAIL)
            {
                if ((status = quickSearchVideoPacketHeader(stream, resync_marker_length)) != PV_SUCCESS)
                {
                    break;
                }
            }
        }

        if (status == PV_END_OF_VOP)
        {
            mbnum = nTotalMB;
        }

        /* If the next packet starts past where we stopped, the gap was
           lost to an error — conceal the missing macroblocks. */
        if (mbnum > video->mbnum + 1)
        {
            ConcealPacket(video, video->mbnum, mbnum, slice_counter);
        }
        QP = video->currVop->quantizer;
        slice_counter++;
        if (mbnum >= nTotalMB) break;
    }
    while (TRUE);

    return PV_SUCCESS;
}

/* ============================================================================ */
/*  Function : GetMBHeader()                                                    */
/*  Purpose  : Decode MB header, not_coded, mcbpc, ac_pred_flag, cbpy, dquant.  */
/*  In/out   :                                                                  */
/*  Return   :                                                                  */
/*  Modified :                                                                  */
/*                                                                              */
/*      3/29/00 : Changed the returned value and optimized the code.
*/
/*      4/01/01 : new ACDC prediction structure */
/* ============================================================================ */

/* Decode one macroblock header: the not_coded (COD) bit, MCBPC, the AC
 * prediction flag, CBPY and DQUANT.  Updates Mode[], CBP[], acPredFlag[]
 * for this MB and the running quantizer *QP (clamped to 1..31).
 * Returns PV_FAIL on a VLC decode error. */
PV_STATUS GetMBheader(VideoDecData *video, int16 *QP)
{
    BitstreamDecVideo *stream = video->bitstream;
    int mbnum = video->mbnum;
    uint8 *Mode = video->headerInfo.Mode;
    int x_pos = video->mbnum_col;
    typeDCStore *DC = video->predDC + mbnum;
    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;
    typeDCACStore *DCAC_col = video->predDCAC_col;
    /* DQUANT-to-QP-delta table (MPEG-4 dquant codes 0..3). */
    const static int16 DQ_tab[4] = { -1, -2, 1, 2};

    int CBPY, CBPC;
    int MBtype, VopType;
    int MCBPC;
    uint DQUANT;
    int comp;
    Bool mb_coded;

    VopType = video->currVop->predictionType;
    /* I-VOP macroblocks have no COD bit — they are always coded. */
    mb_coded = ((VopType == I_VOP) ? TRUE : !BitstreamRead1Bits_INLINE(stream));

    if (!mb_coded)
    {
        /* skipped macroblock: reset the ACDC prediction stores and set a
           mid-gray DC for all six blocks */
        Mode[mbnum] = MODE_SKIPPED;
        //oscl_memset(DCAC_row, 0, sizeof(typeDCACStore)); /*  SKIPPED_ACDC */
        //oscl_memset(DCAC_col, 0, sizeof(typeDCACStore));
        ZERO_OUT_64BYTES(DCAC_row);
        ZERO_OUT_64BYTES(DCAC_col); /*  08/12/05 */

        for (comp = 0; comp < 6; comp++)
        {
            (*DC)[comp] = mid_gray;
        }
    }
    else
    {
        /* coded macroblock */
        if (VopType == I_VOP)
        {
            MCBPC = PV_VlcDecMCBPC_com_intra(stream);
        }
        else
        {
#ifdef PV_ANNEX_IJKT_SUPPORT
            if (!video->deblocking)
            {
                MCBPC = PV_VlcDecMCBPC_com_inter(stream);
            }
            else
            {
                MCBPC = PV_VlcDecMCBPC_com_inter_H263(stream);
            }
#else
            MCBPC = PV_VlcDecMCBPC_com_inter(stream);
#endif
        }

        if (VLC_ERROR_DETECTED(MCBPC))
        {
            return PV_FAIL;
        }

        /* MCBPC packs the MB type in bits 0-2 and chroma CBP in bits 4-5. */
        Mode[mbnum] = (uint8)(MBtype = MBtype_mode[MCBPC & 7]);
        CBPC = (MCBPC >> 4) & 3;

#ifdef PV_ANNEX_IJKT_SUPPORT
        if (MBtype & INTRA_MASK)
        {
            if (!video->shortVideoHeader)
            {
                video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits(stream);
            }
            else
            {
                /* H.263 Annex I: a second bit selects the prediction direction. */
                if (video->advanced_INTRA)
                {
                    if (!BitstreamRead1Bits(stream))
                    {
                        video->acPredFlag[mbnum] = 0;
                    }
                    else
                    {
                        video->acPredFlag[mbnum] = 1;
                        if (BitstreamRead1Bits(stream))
                        {
                            video->mblock->direction = 0;
                        }
                        else
                        {
                            video->mblock->direction = 1;
                        }
                    }
                }
                else
                {
                    video->acPredFlag[mbnum] = 0;
                }
            }
        }
#else
        if ((MBtype & INTRA_MASK) && !video->shortVideoHeader)
        {
            video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits_INLINE(stream);
        }
        else
        {
            video->acPredFlag[mbnum] = 0;
        }
#endif
        CBPY = PV_VlcDecCBPY(stream, MBtype & INTRA_MASK); /* INTRA || INTRA_Q */
        if (CBPY < 0)
        {
            return PV_FAIL;
        }

        // GW 04/23/99
        video->headerInfo.CBP[mbnum] = (uint8)(CBPY << 2 | (CBPC & 3));
#ifdef PV_ANNEX_IJKT_SUPPORT
        if (MBtype & Q_MASK)
        {
            if (!video->modified_quant)
            {
                DQUANT = BitstreamReadBits16(stream, 2);
                *QP += DQ_tab[DQUANT];

                if (*QP < 1) *QP = 1;
                else if (*QP > 31) *QP = 31;
                video->QP_CHR = *QP;  /* ANNEX_T */
            }
            else
            {
                /* H.263 Annex T modified quantization: 1 bit selects between a
                   table-driven delta and an absolute 5-bit QP. */
                if (BitstreamRead1Bits(stream))
                {
                    if (BitstreamRead1Bits(stream))
                    {
                        *QP += DQ_tab_Annex_T_11[*QP];
                    }
                    else
                    {
                        *QP += DQ_tab_Annex_T_10[*QP];
                    }
                    if (*QP < 1) *QP = 1;
                    else if (*QP > 31) *QP = 31;
                }
                else
                {
                    *QP = (int16)BitstreamReadBits16(stream, 5);
                }
                video->QP_CHR = MQ_chroma_QP_table[*QP];
            }
        }
#else
        if (MBtype & Q_MASK)
        {
            DQUANT = BitstreamReadBits16(stream, 2);
            *QP += DQ_tab[DQUANT];

            if (*QP < 1) *QP = 1;
            else if (*QP > 31) *QP = 31;
        }
#endif
    }

    return PV_SUCCESS;
}

/***********************************************************CommentBegin******
*       3/10/00  : initial modification to the
*                  new PV-Decoder Lib format.
*       4/2/2000 : Cleanup and error-handling modification.  This
*                  function has been divided into several sub-functions for
*                  better coding style and maintainance reason.  I also
*                  greatly shrunk the code size here.
* 9/18/2000 : VlcDecode+Dequant optimization * * 4/01/2001 : new ACDC prediction structure * 3/29/2002 : removed GetIntraMB and GetInterMB ***********************************************************CommentEnd********/ PV_STATUS GetMBData(VideoDecData *video) { BitstreamDecVideo *stream = video->bitstream; int mbnum = video->mbnum; MacroBlock *mblock = video->mblock; int16 *dataBlock; PIXEL *c_comp; uint mode = video->headerInfo.Mode[mbnum]; uint CBP = video->headerInfo.CBP[mbnum]; typeDCStore *DC = video->predDC + mbnum; int intra_dc_vlc_thr = video->currVop->intraDCVlcThr; int16 QP = video->QPMB[mbnum]; int16 QP_tmp = QP; int width = video->width; int comp; int switched; int ncoeffs[6] = {0, 0, 0, 0, 0, 0}; int *no_coeff = mblock->no_coeff; int16 DC_coeff; PV_STATUS status; #ifdef PV_POSTPROC_ON /* post-processing */ uint8 *pp_mod[6]; int TotalMB = video->nTotalMB; int MB_in_width = video->nMBPerRow; #endif int y_pos = video->mbnum_row; int x_pos = video->mbnum_col; int32 offset = (int32)(y_pos << 4) * width + (x_pos << 4); /* Decode each 8-by-8 blocks. comp 0 ~ 3 are luminance blocks, 4 ~ 5 */ /* are chrominance blocks. 04/03/2000. 
*/ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) { /** post-processing ***/ pp_mod[0] = video->pstprcTypCur + (y_pos << 1) * (MB_in_width << 1) + (x_pos << 1); pp_mod[1] = pp_mod[0] + 1; pp_mod[2] = pp_mod[0] + (MB_in_width << 1); pp_mod[3] = pp_mod[2] + 1; pp_mod[4] = video->pstprcTypCur + (TotalMB << 2) + mbnum; pp_mod[5] = pp_mod[4] + TotalMB; } #endif /* oscl_memset(mblock->block, 0, sizeof(typeMBStore)); Aug 9,2005 */ if (mode & INTRA_MASK) /* MODE_INTRA || MODE_INTRA_Q */ { switched = 0; if (intra_dc_vlc_thr) { if (video->usePrevQP) QP_tmp = video->QPMB[mbnum-1]; /* running QP 04/26/01 */ switched = (intra_dc_vlc_thr == 7 || QP_tmp >= intra_dc_vlc_thr * 2 + 11); } mblock->DCScalarLum = cal_dc_scaler(QP, LUMINANCE_DC_TYPE); /* 3/01/01 */ mblock->DCScalarChr = cal_dc_scaler(QP, CHROMINANCE_DC_TYPE); for (comp = 0; comp < 6; comp++) { dataBlock = mblock->block[comp]; /* 10/20/2000 */ if (video->shortVideoHeader) { #ifdef PV_ANNEX_IJKT_SUPPORT if (!video->advanced_INTRA) { #endif DC_coeff = (int16) BitstreamReadBits16_INLINE(stream, 8); if ((DC_coeff & 0x7f) == 0) /* 128 & 0 */ { /* currently we will only signal FAIL for 128. 
We will ignore the 0 case */ if (DC_coeff == 128) { return PV_FAIL; } else { VideoDecoderErrorDetected(video); } } if (DC_coeff == 255) { DC_coeff = 128; } dataBlock[0] = (int16) DC_coeff; #ifdef PV_ANNEX_IJKT_SUPPORT } #endif ncoeffs[comp] = VlcDequantH263IntraBlock_SH(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]); } else { if (switched == 0) { status = PV_DecodePredictedIntraDC(comp, stream, &DC_coeff); if (status != PV_SUCCESS) return PV_FAIL; dataBlock[0] = (int16) DC_coeff; } ncoeffs[comp] = VlcDequantH263IntraBlock(video, comp, switched, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]); } if (VLC_ERROR_DETECTED(ncoeffs[comp])) { if (switched) return PV_FAIL; else { ncoeffs[comp] = 1; oscl_memset((dataBlock + 1), 0, sizeof(int16)*63); } } no_coeff[comp] = ncoeffs[comp]; #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = (uint8) PostProcSemaphore(dataBlock); #endif } MBlockIDCT(video); } else /* INTER modes */ { /* moved it here Aug 15, 2005 */ /* decode the motion vector (if there are any) */ status = PV_GetMBvectors(video, mode); if (status != PV_SUCCESS) { return status; } MBMotionComp(video, CBP); c_comp = video->currVop->yChan + offset; #ifdef PV_ANNEX_IJKT_SUPPORT for (comp = 0; comp < 4; comp++) { (*DC)[comp] = mid_gray; if (CBP & (1 << (5 - comp))) { ncoeffs[comp] = VlcDequantH263InterBlock(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]); if (VLC_ERROR_DETECTED(ncoeffs[comp])) return PV_FAIL; BlockIDCT(c_comp + (comp&2)*(width << 2) + 8*(comp&1), mblock->pred_block + (comp&2)*64 + 8*(comp&1), mblock->block[comp], width, ncoeffs[comp], mblock->bitmapcol[comp], mblock->bitmaprow[comp]); #ifdef PV_POSTPROC_ON /* for inter just test for ringing */ if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = (uint8)((ncoeffs[comp] > 3) ? 
4 : 0); #endif } else { /* no IDCT for all zeros blocks 03/28/2002 */ /* BlockIDCT(); */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = 0; #endif } } video->QPMB[mbnum] = video->QP_CHR; /* ANNEX_T */ (*DC)[4] = mid_gray; if (CBP & 2) { ncoeffs[4] = VlcDequantH263InterBlock(video, 4, mblock->bitmapcol[4], &mblock->bitmaprow[4]); if (VLC_ERROR_DETECTED(ncoeffs[4])) return PV_FAIL; BlockIDCT(video->currVop->uChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 256, mblock->block[4], width >> 1, ncoeffs[4], mblock->bitmapcol[4], mblock->bitmaprow[4]); #ifdef PV_POSTPROC_ON /* for inter just test for ringing */ if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[4] = (uint8)((ncoeffs[4] > 3) ? 4 : 0); #endif } else { /* no IDCT for all zeros blocks 03/28/2002 */ /* BlockIDCT(); */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[4] = 0; #endif } (*DC)[5] = mid_gray; if (CBP & 1) { ncoeffs[5] = VlcDequantH263InterBlock(video, 5, mblock->bitmapcol[5], &mblock->bitmaprow[5]); if (VLC_ERROR_DETECTED(ncoeffs[5])) return PV_FAIL; BlockIDCT(video->currVop->vChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 264, mblock->block[5], width >> 1, ncoeffs[5], mblock->bitmapcol[5], mblock->bitmaprow[5]); #ifdef PV_POSTPROC_ON /* for inter just test for ringing */ if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[5] = (uint8)((ncoeffs[5] > 3) ? 
4 : 0); #endif } else { /* no IDCT for all zeros blocks 03/28/2002 */ /* BlockIDCT(); */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[5] = 0; #endif } video->QPMB[mbnum] = QP; /* restore the QP values ANNEX_T*/ #else for (comp = 0; comp < 4; comp++) { (*DC)[comp] = mid_gray; if (CBP & (1 << (5 - comp))) { ncoeffs[comp] = VlcDequantH263InterBlock(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]); if (VLC_ERROR_DETECTED(ncoeffs[comp])) return PV_FAIL; BlockIDCT(c_comp + (comp&2)*(width << 2) + 8*(comp&1), mblock->pred_block + (comp&2)*64 + 8*(comp&1), mblock->block[comp], width, ncoeffs[comp], mblock->bitmapcol[comp], mblock->bitmaprow[comp]); #ifdef PV_POSTPROC_ON /* for inter just test for ringing */ if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = (uint8)((ncoeffs[comp] > 3) ? 4 : 0); #endif } else { /* no IDCT for all zeros blocks 03/28/2002 */ /* BlockIDCT(); */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = 0; #endif } } (*DC)[4] = mid_gray; if (CBP & 2) { ncoeffs[4] = VlcDequantH263InterBlock(video, 4, mblock->bitmapcol[4], &mblock->bitmaprow[4]); if (VLC_ERROR_DETECTED(ncoeffs[4])) return PV_FAIL; BlockIDCT(video->currVop->uChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 256, mblock->block[4], width >> 1, ncoeffs[4], mblock->bitmapcol[4], mblock->bitmaprow[4]); #ifdef PV_POSTPROC_ON /* for inter just test for ringing */ if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[4] = (uint8)((ncoeffs[4] > 3) ? 
4 : 0); #endif } else { /* no IDCT for all zeros blocks 03/28/2002 */ /* BlockIDCT(); */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[4] = 0; #endif } (*DC)[5] = mid_gray; if (CBP & 1) { ncoeffs[5] = VlcDequantH263InterBlock(video, 5, mblock->bitmapcol[5], &mblock->bitmaprow[5]); if (VLC_ERROR_DETECTED(ncoeffs[5])) return PV_FAIL; BlockIDCT(video->currVop->vChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 264, mblock->block[5], width >> 1, ncoeffs[5], mblock->bitmapcol[5], mblock->bitmaprow[5]); #ifdef PV_POSTPROC_ON /* for inter just test for ringing */ if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[5] = (uint8)((ncoeffs[5] > 3) ? 4 : 0); #endif } else { /* no IDCT for all zeros blocks 03/28/2002 */ /* BlockIDCT(); */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[5] = 0; #endif #endif // PV_ANNEX_IJKT_SUPPORT } video->usePrevQP = 1; /* should be set after decoding the first Coded 04/27/01 */ return PV_SUCCESS; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/conceal.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
 * -------------------------------------------------------------------
 */
#include "mp4dec_lib.h" /* video decoder function prototypes */
#include "vlc_decode.h"
#include "bitstream.h"
#include "scaling.h"

/* ======================================================================
    Function : ConcealTexture_I()
    Date     : 06/12/2001
    Purpose  : Conceal texture for I-partition
    In/out   :
    Return   :
    Modified :
 ====================================================================== */
/* Re-decode macroblocks [mb_start, mb_stop) of an I-VOP video packet whose
 * second (texture) partition was lost: rewind to the first partition, decode
 * MCBPC/DQUANT/DC per MB, clear CBP and AC prediction, and rebuild each MB
 * from DC only.  If the intra-DC-VLC switch condition hits, fall back to
 * ConcealPacket() (copy from the previous frame) for the remaining MBs. */
void ConcealTexture_I(VideoDecData *video, int32 startFirstPartition, int mb_start, int mb_stop, int slice_counter)
{
    int mbnum;
    BitstreamDecVideo *stream = video->bitstream;
    int16 QP;
    int intra_dc_vlc_thr = video->currVop->intraDCVlcThr;

    movePointerTo(stream, startFirstPartition);

    video->usePrevQP = 0;
    for (mbnum = mb_start; mbnum < mb_stop; mbnum++)
    {
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum, video->nMBPerRow);
        video->mbnum_col = mbnum - video->mbnum_row * video->nMBPerRow;
        video->sliceNo[mbnum] = (uint8) slice_counter;
        QP = video->QPMB[mbnum];
        PV_VlcDecMCBPC_com_intra(stream);
        GetMBheaderDataPart_DQUANT_DC(video, &QP);

        if (intra_dc_vlc_thr)
        {
            if (video->usePrevQP)
                QP = video->QPMB[mbnum-1];
            if (intra_dc_vlc_thr == 7 || QP >= intra_dc_vlc_thr*2 + 11)  /* if switched then conceal from previous frame  */
            {
                ConcealPacket(video, mbnum, mb_stop, slice_counter);
                video->mbnum = mb_stop - 1;
                video->mbnum_row = PV_GET_ROW(video->mbnum, video->nMBPerRow);
                video->mbnum_col = video->mbnum - video->mbnum_row * video->nMBPerRow;
                break;
            }
        }

        video->headerInfo.CBP[mbnum] = 0;
        video->acPredFlag[mbnum] = 0;
        GetMBData_DataPart(video);
        video->usePrevQP = 1;
    }
    return;
}

/* ======================================================================
    Function : ConcealTexture_P()
    Date     : 05/16/2000
    Purpose  : Conceal texture for P-partition
    In/out   :
    Return   :
 ====================================================================== */
/* Conceal macroblocks [mb_start, mb_stop) of a P-VOP whose texture partition
 * was lost: zero the residual and apply motion compensation only (INTER MBs
 * keep their decoded vectors; other MBs are forced to MODE_SKIPPED). */
void ConcealTexture_P(VideoDecData *video, int mb_start, int mb_stop, int slice_counter)
{
    int mbnum;

    for (mbnum = mb_start; mbnum < mb_stop; mbnum++)
    {
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum, video->nMBPerRow);
        video->mbnum_col = mbnum - video->mbnum_row * video->nMBPerRow;
        video->sliceNo[mbnum] = (uint8) slice_counter;
        oscl_memset(video->mblock->block, 0, sizeof(typeMBStore));
        /* to get rid of dark region caused by INTRA blocks */
        /* 05/19/2000 */
        if (video->headerInfo.Mode[mbnum] & INTER_MASK)
        {
            MBMotionComp(video, 0);
        }
        else
        {
            video->headerInfo.Mode[mbnum] = MODE_SKIPPED;
            SkippedMBMotionComp(video);
        }
    }

    return;
}

/***************************************************************
    Function: ConcealPacket
    Purpose : Conceal motion and texture of a packet by direct
              copying from previous frame.
    Returned: void
    Modified:
***************************************************************/
/* Last-resort concealment: copy macroblocks [mb_start, mb_stop) verbatim from
 * the concealment frame and mark them skipped. */
void ConcealPacket(VideoDecData *video,
                   int mb_start,
                   int mb_stop,
                   int slice_counter)
{
    int i;
    for (i = mb_start; i < mb_stop; i++)
    {
        CopyVopMB(video->currVop, video->concealFrame, i, video->width, video->height);
        video->sliceNo[i] = (uint8) slice_counter;
        video->headerInfo.Mode[i] = MODE_SKIPPED;
    }
    return;
}

/****************************************************************************
    Function: CopyVopMB
    Purpose : Fill a macroblock with previous Vop.
Returned : void Modified: 6/04/2001 rewrote the function copies from concealFrame ****************************************************************************/ void CopyVopMB(Vop *curr, uint8 *prevFrame, int mbnum, int width_Y, int height) { int width_C = width_Y >> 1; int row = MB_SIZE; uint8 *y1, *y2, *u1, *u2, *v1, *v2; int xpos, ypos, MB_in_width; int32 lumstart, chrstart, size; MB_in_width = (width_Y + 15) >> 4; ypos = PV_GET_ROW(mbnum, MB_in_width); xpos = mbnum - ypos * MB_in_width; lumstart = (ypos << 4) * (int32)width_Y + (xpos << 4); chrstart = (ypos << 3) * (int32)width_C + (xpos << 3); size = (int32)height * width_Y; y1 = curr->yChan + lumstart; u1 = curr->uChan + chrstart; v1 = curr->vChan + chrstart; y2 = prevFrame + lumstart; u2 = prevFrame + size + chrstart; v2 = prevFrame + size + (size >> 2) + chrstart; while (row) { oscl_memcpy(y1, y2, MB_SIZE); y1 += width_Y; y2 += width_Y; oscl_memcpy(y1, y2, MB_SIZE); y1 += width_Y; y2 += width_Y; oscl_memcpy(y1, y2, MB_SIZE); y1 += width_Y; y2 += width_Y; oscl_memcpy(y1, y2, MB_SIZE); y1 += width_Y; y2 += width_Y; oscl_memcpy(u1, u2, B_SIZE); u1 += width_C; u2 += width_C; oscl_memcpy(u1, u2, B_SIZE); u1 += width_C; u2 += width_C; oscl_memcpy(v1, v2, B_SIZE); v1 += width_C; v2 += width_C; oscl_memcpy(v1, v2, B_SIZE); v1 += width_C; v2 += width_C; row -= 4; } return; } /* CopyVopMB */ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/datapart_decode.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "vlc_decode.h" #include "bitstream.h" #include "scaling.h" #include "mbtype_mode.h" #include "idct.h" #define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT #include "osclconfig_compiler_warnings.h" /* ======================================================================== */ /* Function : DecodeFrameDataPartMode() */ /* Purpose : Decode a frame of MPEG4 bitstream in datapartitioning mode. */ /* In/out : */ /* Return : */ /* Modified : */ /* */ /* 04/25/2000 : Rewrite the data partitioning path completely */ /* according to the pseudo codes in MPEG-4 */ /* standard. 
*/
/*  Modified : 09/18/2000 add fast VlcDecode+Dequant            */
/*             04/17/2001 cleanup                               */
/* ======================================================================== */

/* Decode one VOP coded in MPEG-4 data-partitioning mode: one video packet
 * per outer-loop iteration, dispatched to the I- or P-VOP packet decoder,
 * followed by resync-marker search and packet concealment for any gap. */
PV_STATUS DecodeFrameDataPartMode(VideoDecData *video)
{
    PV_STATUS status;
    Vop *currVop = video->currVop;
    BitstreamDecVideo *stream = video->bitstream;

    int nMBPerRow = video->nMBPerRow;

    int vopType = currVop->predictionType;
    int mbnum;
    int nTotalMB = video->nTotalMB;
    int slice_counter;
    int resync_marker_length;

    /* copy and pad to prev_Vop for INTER coding */
    switch (vopType)
    {
        case I_VOP :
            //  oscl_memset(Mode, MODE_INTRA, sizeof(uint8)*nTotalMB);
            resync_marker_length = 17;
            break;
        case P_VOP :
            oscl_memset(video->motX, 0, sizeof(MOT)*4*nTotalMB);
            oscl_memset(video->motY, 0, sizeof(MOT)*4*nTotalMB);
            //  oscl_memset(Mode, MODE_INTER, sizeof(uint8)*nTotalMB);
            resync_marker_length = 16 + currVop->fcodeForward;
            break;
        default :
            mp4dec_log("DecodeFrameDataPartMode(): Vop type not supported.\n");
            return PV_FAIL;
    }

    /** Initialize sliceNo ***/
    mbnum = slice_counter = 0;
    //  oscl_memset(video->sliceNo, 0, sizeof(uint8)*nTotalMB);

    do
    {
        /* This section is equivalent to motion_shape_texture() */
        /* in the MPEG-4 standard.  04/13/2000                  */
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */
        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;

        switch (vopType)
        {
            case I_VOP :
                status = DecodeDataPart_I_VideoPacket(video, slice_counter);
                break;
            case P_VOP :
                status = DecodeDataPart_P_VideoPacket(video, slice_counter);
                break;
            default :
                mp4dec_log("DecodeFrameDataPartMode(): Vop type not supported.\n");
                return PV_FAIL;
        }

        /* Find the next video packet header; on failure, scan forward for
           a resync marker of the VOP-type-dependent length. */
        while ((status = PV_ReadVideoPacketHeader(video, &mbnum)) == PV_FAIL)
        {
            if ((status = quickSearchVideoPacketHeader(stream, resync_marker_length)) != PV_SUCCESS)
            {
                break;
            }
        }

        if (status == PV_END_OF_VOP)
        {
            mbnum = nTotalMB;
        }

        /* Conceal any macroblocks skipped over between packets. */
        if (mbnum > video->mbnum + 1)
        {
            ConcealPacket(video, video->mbnum, mbnum, slice_counter);
        }
        slice_counter++;
        if (mbnum >= nTotalMB)
        {
            break;
        }
    }
    while (TRUE);

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : DecodeDataPart_I_VideoPacket()                               */
/*  Date     : 04/25/2000                                                   */
/*  Purpose  : Decode Data Partitioned Mode Video Packet in I-VOP           */
/*  In/out   :                                                              */
/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.
*/
/*  Modified : 09/18/2000 add fast VlcDecode+Dequant                  */
/*             04/01/2001 fixed MB_stuffing, removed unnecessary code */
/* ======================================================================== */

/* Decode one data-partitioned I-VOP video packet.  First partition: per-MB
 * MCBPC + DQUANT + DC up to the DC marker.  Second partition: per-MB
 * ac_pred_flag + CBPY, then the AC texture data.  On error the stream is
 * rewound (first partition) or the packet is concealed via
 * ConcealTexture_I() (second partition). */
PV_STATUS DecodeDataPart_I_VideoPacket(VideoDecData *video, int slice_counter)
{
    PV_STATUS status;
    uint8 *Mode = video->headerInfo.Mode;
    BitstreamDecVideo *stream = video->bitstream;
    int nTotalMB = video->nTotalMB;
    int mbnum, mb_start, mb_end;
    int16 QP, *QPMB = video->QPMB;
    int MBtype, MCBPC, CBPY;
    uint32 tmpvar;
    uint code;
    int nMBPerRow = video->nMBPerRow;
    Bool valid_stuffing;
    int32 startSecondPart, startFirstPart = getPointer(stream);

    /* decode the first partition */
    QP = video->currVop->quantizer;
    mb_start = mbnum = video->mbnum;
    video->usePrevQP = 0;            /*  04/27/01 */

    /* discard MB stuffing codes (9-bit pattern == 1) */
    BitstreamShowBits16(stream, 9, &code);
    while (code == 1)
    {
        PV_BitstreamFlushBits(stream, 9);
        BitstreamShowBits16(stream, 9, &code);
    }

    do
    {
        /* decode COD, MCBPC, ACpred_flag, CPBY and DQUANT */
        MCBPC = PV_VlcDecMCBPC_com_intra(stream);

        if (!VLC_ERROR_DETECTED(MCBPC))
        {
            Mode[mbnum] = (uint8)(MBtype = MBtype_mode[MCBPC & 7]);
            video->headerInfo.CBP[mbnum] = (uint8)((MCBPC >> 4) & 3);
            status = GetMBheaderDataPart_DQUANT_DC(video, &QP);
            video->usePrevQP = 1;        /* set it after the first coded MB  04/27/01 */
        }
        else
        {
            /* Report the error to the application.   06/20/2000 */
            VideoDecoderErrorDetected(video);
            video->mbnum = mb_start;
            movePointerTo(stream, startFirstPart);
            return PV_FAIL;
        }

        video->sliceNo[mbnum] = (uint8) slice_counter;
        QPMB[mbnum] = QP;
        video->mbnum = ++mbnum;

        /* discard MB stuffing codes */
        BitstreamShowBits16(stream, 9, &code);
        while (code == 1)
        {
            PV_BitstreamFlushBits(stream, 9);
            BitstreamShowBits16(stream, 9, &code);
        }

        /* have we reached the end of the video packet or vop? */
        status = BitstreamShowBits32(stream, DC_MARKER_LENGTH, &tmpvar);
    }
    while (tmpvar != DC_MARKER && video->mbnum < nTotalMB);

    if (tmpvar == DC_MARKER)
    {
        PV_BitstreamFlushBits(stream, DC_MARKER_LENGTH);
    }
    else
    {
        status = quickSearchDCM(stream);
        if (status == PV_SUCCESS)
        {
            /* only way you can end up being here is in the last packet,and
               there is stuffing at the end of the first partition */
            PV_BitstreamFlushBits(stream, DC_MARKER_LENGTH);
        }
        else
        {
            /* Report the error to the application.   06/20/2000 */
            VideoDecoderErrorDetected(video);
            movePointerTo(stream, startFirstPart);
            video->mbnum = mb_start;
            /* concealment will be taken care of in the upper layer */
            return PV_FAIL;
        }
    }

    /* decode the second partition */
    startSecondPart = getPointer(stream);

    mb_end = video->mbnum;

    for (mbnum = mb_start; mbnum < mb_end; mbnum++)
    {
        MBtype = Mode[mbnum];
        /* No skipped mode in I-packets  3/1/2001    */
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */
        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;

        /* there is always acdcpred in DataPart mode  04/10/01 */
        video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits(stream);

        CBPY = PV_VlcDecCBPY(stream, MBtype & INTRA_MASK); /* MODE_INTRA || MODE_INTRA_Q */
        if (CBPY < 0)
        {
            /* Report the error to the application.   06/20/2000 */
            VideoDecoderErrorDetected(video);
            movePointerTo(stream, startSecondPart); /*  */
            /* Conceal packet,  05/15/2000 */
            ConcealTexture_I(video, startFirstPart, mb_start, mb_end, slice_counter);
            return PV_FAIL;
        }

        video->headerInfo.CBP[mbnum] |= (uint8)(CBPY << 2);
    }

    video->usePrevQP = 0;

    for (mbnum = mb_start; mbnum < mb_end; mbnum++)
    {
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum , nMBPerRow);  /*  This is needed if nbnum is read from the packet header */
        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;

        /* No skipped mode in I-packets  3/1/2001    */
        /* decode the DCT coeficients for the MB */
        status = GetMBData_DataPart(video);
        if (status != PV_SUCCESS)
        {
            /* Report the error to the application.  06/20/2000 */
            VideoDecoderErrorDetected(video);
            movePointerTo(stream, startSecondPart); /*  */
            /* Conceal packet,  05/15/2000 */
            ConcealTexture_I(video, startFirstPart, mb_start, mb_end, slice_counter);
            return status;
        }
        video->usePrevQP = 1;  /*  04/27/01 should be set after decoding first MB */
    }

    valid_stuffing = validStuffing(stream);
    if (!valid_stuffing)
    {
        VideoDecoderErrorDetected(video);
        movePointerTo(stream, startSecondPart);
        ConcealTexture_I(video, startFirstPart, mb_start, mb_end, slice_counter);
        return PV_FAIL;
    }
    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : DecodeDataPart_P_VideoPacket()                               */
/*  Date     : 04/25/2000                                                   */
/*  Purpose  : Decode Data Partitioned Mode Video Packet in P-VOP           */
/*  In/out   :                                                              */
/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.
*/
/*  Modified : 09/18/2000, fast VlcDecode+Dequant                       */
/*             04/13/2001, fixed MB_stuffing, new ACDC pred structure,  */
/*                         cleanup                                      */
/*             08/07/2001, remove MBzero                                */
/* ======================================================================== */

/* Decode one data-partitioned P-VOP video packet.  First partition: per-MB
 * header + motion vectors up to the motion marker.  Second partition: per-MB
 * ac_pred_flag/CBPY/DQUANT-DC, then the texture data (skipped MBs get motion
 * compensation only).  On error the stream is rewound (first partition) or
 * the packet is concealed via ConcealTexture_P() (second partition). */
PV_STATUS DecodeDataPart_P_VideoPacket(VideoDecData *video, int slice_counter)
{
    PV_STATUS status;
    uint8 *Mode = video->headerInfo.Mode;
    BitstreamDecVideo *stream = video->bitstream;
    int nTotalMB = video->nTotalMB;
    int mbnum, mb_start, mb_end;
    int16 QP, *QPMB = video->QPMB;
    int MBtype, CBPY;
    Bool valid_stuffing;
    int intra_MB;
    uint32 tmpvar;
    uint code;
    int32 startFirstPart, startSecondPart;
    int nMBPerRow = video->nMBPerRow;
    uint8 *pbyte;

    /* decode the first partition */
    startFirstPart = getPointer(stream);
    mb_start = video->mbnum;
    video->usePrevQP = 0;         /*  04/27/01 */

    /* discard MB stuffing codes (10-bit pattern == 1) */
    BitstreamShowBits16(stream, 10, &code);
    while (code == 1)
    {
        PV_BitstreamFlushBits(stream, 10);
        BitstreamShowBits16(stream, 10, &code);
    }

    do
    {
        /* decode COD, MCBPC, ACpred_flag, CPBY and DQUANT */
        /* We have to discard stuffed MB header */
        status = GetMBheaderDataPart_P(video);
        if (status != PV_SUCCESS)
        {
            /* Report the error to the application.   06/20/2000 */
            VideoDecoderErrorDetected(video);
            movePointerTo(stream, startFirstPart);
            video->mbnum = mb_start;
            return PV_FAIL;
        }

        /* we must update slice_counter before motion vector decoding. */
        video->sliceNo[video->mbnum] = (uint8) slice_counter;

        if (Mode[video->mbnum] & INTER_MASK) /* INTER || INTER_Q || INTER_4V */
        {
            /* decode the motion vector (if there are any) */
            status = PV_GetMBvectors(video, Mode[video->mbnum]);
            if (status != PV_SUCCESS)
            {
                /* Report the error to the application.   06/20/2000 */
                VideoDecoderErrorDetected(video);
                movePointerTo(stream, startFirstPart);
                video->mbnum = mb_start;
                return PV_FAIL;
            }
        }

        video->mbnum++;
        video->mbnum_row = PV_GET_ROW(video->mbnum, nMBPerRow);  /*  This is needed if mbnum is read from the packet header */
        video->mbnum_col = video->mbnum - video->mbnum_row * nMBPerRow;

        /* discard MB stuffing codes */
        BitstreamShowBits16(stream, 10, &code);
        while (code == 1)
        {
            PV_BitstreamFlushBits(stream, 10);
            BitstreamShowBits16(stream, 10, &code);
        }

        /* have we reached the end of the video packet or vop? */
        status = BitstreamShowBits32(stream, MOTION_MARKER_COMB_LENGTH, &tmpvar);
        /*      if (status != PV_SUCCESS && status != PV_END_OF_BUFFER) return status;  */
    }
    while (tmpvar != MOTION_MARKER_COMB && video->mbnum < nTotalMB);

    if (tmpvar == MOTION_MARKER_COMB)
    {
        PV_BitstreamFlushBits(stream, MOTION_MARKER_COMB_LENGTH);
    }
    else
    {
        status = quickSearchMotionMarker(stream);
        if (status == PV_SUCCESS)
        {
            /* only way you can end up being here is in the last packet,and
               there is stuffing at the end of the first partition */
            PV_BitstreamFlushBits(stream, MOTION_MARKER_COMB_LENGTH);
        }
        else
        {
            /* Report the error to the application.   06/20/2000 */
            VideoDecoderErrorDetected(video);
            movePointerTo(stream, startFirstPart);
            video->mbnum = mb_start;
            /* concealment will be taken care of in the upper layer  */
            return PV_FAIL;
        }
    }

    /* decode the second partition */
    startSecondPart = getPointer(stream);
    QP = video->currVop->quantizer;

    mb_end = video->mbnum;

    for (mbnum = mb_start; mbnum < mb_end; mbnum++)
    {
        MBtype = Mode[mbnum];

        if (MBtype == MODE_SKIPPED)
        {
            QPMB[mbnum] = QP; /*  03/01/01 */
            continue;
        }
        intra_MB = (MBtype & INTRA_MASK); /* (MBtype == MODE_INTRA || MBtype == MODE_INTRA_Q) */
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */
        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;

        /* there is always acdcprediction in DataPart mode  04/10/01 */
        if (intra_MB)
        {
            video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits_INLINE(stream);
        }

        CBPY = PV_VlcDecCBPY(stream, intra_MB);
        if (CBPY < 0)
        {
            /* Report the error to the application.   06/20/2000 */
            VideoDecoderErrorDetected(video);
            /* Conceal second partition,  5/15/2000 */
            movePointerTo(stream, startSecondPart);
            ConcealTexture_P(video, mb_start, mb_end, slice_counter);
            return PV_FAIL;
        }

        video->headerInfo.CBP[mbnum] |= (uint8)(CBPY << 2);
        if (intra_MB || MBtype == MODE_INTER_Q) /*  04/26/01 */
        {
            status = GetMBheaderDataPart_DQUANT_DC(video, &QP);
            if (status != PV_SUCCESS) return status;
        }
        video->usePrevQP = 1;  /*  04/27/01 */
        QPMB[mbnum] = QP;
    }

    video->usePrevQP = 0;  /*  04/27/01 */
    for (mbnum = mb_start; mbnum < mb_end; mbnum++)
    {
        video->mbnum = mbnum;
        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */
        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;

        if (Mode[mbnum] != MODE_SKIPPED)
        {
            /* decode the DCT coeficients for the MB */
            status = GetMBData_DataPart(video);
            if (status != PV_SUCCESS)
            {
                /* Report the error to the application.   06/20/2000 */
                VideoDecoderErrorDetected(video);
                /* Conceal second partition,  5/15/2000 */
                movePointerTo(stream, startSecondPart);
                ConcealTexture_P(video, mb_start, mb_end, slice_counter);
                return status;
            }
            video->usePrevQP = 1;  /*  04/27/01 */
        }
        else // SKIPPED
        {
            /* Motion compensation and put it to video->mblock->pred_block */
            SkippedMBMotionComp(video);

            //oscl_memset(video->predDCAC_row + video->mbnum_col, 0, sizeof(typeDCACStore)); /*  SKIPPED_ACDC */
            //oscl_memset(video->predDCAC_col, 0, sizeof(typeDCACStore));
            /*  08/08/2005 */
            pbyte = (uint8*)(video->predDCAC_row + video->mbnum_col);
            ZERO_OUT_64BYTES(pbyte);
            pbyte = (uint8*)(video->predDCAC_col);
            ZERO_OUT_64BYTES(pbyte);
        }
    }

    valid_stuffing = validStuffing(stream);  /*  */
    if (!valid_stuffing)
    {
        VideoDecoderErrorDetected(video);
        movePointerTo(stream, startSecondPart); /*  */
        ConcealTexture_P(video, mb_start, mb_end, slice_counter);
        return PV_FAIL;
    }
    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : GetMBheaderDataPart_DQUANT_DC()                              */
/*  Date     : 04/26/2000                                                   */
/*  Purpose  : Decode DQUANT and DC in Data Partitioned Mode for both       */
/*             I-VOP and P-VOP.                                             */
/*  In/out   :                                                              */
/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.
*/ /* Modified : 02/13/2001 new ACDC prediction structure, */ /* cleanup */ /* ======================================================================== */ PV_STATUS GetMBheaderDataPart_DQUANT_DC(VideoDecData *video, int16 *QP) { PV_STATUS status = PV_SUCCESS; BitstreamDecVideo *stream = video->bitstream; int mbnum = video->mbnum; int intra_dc_vlc_thr = video->currVop->intraDCVlcThr; uint8 *Mode = video->headerInfo.Mode; int MBtype = Mode[mbnum]; typeDCStore *DC = video->predDC + mbnum; int comp; Bool switched; uint DQUANT; int16 QP_tmp; const static int DQ_tab[4] = { -1, -2, 1, 2}; if (MBtype & Q_MASK) /* INTRA_Q || INTER_Q */ { DQUANT = BitstreamReadBits16(stream, 2); *QP += DQ_tab[DQUANT]; if (*QP < 1) *QP = 1; else if (*QP > 31) *QP = 31; } if (MBtype & INTRA_MASK) /* INTRA || INTRA_Q */ /* no switch, code DC separately */ { QP_tmp = *QP; /* running QP 04/26/01*/ switched = 0; if (intra_dc_vlc_thr) /* 04/27/01 */ { if (video->usePrevQP) QP_tmp = video->QPMB[mbnum-1]; switched = (intra_dc_vlc_thr == 7 || QP_tmp >= intra_dc_vlc_thr * 2 + 11); } if (!switched) { for (comp = 0; comp < 6; comp++) { status = PV_DecodePredictedIntraDC(comp, stream, (*DC + comp)); /* 03/01/01 */ if (status != PV_SUCCESS) return PV_FAIL; } } else { for (comp = 0; comp < 6; comp++) { (*DC)[comp] = 0; /* 04/26/01 needed for switched case*/ } } } return status; } /***********************************************************CommentBegin****** * 04/25/2000 : Initial modification to the new PV Lib format. 
* 04/17/2001 : new ACDC pred structure ***********************************************************CommentEnd********/ PV_STATUS GetMBheaderDataPart_P(VideoDecData *video) { BitstreamDecVideo *stream = video->bitstream; int mbnum = video->mbnum; uint8 *Mode = video->headerInfo.Mode; typeDCStore *DC = video->predDC + mbnum; uint no_dct_flag; int comp; int MCBPC; no_dct_flag = BitstreamRead1Bits_INLINE(stream); if (no_dct_flag) { /* skipped macroblock */ Mode[mbnum] = MODE_SKIPPED; for (comp = 0; comp < 6; comp++) { (*DC)[comp] = mid_gray; /* ACDC REMOVE AC coefs are set in DecodeDataPart_P */ } } else { /* coded macroblock */ MCBPC = PV_VlcDecMCBPC_com_inter(stream); if (VLC_ERROR_DETECTED(MCBPC)) { return PV_FAIL; } Mode[mbnum] = (uint8)MBtype_mode[MCBPC & 7]; video->headerInfo.CBP[mbnum] = (uint8)((MCBPC >> 4) & 3); } return PV_SUCCESS; } /***********************************************************CommentBegin****** * 04/17/01 new ACDC pred structure, reorganized code, cleanup ***********************************************************CommentEnd********/ PV_STATUS GetMBData_DataPart(VideoDecData *video) { int mbnum = video->mbnum; int16 *dataBlock; MacroBlock *mblock = video->mblock; int QP = video->QPMB[mbnum]; int32 offset; PIXEL *c_comp; int width = video->width; int intra_dc_vlc_thr = video->currVop->intraDCVlcThr; uint CBP = video->headerInfo.CBP[mbnum]; uint8 mode = video->headerInfo.Mode[mbnum]; int x_pos = video->mbnum_col; typeDCStore *DC = video->predDC + mbnum; int ncoeffs[6], *no_coeff = mblock->no_coeff; int comp; Bool switched; int QP_tmp = QP; int y_pos = video->mbnum_row; #ifdef PV_POSTPROC_ON uint8 *pp_mod[6]; int TotalMB = video->nTotalMB; int MB_in_width = video->nMBPerRow; #endif /***** * Decoding of the 6 blocks (depending on transparent pattern) *****/ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) { /** post-processing ***/ pp_mod[0] = video->pstprcTypCur + (y_pos << 1) * (MB_in_width << 1) + (x_pos << 1); pp_mod[1] = 
pp_mod[0] + 1; pp_mod[2] = pp_mod[0] + (MB_in_width << 1); pp_mod[3] = pp_mod[2] + 1; pp_mod[4] = video->pstprcTypCur + (TotalMB << 2) + mbnum; pp_mod[5] = pp_mod[4] + TotalMB; } #endif /* oscl_memset(mblock->block, 0, sizeof(typeMBStore)); Aug 9,2005 */ if (mode & INTRA_MASK) /* MODE_INTRA || mode == MODE_INTRA_Q */ { switched = 0; if (intra_dc_vlc_thr) { if (video->usePrevQP) QP_tmp = video->QPMB[mbnum-1]; /* running QP 04/26/01 */ switched = (intra_dc_vlc_thr == 7 || QP_tmp >= intra_dc_vlc_thr * 2 + 11); } mblock->DCScalarLum = cal_dc_scaler(QP, LUMINANCE_DC_TYPE); /* ACDC 03/01/01 */ mblock->DCScalarChr = cal_dc_scaler(QP, CHROMINANCE_DC_TYPE); for (comp = 0; comp < 6; comp++) { dataBlock = mblock->block[comp]; /*, 10/20/2000 */ dataBlock[0] = (*DC)[comp]; ncoeffs[comp] = VlcDequantH263IntraBlock(video, comp, switched, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]); if (VLC_ERROR_DETECTED(ncoeffs[comp])) /* */ { if (switched) return PV_FAIL; else { ncoeffs[comp] = 1; oscl_memset((dataBlock + 1), 0, sizeof(int16)*63); } } no_coeff[comp] = ncoeffs[comp]; /* modified to new semaphore for post-proc */ // Future work:: can be combined in the dequant function // @todo Deblocking Semaphore for INTRA block #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = (uint8) PostProcSemaphore(dataBlock); #endif } MBlockIDCT(video); } else /* MODE INTER*/ { MBMotionComp(video, CBP); offset = (int32)(y_pos << 4) * width + (x_pos << 4); c_comp = video->currVop->yChan + offset; for (comp = 0; comp < 4; comp++) { (*DC)[comp] = mid_gray; if (CBP & (1 << (5 - comp))) { ncoeffs[comp] = VlcDequantH263InterBlock(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]); if (VLC_ERROR_DETECTED(ncoeffs[comp])) return PV_FAIL; BlockIDCT(c_comp + (comp&2)*(width << 2) + 8*(comp&1), mblock->pred_block + (comp&2)*64 + 8*(comp&1), mblock->block[comp], width, ncoeffs[comp], mblock->bitmapcol[comp], mblock->bitmaprow[comp]); } else { ncoeffs[comp] = 0; } 
/* @todo Deblocking Semaphore for INTRA block, for inter just test for ringing */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[comp] = (uint8)((ncoeffs[comp] > 3) ? 4 : 0); #endif } (*DC)[4] = mid_gray; if (CBP & 2) { ncoeffs[4] = VlcDequantH263InterBlock(video, 4, mblock->bitmapcol[4], &mblock->bitmaprow[4]); if (VLC_ERROR_DETECTED(ncoeffs[4])) return PV_FAIL; BlockIDCT(video->currVop->uChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 256, mblock->block[4], width >> 1, ncoeffs[4], mblock->bitmapcol[4], mblock->bitmaprow[4]); } else { ncoeffs[4] = 0; } #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[4] = (uint8)((ncoeffs[4] > 3) ? 4 : 0); #endif (*DC)[5] = mid_gray; if (CBP & 1) { ncoeffs[5] = VlcDequantH263InterBlock(video, 5, mblock->bitmapcol[5], &mblock->bitmaprow[5]); if (VLC_ERROR_DETECTED(ncoeffs[5])) return PV_FAIL; BlockIDCT(video->currVop->vChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 264, mblock->block[5], width >> 1, ncoeffs[5], mblock->bitmapcol[5], mblock->bitmaprow[5]); } else { ncoeffs[5] = 0; } #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) *pp_mod[5] = (uint8)((ncoeffs[5] > 3) ? 4 : 0); #endif /* Motion compensation and put it to video->mblock->pred_block */ } return PV_SUCCESS; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/dcac_prediction.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "vlc_decode.h" #include "bitstream.h" #include "zigzag.h" #include "scaling.h" void doDCACPrediction( VideoDecData *video, int comp, int16 *q_block, int *direction ) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int i; int mbnum = video->mbnum; int nMBPerRow = video->nMBPerRow; int x_pos = video->mbnum_col; int y_pos = video->mbnum_row; int16 *AC_tmp; int QP_tmp; int16 *QP_store = video->QPMB + mbnum; int QP = video->QPMB[mbnum]; int QP_half = QP >> 1; int32 val; int flag_0 = FALSE, flag_1 = FALSE; uint8 *slice_nb = video->sliceNo; typeDCStore *DC_store = video->predDC + mbnum; typeDCACStore *DCAC_row = video->predDCAC_row + x_pos; typeDCACStore *DCAC_col = video->predDCAC_col; uint ACpred_flag = (uint) video->acPredFlag[mbnum]; int left_bnd, up_bnd; static const int Xpos[6] = { -1, 0, -1, 0, -1, -1}; static const int Ypos[6] = { -1, -1, 0, 0, -1, -1}; static const int Xtab[6] = {1, 0, 3, 2, 4, 5}; static const int Ytab[6] = {2, 3, 0, 1, 4, 5}; static const int Ztab[6] = {3, 2, 1, 0, 4, 5}; /* I added these to speed up comparisons */ static const int Pos0[6] = { 1, 1, 0, 0, 1, 1}; static const int Pos1[6] = { 1, 0, 1, 0, 1, 1}; static const int B_Xtab[6] 
= {0, 1, 0, 1, 2, 3}; static const int B_Ytab[6] = {0, 0, 1, 1, 2, 3}; // int *direction; /* 0: HORIZONTAL, 1: VERTICAL */ int block_A, block_B, block_C; int DC_pred; int y_offset, x_offset, x_tab, y_tab, z_tab; /* speedup coefficients */ int b_xtab, b_ytab; if (!comp && x_pos && !(video->headerInfo.Mode[mbnum-1]&INTRA_MASK)) /* not intra */ { oscl_memset(DCAC_col, 0, sizeof(typeDCACStore)); } if (!comp && y_pos && !(video->headerInfo.Mode[mbnum-nMBPerRow]&INTRA_MASK)) /* not intra */ { oscl_memset(DCAC_row, 0, sizeof(typeDCACStore)); } y_offset = Ypos[comp] * nMBPerRow; x_offset = Xpos[comp]; x_tab = Xtab[comp]; y_tab = Ytab[comp]; z_tab = Ztab[comp]; b_xtab = B_Xtab[comp]; b_ytab = B_Ytab[comp]; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* Find the direction of prediction and the DC prediction */ if (x_pos == 0 && y_pos == 0) { /* top left corner */ block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray; block_B = (comp == 3) ? DC_store[x_offset][z_tab] : mid_gray; block_C = (comp == 2 || comp == 3) ? flag_1 = TRUE, DC_store[0][y_tab] : mid_gray; } else if (x_pos == 0) { /* left edge */ up_bnd = Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]; block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray; block_B = ((comp == 1 && up_bnd) || comp == 3) ? DC_store[y_offset+x_offset][z_tab] : mid_gray; block_C = (comp == 2 || comp == 3 || up_bnd) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray; } else if (y_pos == 0) { /* top row */ left_bnd = Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1]; block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray; block_B = ((comp == 2 && left_bnd) || comp == 3) ? DC_store[y_offset + x_offset][z_tab] : mid_gray; block_C = (comp == 2 || comp == 3) ? 
flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray; } else { up_bnd = Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]; left_bnd = Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1]; block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray; block_B = (((comp == 0 || comp == 4 || comp == 5) && slice_nb[mbnum] == slice_nb[mbnum-1-nMBPerRow]) || (comp == 1 && up_bnd) || (comp == 2 && left_bnd) || (comp == 3)) ? DC_store[y_offset+x_offset][z_tab] : mid_gray; block_C = (comp == 2 || comp == 3 || up_bnd) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray; } if ((PV_ABS((block_A - block_B))) < (PV_ABS((block_B - block_C)))) { DC_pred = block_C; *direction = 1; if (ACpred_flag == 1) { if (flag_1) { AC_tmp = DCAC_row[0][b_xtab]; QP_tmp = QP_store[y_offset]; if (QP_tmp == QP) { for (i = 1; i < 8; i++) { q_block[i] = *AC_tmp++; } } else { for (i = 1; i < 8; i++) { val = (int32)(*AC_tmp++) * QP_tmp; q_block[i] = (val < 0) ? (int16)((val - QP_half) / QP) : (int16)((val + QP_half) / QP); /* Vertical, top ROW of block C */ } } } } } else { DC_pred = block_A; *direction = 0; if (ACpred_flag == 1) { if (flag_0) { AC_tmp = DCAC_col[0][b_ytab]; QP_tmp = QP_store[x_offset]; if (QP_tmp == QP) { for (i = 1; i < 8; i++) { q_block[i<<3] = *AC_tmp++; } } else { for (i = 1; i < 8; i++) { val = (int32)(*AC_tmp++) * QP_tmp; q_block[i<<3] = (val < 0) ? (int16)((val - QP_half) / QP) : (int16)((val + QP_half) / QP); /* Vertical, top ROW of block C */ } } } } } /* Now predict the DC coefficient */ QP_tmp = (comp < 4) ? 
video->mblock->DCScalarLum : video->mblock->DCScalarChr; q_block[0] += (int16)((DC_pred + (QP_tmp >> 1)) * scale[QP_tmp] >> 18); // q_block[0] += (DC_pred+(QP_tmp>>1))/QP_tmp; /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } #ifdef PV_ANNEX_IJKT_SUPPORT void doDCACPrediction_I( VideoDecData *video, int comp, int16 *q_block ) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int mbnum = video->mbnum; int nMBPerRow = video->nMBPerRow; int x_pos = video->mbnum_col; int y_pos = video->mbnum_row; int16 *AC_tmp; int flag_0 = FALSE, flag_1 = FALSE; uint8 *slice_nb = video->sliceNo; typeDCStore *DC_store = video->predDC + mbnum; typeDCACStore *DCAC_row = video->predDCAC_row + x_pos; typeDCACStore *DCAC_col = video->predDCAC_col; int left_bnd, up_bnd; uint8 *mode = video->headerInfo.Mode; uint ACpred_flag = (uint) video->acPredFlag[mbnum]; static const int Xpos[6] = { -1, 0, -1, 0, -1, -1}; static const int Ypos[6] = { -1, -1, 0, 0, -1, -1}; static const int Xtab[6] = {1, 0, 3, 2, 4, 5}; static const int Ytab[6] = {2, 3, 0, 1, 4, 5}; /* I added these to speed up comparisons */ static const int Pos0[6] = { 1, 1, 0, 0, 1, 1}; static const int Pos1[6] = { 1, 0, 1, 0, 1, 1}; static const int B_Xtab[6] = {0, 1, 0, 1, 2, 3}; static const int B_Ytab[6] = {0, 0, 1, 1, 2, 3}; // int *direction; /* 0: HORIZONTAL, 1: VERTICAL */ int block_A, block_C; int y_offset, x_offset, x_tab, y_tab; /* speedup coefficients */ int b_xtab, b_ytab; y_offset = Ypos[comp] * nMBPerRow; x_offset = Xpos[comp]; x_tab = Xtab[comp]; y_tab = Ytab[comp]; b_xtab = B_Xtab[comp]; b_ytab = B_Ytab[comp]; /*---------------------------------------------------------------------------- ; Function body here 
----------------------------------------------------------------------------*/ /* Find the direction of prediction and the DC prediction */ if (x_pos == 0 && y_pos == 0) { /* top left corner */ block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray; block_C = (comp == 2 || comp == 3) ? flag_1 = TRUE, DC_store[0][y_tab] : mid_gray; } else if (x_pos == 0) { /* left edge */ up_bnd = (Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]) && (mode[mbnum-nMBPerRow] == MODE_INTRA || mode[mbnum-nMBPerRow] == MODE_INTRA_Q);; block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray; block_C = (comp == 2 || comp == 3 || up_bnd) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray; } else if (y_pos == 0) { /* top row */ left_bnd = (Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1]) && (mode[mbnum-1] == MODE_INTRA || mode[mbnum-1] == MODE_INTRA_Q); block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray; block_C = (comp == 2 || comp == 3) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray; } else { up_bnd = (Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]) && (mode[mbnum-nMBPerRow] == MODE_INTRA || mode[mbnum-nMBPerRow] == MODE_INTRA_Q); left_bnd = (Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1]) && (mode[mbnum-1] == MODE_INTRA || mode[mbnum-1] == MODE_INTRA_Q); block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray; block_C = (comp == 2 || comp == 3 || up_bnd) ? 
flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray; } if (ACpred_flag == 0) { if (flag_0 == TRUE) { if (flag_1 == TRUE) { q_block[0] = (int16)((block_A + block_C) >> 1); } else { q_block[0] = (int16)block_A; } } else { if (flag_1 == TRUE) { q_block[0] = (int16)block_C; } else { q_block[0] = mid_gray; } } } else { if (video->mblock->direction == 1) { if (flag_1 == TRUE) { q_block[0] = (int16)block_C; AC_tmp = DCAC_row[0][b_xtab]; q_block[1] = AC_tmp[0]; q_block[2] = AC_tmp[1]; q_block[3] = AC_tmp[2]; q_block[4] = AC_tmp[3]; q_block[5] = AC_tmp[4]; q_block[6] = AC_tmp[5]; q_block[7] = AC_tmp[6]; } else { q_block[0] = mid_gray; } } else { if (flag_0 == TRUE) { q_block[0] = (int16)block_A; AC_tmp = DCAC_col[0][b_ytab]; q_block[8] = AC_tmp[0]; q_block[16] = AC_tmp[1]; q_block[24] = AC_tmp[2]; q_block[32] = AC_tmp[3]; q_block[40] = AC_tmp[4]; q_block[48] = AC_tmp[5]; q_block[56] = AC_tmp[6]; } else { q_block[0] = mid_gray; } } } /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/dec_pred_intra_dc.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "vlc_decode.h" #include "bitstream.h" #include "zigzag.h" PV_STATUS PV_DecodePredictedIntraDC( int compnum, BitstreamDecVideo *stream, int16 *INTRADC_delta) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ PV_STATUS status = PV_SUCCESS; uint DC_size; uint code; int first_bit; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* read DC size 2 - 8 bits */ status = PV_VlcDecIntraDCPredSize(stream, compnum, &DC_size); if (status == PV_SUCCESS) { if (DC_size == 0) { *INTRADC_delta = 0; } else { /* read delta DC 0 - 8 bits */ code = (int) BitstreamReadBits16_INLINE(stream, DC_size); first_bit = code >> (DC_size - 1); if (first_bit == 0) { /* negative delta INTRA DC */ *INTRADC_delta = code ^((1 << DC_size) - 1); *INTRADC_delta = -(*INTRADC_delta); } else { /* positive delta INTRA DC */ *INTRADC_delta = code; } if (DC_size > 8) BitstreamRead1Bits_INLINE(stream); } } /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return status; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/deringing_chroma.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "post_proc.h" #ifdef PV_POSTPROC_ON void Deringing_Chroma( uint8 *Rec_C, int width, int height, int16 *QP_store, int Combined, uint8 *pp_mod ) { OSCL_UNUSED_ARG(Combined); /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int thres; int v_blk, h_blk; int max_diff; int v_pel, h_pel; int max_blk, min_blk; int v0, h0; uint8 *ptr; int sum, sum1, incr; int32 addr_v; int sign_v[10], sum_v[10]; int *ptr2, *ptr3; uint8 pelu, pelc, pell; incr = width - BLKSIZE; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* chrominance */ /* Do the first line (7 pixels at a time => Don't use MMX)*/ for (h_blk = 0; h_blk < width; h_blk += BLKSIZE) { max_diff = (QP_store[h_blk>>3] >> 2) + 4; ptr = &Rec_C[h_blk]; max_blk = min_blk = *ptr; FindMaxMin(ptr, &min_blk, &max_blk, width); h0 = ((h_blk - 1) >= 1) ? 
(h_blk - 1) : 1; if (max_blk - min_blk >= 4) { thres = (max_blk + min_blk + 1) >> 1; for (v_pel = 1; v_pel < BLKSIZE - 1; v_pel++) { addr_v = (int32)v_pel * width; ptr = &Rec_C[addr_v + h0 - 1]; ptr2 = &sum_v[0]; ptr3 = &sign_v[0]; pelu = *(ptr - width); pelc = *ptr; pell = *(ptr + width); ptr++; *ptr2++ = pelu + (pelc << 1) + pell; *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres); pelu = *(ptr - width); pelc = *ptr; pell = *(ptr + width); ptr++; *ptr2++ = pelu + (pelc << 1) + pell; *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres); for (h_pel = h0; h_pel < h_blk + BLKSIZE - 1; h_pel++) { pelu = *(ptr - width); pelc = *ptr; pell = *(ptr + width); *ptr2 = pelu + (pelc << 1) + pell; *ptr3 = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres); sum1 = *(ptr3 - 2) + *(ptr3 - 1) + *ptr3; if (sum1 == 0 || sum1 == 9) { sum = (*(ptr2 - 2) + (*(ptr2 - 1) << 1) + *ptr2 + 8) >> 4; ptr--; if (PV_ABS(*ptr - sum) > max_diff) { if (sum > *ptr) sum = *ptr + max_diff; else sum = *ptr - max_diff; } *ptr++ = (uint8) sum; } ptr++; ptr2++; ptr3++; } } } } for (v_blk = BLKSIZE; v_blk < height; v_blk += BLKSIZE) { v0 = v_blk - 1; /* Do the first block (pixels=7 => No MMX) */ max_diff = (QP_store[((((int32)v_blk*width)>>3))>>3] >> 2) + 4; ptr = &Rec_C[(int32)v_blk * width]; max_blk = min_blk = *ptr; FindMaxMin(ptr, &min_blk, &max_blk, incr); if (max_blk - min_blk >= 4) { thres = (max_blk + min_blk + 1) >> 1; for (v_pel = v0; v_pel < v_blk + BLKSIZE - 1; v_pel++) { addr_v = v_pel * width; ptr = &Rec_C[addr_v]; ptr2 = &sum_v[0]; ptr3 = &sign_v[0]; pelu = *(ptr - width); pelc = *ptr; pell = *(ptr + width); ptr++; *ptr2++ = pelu + (pelc << 1) + pell; *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres); pelu = *(ptr - width); pelc = *ptr; pell = *(ptr + width); ptr++; *ptr2++ = pelu + (pelc << 1) + pell; *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres); for (h_pel = 1; h_pel < BLKSIZE - 1; h_pel++) { 
pelu = *(ptr - width); pelc = *ptr; pell = *(ptr + width); *ptr2 = pelu + (pelc << 1) + pell; *ptr3 = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres); sum1 = *(ptr3 - 2) + *(ptr3 - 1) + *ptr3; if (sum1 == 0 || sum1 == 9) { sum = (*(ptr2 - 2) + (*(ptr2 - 1) << 1) + *ptr2 + 8) >> 4; ptr--; if (PV_ABS(*ptr - sum) > max_diff) { if (sum > *ptr) sum = *ptr + max_diff; else sum = *ptr - max_diff; } *ptr++ = (uint8) sum; } ptr++; ptr2++; ptr3++; } } } /* Do the rest in MMX */ for (h_blk = BLKSIZE; h_blk < width; h_blk += BLKSIZE) { if ((pp_mod[(v_blk/8)*(width/8)+h_blk/8]&0x4) != 0) { max_diff = (QP_store[((((int32)v_blk*width)>>3)+h_blk)>>3] >> 2) + 4; ptr = &Rec_C[(int32)v_blk * width + h_blk]; max_blk = min_blk = *ptr; FindMaxMin(ptr, &min_blk, &max_blk, incr); h0 = h_blk - 1; if (max_blk - min_blk >= 4) { thres = (max_blk + min_blk + 1) >> 1; #ifdef NoMMX AdaptiveSmooth_NoMMX(Rec_C, v0, h0, v_blk, h_blk, thres, width, max_diff); #else DeringAdaptiveSmoothMMX(&Rec_C[(int32)v0*width+h0], width, thres, max_diff); #endif } } } } /* macroblock level */ /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/deringing_luma.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
* See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "post_proc.h" #ifdef PV_POSTPROC_ON void Deringing_Luma( uint8 *Rec_Y, int width, int height, int16 *QP_store, int Combined, uint8 *pp_mod) { OSCL_UNUSED_ARG(Combined); /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int thres[4], range[4], max_range_blk, max_thres_blk; int MB_V, MB_H, BLK_V, BLK_H; int v_blk, h_blk; int max_diff; int max_blk, min_blk; int v0, h0; uint8 *ptr; int thr, blks, incr; int mb_indx, blk_indx; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ incr = width - BLKSIZE; /* Dering the first line of macro blocks */ for (MB_H = 0; MB_H < width; MB_H += MBSIZE) { max_diff = (QP_store[(MB_H)>>4] >> 2) + 4; /* threshold determination */ max_range_blk = max_thres_blk = 0; blks = 0; for (BLK_V = 0; BLK_V < MBSIZE; BLK_V += BLKSIZE) { for (BLK_H = 0; BLK_H < MBSIZE; BLK_H += BLKSIZE) { ptr = &Rec_Y[(int32)(BLK_V) * width + MB_H + BLK_H]; FindMaxMin(ptr, &min_blk, &max_blk, incr); thres[blks] = (max_blk + min_blk + 1) >> 1; range[blks] = max_blk - min_blk; if (range[blks] >= max_range_blk) { max_range_blk = range[blks]; max_thres_blk = thres[blks]; } blks++; } } blks = 0; for (v_blk = 0; v_blk < MBSIZE; v_blk += BLKSIZE) { v0 = ((v_blk - 1) >= 1) ? (v_blk - 1) : 1; for (h_blk = MB_H; h_blk < MB_H + MBSIZE; h_blk += BLKSIZE) { h0 = ((h_blk - 1) >= 1) ? 
(h_blk - 1) : 1; /* threshold rearrangement for flat region adjacent to non-flat region */ if (range[blks]<32 && max_range_blk >= 64) thres[blks] = max_thres_blk; /* threshold rearrangement for deblocking (blockiness annoying at DC dominant region) */ if (max_range_blk >= 16) { /* adaptive smoothing */ thr = thres[blks]; AdaptiveSmooth_NoMMX(Rec_Y, v0, h0, v_blk, h_blk, thr, width, max_diff); } blks++; } /* block level (Luminance) */ } } /* macroblock level */ /* Do the rest of the macro-block-lines */ for (MB_V = MBSIZE; MB_V < height; MB_V += MBSIZE) { /* First macro-block */ max_diff = (QP_store[((((int32)MB_V*width)>>4))>>4] >> 2) + 4; /* threshold determination */ max_range_blk = max_thres_blk = 0; blks = 0; for (BLK_V = 0; BLK_V < MBSIZE; BLK_V += BLKSIZE) { for (BLK_H = 0; BLK_H < MBSIZE; BLK_H += BLKSIZE) { ptr = &Rec_Y[(int32)(MB_V + BLK_V) * width + BLK_H]; FindMaxMin(ptr, &min_blk, &max_blk, incr); thres[blks] = (max_blk + min_blk + 1) >> 1; range[blks] = max_blk - min_blk; if (range[blks] >= max_range_blk) { max_range_blk = range[blks]; max_thres_blk = thres[blks]; } blks++; } } blks = 0; for (v_blk = MB_V; v_blk < MB_V + MBSIZE; v_blk += BLKSIZE) { v0 = v_blk - 1; for (h_blk = 0; h_blk < MBSIZE; h_blk += BLKSIZE) { h0 = ((h_blk - 1) >= 1) ? 
(h_blk - 1) : 1; /* threshold rearrangement for flat region adjacent to non-flat region */ if (range[blks]<32 && max_range_blk >= 64) thres[blks] = max_thres_blk; /* threshold rearrangement for deblocking (blockiness annoying at DC dominant region) */ if (max_range_blk >= 16) { /* adaptive smoothing */ thr = thres[blks]; AdaptiveSmooth_NoMMX(Rec_Y, v0, h0, v_blk, h_blk, thr, width, max_diff); } blks++; } } /* block level (Luminance) */ /* Rest of the macro-blocks */ for (MB_H = MBSIZE; MB_H < width; MB_H += MBSIZE) { max_diff = (QP_store[((((int32)MB_V*width)>>4)+MB_H)>>4] >> 2) + 4; /* threshold determination */ max_range_blk = max_thres_blk = 0; blks = 0; mb_indx = (MB_V / 8) * (width / 8) + MB_H / 8; for (BLK_V = 0; BLK_V < MBSIZE; BLK_V += BLKSIZE) { for (BLK_H = 0; BLK_H < MBSIZE; BLK_H += BLKSIZE) { blk_indx = mb_indx + (BLK_V / 8) * width / 8 + BLK_H / 8; /* Update based on pp_mod only */ if ((pp_mod[blk_indx]&0x4) != 0) { ptr = &Rec_Y[(int32)(MB_V + BLK_V) * width + MB_H + BLK_H]; FindMaxMin(ptr, &min_blk, &max_blk, incr); thres[blks] = (max_blk + min_blk + 1) >> 1; range[blks] = max_blk - min_blk; if (range[blks] >= max_range_blk) { max_range_blk = range[blks]; max_thres_blk = thres[blks]; } } blks++; } } blks = 0; for (v_blk = MB_V; v_blk < MB_V + MBSIZE; v_blk += BLKSIZE) { v0 = v_blk - 1; mb_indx = (v_blk / 8) * (width / 8); for (h_blk = MB_H; h_blk < MB_H + MBSIZE; h_blk += BLKSIZE) { h0 = h_blk - 1; blk_indx = mb_indx + h_blk / 8; if ((pp_mod[blk_indx]&0x4) != 0) { /* threshold rearrangement for flat region adjacent to non-flat region */ if (range[blks]<32 && max_range_blk >= 64) thres[blks] = max_thres_blk; /* threshold rearrangement for deblocking (blockiness annoying at DC dominant region) */ if (max_range_blk >= 16) { /* adaptive smoothing */ thr = thres[blks]; #ifdef NoMMX AdaptiveSmooth_NoMMX(Rec_Y, v0, h0, v_blk, h_blk, thr, width, max_diff); #else DeringAdaptiveSmoothMMX(&Rec_Y[v0*width+h0], width, thr, max_diff); #endif } } blks++; } } /* 
block level (Luminance) */ } /* macroblock level */ } /* macroblock level */ /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/find_min_max.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: input_ptr = pointer to the buffer containing values of type UChar in a 2D block of data. min_ptr = pointer to the minimum value of type Int to be found in a square block of size BLKSIZE contained in 2D block of data. max_ptr = pointer to the maximum value of type Int to be found in a square block of size BLKSIZE contained in 2D block of data. incr = value of type Int representing the width of 2D block of data. Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Outputs: None Pointers and Buffers Modified: min_ptr points to the found minimum value in the square block of size BLKSIZE contained in 2D block of data. 
max_ptr points to the found maximum value in the square block of size BLKSIZE contained in 2D block of data. Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This function finds the maximum and the minimum values in a square block of data of size BLKSIZE * BLKSIZE. The data is contained in the buffer which represents a 2D block of data that is larger than BLKSIZE * BLKSIZE. This is illustrated below. mem loc x + 00h -> o o o o o o o o o o o o o o o o mem loc x + 10h -> o o o o o X X X X X X X X o o o mem loc x + 20h -> o o o o o X X X X X X X X o o o mem loc x + 30h -> o o o o o X X X X X X X X o o o mem loc x + 40h -> o o o o o X X X X X X X X o o o mem loc x + 50h -> o o o o o X X X X X X X X o o o mem loc x + 60h -> o o o o o X X X X X X X X o o o mem loc x + 70h -> o o o o o X X X X X X X X o o o mem loc x + 80h -> o o o o o X X X X X X X X o o o mem loc x + 90h -> o o o o o o o o o o o o o o o o mem loc x + A0h -> o o o o o o o o o o o o o o o o mem loc x + B0h -> o o o o o o o o o o o o o o o o For illustration purposes, the diagram assumes that BLKSIZE is equal to 8 but this is not a requirement. In this diagram, the buffer starts at location x but the input pointer, input_ptr, passed into this function would be the first row of data to be searched which is at x + 15h. The value of incr passed onto this function represents the amount the input_ptr needs to be incremented to point to the next row of data. This function compares each value in a row to the current maximum and minimum. After each row, input_ptr is incremented to point to the next row. This is repeated until all rows have been processed. When the search is complete the location pointed to by min_ptr contains the minimum value found and the location pointed to by max_ptr contains the maximum value found. 
------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "post_proc.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL STORE/BUFFER/POINTER DEFINITIONS ; Variable declaration - defined here and used outside this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL FUNCTION REFERENCES ; Declare functions defined elsewhere and referenced in this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ #ifdef PV_POSTPROC_ON /*---------------------------------------------------------------------------- ; FUNCTION CODE ----------------------------------------------------------------------------*/ void FindMaxMin( uint8 
*input_ptr, int *min_ptr, int *max_ptr, int incr) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ register uint i, j; register int min, max; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ max = min = *input_ptr; /* incr = incr - BLKSIZE; */ /* 09/06/2001, already passed in as width - BLKSIZE */ for (i = BLKSIZE; i > 0; i--) { for (j = BLKSIZE; j > 0; j--) { if (*input_ptr > max) { max = *input_ptr; } else if (*input_ptr < min) { min = *input_ptr; } input_ptr += 1; } /* set pointer to the beginning of the next row*/ input_ptr += incr; } *max_ptr = max; *min_ptr = min; /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/get_pred_adv_b_add.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: xpos = x half-pixel of (x,y) coordinates within a VOP; motion compensated coordinates; native type ypos = y half-pixel of (x,y) coordinates within a VOP; motion compensated coordinates; native type comp = pointer to 8-bit compensated prediction values within a VOP; computed by this module (i/o); full-pel resolution c_prev = pointer to previous 8-bit prediction values within a VOP; values range from (0-255); full-pel resolution sh_d = pointer to residual values used to compensate the predicted value; values range from (-512 to 511); full-pel resolution width = width of the VOP in pixels (x axis); full-pel resolution rnd1 = rounding value for case when one dimension uses half-pel resolution rnd2 = rounding value for case when two dimensions uses half-pel resolution CBP = flag indicating whether residual is all zeros (0 -> all zeros, 1 -> not all zeros) outside_flag = flag indicating whether motion vector is outside the VOP (0 -> inside, 1 -> outside) Outputs: returns 1 Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Pointers and Buffers Modified: comp = buffer contains newly computed compensated prediction values Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION Compute pixel values for a block in the current VOP. The prediction values are generated by averaging pixel values in the previous VOP; the block position in the previous frame is computed from the current block's motion vector. The computed pixel values are then computed by adding the prediction values to the block residual values. 
------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "motion_comp.h" #define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA #include "osclconfig_compiler_warnings.h" int GetPredAdvancedBy0x0( uint8 *prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ) { uint i; /* loop variable */ int offset, offset2; uint32 pred_word, word1, word2; int tmp; /* initialize offset to adjust pixel counter */ /* the next row; full-pel resolution */ offset = width - B_SIZE; /* offset for prev */ offset2 = (pred_width_rnd >> 1) - 4; /* offset for pred_block */ tmp = (uint32)prev & 0x3; pred_block -= offset2; /* preset */ if (tmp == 0) /* word-aligned */ { for (i = B_SIZE; i > 0; i--) { *((uint32*)(pred_block += offset2)) = *((uint32*)prev); *((uint32*)(pred_block += 4)) = *((uint32*)(prev + 4)); prev += width; } return 1; } else if (tmp == 1) /* first position */ { prev--; /* word-aligned */ for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* read 4 bytes, b4 b3 b2 b1 */ word2 = *((uint32*)(prev += 4)); /* read 4 bytes, b8 b7 b6 b5 */ word1 >>= 8; /* 0 b4 b3 b2 */ pred_word = word1 | (word2 << 24); /* b5 b4 b3 b2 */ *((uint32*)(pred_block += offset2)) = pred_word; word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */ word2 >>= 8; /* 0 b8 b7 b6 */ pred_word = word2 | (word1 << 24); /* b9 b8 b7 b6 */ *((uint32*)(pred_block += 4)) = pred_word; prev += offset; } return 1; } else if (tmp == 2) /* second position */ { prev -= 2; /* word1-aligned */ for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* read 4 bytes, b4 b3 b2 b1 */ word2 = *((uint32*)(prev += 4)); /* read 4 bytes, b8 b7 b6 b5 */ word1 >>= 16; /* 0 0 b4 b3 */ pred_word = word1 | (word2 << 16); /* b6 b5 b4 b3 */ *((uint32*)(pred_block += offset2)) = 
pred_word; word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */ word2 >>= 16; /* 0 0 b8 b7 */ pred_word = word2 | (word1 << 16); /* b10 b9 b8 b7 */ *((uint32*)(pred_block += 4)) = pred_word; prev += offset; } return 1; } else /* third position */ { prev -= 3; /* word1-aligned */ for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* read 4 bytes, b4 b3 b2 b1 */ word2 = *((uint32*)(prev += 4)); /* read 4 bytes, b8 b7 b6 b5 */ word1 >>= 24; /* 0 0 0 b4 */ pred_word = word1 | (word2 << 8); /* b7 b6 b5 b4 */ *((uint32*)(pred_block += offset2)) = pred_word; word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */ word2 >>= 24; /* 0 0 0 b8 */ pred_word = word2 | (word1 << 8); /* b11 b10 b9 b8 */ *((uint32*)(pred_block += 4)) = pred_word; prev += offset; } return 1; } } /**************************************************************************/ int GetPredAdvancedBy0x1( uint8 *prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ) { uint i; /* loop variable */ int offset, offset2; uint32 word1, word2, word3, word12; int tmp; int rnd1; uint32 mask; /* initialize offset to adjust pixel counter */ /* the next row; full-pel resolution */ offset = width - B_SIZE; /* offset for prev */ offset2 = (pred_width_rnd >> 1) - 4; /* offset of pred_block */ rnd1 = pred_width_rnd & 1; /* Branch based on pixel location (half-pel or full-pel) for x and y */ pred_block -= offset2; /* preset */ tmp = (uint32)prev & 3; mask = 254; mask |= (mask << 8); mask |= (mask << 16); /* 0xFEFEFEFE */ if (tmp == 0) /* word-aligned */ { if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b4 b3 b2 b1 */ word2 = *((uint32*)(prev += 4)); /* b8 b7 b6 b5 */ word12 = (word1 >> 8); /* 0 b4 b3 b2 */ word12 |= (word2 << 24); /* b5 b4 b3 b2 */ word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; 
*((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */ word12 = (word2 >> 8); /* 0 b8 b7 b6 */ word12 |= (word1 << 24); /* b9 b8 b7 b6 */ word3 = word2 | word12; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else /* rnd1 == 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b4 b3 b2 b1 */ word2 = *((uint32*)(prev += 4)); /* b8 b7 b6 b5 */ word12 = (word1 >> 8); /* 0 b4 b3 b2 */ word12 |= (word2 << 24); /* b5 b4 b3 b2 */ word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */ word12 = (word2 >> 8); /* 0 b8 b7 b6 */ word12 |= (word1 << 24); /* b9 b8 b7 b6 */ word3 = word2 & word12; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } /* rnd1 */ } else if (tmp == 1) { prev--; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b3 b2 b1 b0 */ word2 = *((uint32*)(prev += 4)); /* b7 b6 b5 b4 */ word12 = (word1 >> 8); /* 0 b3 b2 b1 */ word1 >>= 16; /* 0 0 b3 b2 */ word12 |= (word2 << 24); /* b4 b3 b2 b1 */ word1 |= (word2 << 16); /* b5 b4 b3 b2 */ word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = 
*((uint32*)(prev += 4)); /* b11 b10 b9 b8 */ word12 = (word2 >> 8); /* 0 b7 b6 b5 */ word2 >>= 16; /* 0 0 b7 b6 */ word12 |= (word1 << 24); /* b8 b7 b6 b5 */ word2 |= (word1 << 16); /* b9 b8 b7 b6 */ word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word2&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b3 b2 b1 b0 */ word2 = *((uint32*)(prev += 4)); /* b7 b6 b5 b4 */ word12 = (word1 >> 8); /* 0 b3 b2 b1 */ word1 >>= 16; /* 0 0 b3 b2 */ word12 |= (word2 << 24); /* b4 b3 b2 b1 */ word1 |= (word2 << 16); /* b5 b4 b3 b2 */ word3 = word1 & word12; word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b11 b10 b9 b8 */ word12 = (word2 >> 8); /* 0 b7 b6 b5 */ word2 >>= 16; /* 0 0 b7 b6 */ word12 |= (word1 << 24); /* b8 b7 b6 b5 */ word2 |= (word1 << 16); /* b9 b8 b7 b6 */ word3 = word2 & word12; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } /* rnd1 */ } else if (tmp == 2) { prev -= 2; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b2 b1 b0 bN1 */ word2 = *((uint32*)(prev += 4)); /* b6 b5 b4 b3 */ word12 = (word1 >> 16); /* 0 0 b2 b1 */ word1 >>= 24; /* 0 0 0 b2 */ word12 |= (word2 << 16); /* b4 b3 b2 b1 */ word1 |= (word2 << 8); /* b5 b4 b3 b2 */ word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= 
mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b10 b9 b8 b7 */ word12 = (word2 >> 16); /* 0 0 b6 b5 */ word2 >>= 24; /* 0 0 0 b6 */ word12 |= (word1 << 16); /* b8 b7 b6 b5 */ word2 |= (word1 << 8); /* b9 b8 b7 b6 */ word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else /* rnd1 == 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b2 b1 b0 bN1 */ word2 = *((uint32*)(prev += 4)); /* b6 b5 b4 b3 */ word12 = (word1 >> 16); /* 0 0 b2 b1 */ word1 >>= 24; /* 0 0 0 b2 */ word12 |= (word2 << 16); /* b4 b3 b2 b1 */ word1 |= (word2 << 8); /* b5 b4 b3 b2 */ word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b10 b9 b8 b7 */ word12 = (word2 >> 16); /* 0 0 b6 b5 */ word2 >>= 24; /* 0 0 0 b6 */ word12 |= (word1 << 16); /* b8 b7 b6 b5 */ word2 |= (word1 << 8); /* b9 b8 b7 b6 */ word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } } else /* tmp = 3 */ { prev -= 3; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b1 b0 bN1 bN2 */ word2 = *((uint32*)(prev += 4)); /* b5 b4 b3 b2 */ word12 = (word1 >> 24); /* 0 0 0 b1 */ word12 |= (word2 << 8); /* b4 b3 b2 b1 */ 
word1 = word2; word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b9 b8 b7 b6 */ word12 = (word2 >> 24); /* 0 0 0 b5 */ word12 |= (word1 << 8); /* b8 b7 b6 b5 */ word2 = word1; /* b9 b8 b7 b6 */ word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)prev); /* b1 b0 bN1 bN2 */ word2 = *((uint32*)(prev += 4)); /* b5 b4 b3 b2 */ word12 = (word1 >> 24); /* 0 0 0 b1 */ word12 |= (word2 << 8); /* b4 b3 b2 b1 */ word1 = word2; word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */ word1 = *((uint32*)(prev += 4)); /* b9 b8 b7 b6 */ word12 = (word2 >> 24); /* 0 0 0 b5 */ word12 |= (word1 << 8); /* b8 b7 b6 b5 */ word2 = word1; /* b9 b8 b7 b6 */ word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } } } /**************************************************************************/ int GetPredAdvancedBy1x0( uint8 *prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ) { uint i; /* loop variable */ int offset, offset2; uint32 word1, word2, 
word3, word12, word22; int tmp; int rnd1; uint32 mask; /* initialize offset to adjust pixel counter */ /* the next row; full-pel resolution */ offset = width - B_SIZE; /* offset for prev */ offset2 = (pred_width_rnd >> 1) - 4; /* offset for pred_block */ rnd1 = pred_width_rnd & 1; /* Branch based on pixel location (half-pel or full-pel) for x and y */ pred_block -= offset2; /* preset */ tmp = (uint32)prev & 3; mask = 254; mask |= (mask << 8); mask |= (mask << 16); /* 0xFEFEFEFE */ if (tmp == 0) /* word-aligned */ { prev -= 4; if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)(prev += 4)); word2 = *((uint32*)(prev + width)); word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; word1 = *((uint32*)(prev += 4)); word2 = *((uint32*)(prev + width)); word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((uint32*)(prev += 4)); word2 = *((uint32*)(prev + width)); word3 = word1 & word2; /* rnd1 = 0; */ word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += offset2)) = word1; word1 = *((uint32*)(prev += 4)); word2 = *((uint32*)(prev + width)); word3 = word1 & word2; /* rnd1 = 0; */ word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } } else if (tmp == 1) { prev--; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) 
{ word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */ word22 = *((uint32*)(prev + width)); word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((uint32*)(prev + width)); word12 >>= 8; /* 0 b4 b3 b2 */ word22 >>= 8; word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */ word22 = word22 | (word2 << 24); word3 = word12 | word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((uint32*)(pred_block += offset2)) = word12; word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((uint32*)(prev + width)); word1 >>= 8; /* 0 b8 b7 b6 */ word2 >>= 8; word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */ word2 = word2 | (word22 << 24); word3 = word1 | word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */ word22 = *((uint32*)(prev + width)); word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((uint32*)(prev + width)); word12 >>= 8; /* 0 b4 b3 b2 */ word22 >>= 8; word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */ word22 = word22 | (word2 << 24); word3 = word12 & word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((uint32*)(pred_block += offset2)) = word12; word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((uint32*)(prev + width)); word1 >>= 8; /* 0 b8 b7 b6 */ word2 >>= 8; word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */ word2 = word2 | (word22 << 24); word3 = word1 & word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } 
return 1; } } else if (tmp == 2) { prev -= 2; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */ word22 = *((uint32*)(prev + width)); word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((uint32*)(prev + width)); word12 >>= 16; /* 0 0 b4 b3 */ word22 >>= 16; word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */ word22 = word22 | (word2 << 16); word3 = word12 | word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((uint32*)(pred_block += offset2)) = word12; word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((uint32*)(prev + width)); word1 >>= 16; /* 0 0 b8 b7 */ word2 >>= 16; word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */ word2 = word2 | (word22 << 16); word3 = word1 | word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */ word22 = *((uint32*)(prev + width)); word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((uint32*)(prev + width)); word12 >>= 16; /* 0 0 b4 b3 */ word22 >>= 16; word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */ word22 = word22 | (word2 << 16); word3 = word12 & word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((uint32*)(pred_block += offset2)) = word12; word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((uint32*)(prev + width)); word1 >>= 16; /* 0 0 b8 b7 */ word2 >>= 16; word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */ word2 = word2 | (word22 << 16); word3 = word1 & word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ 
word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } } else /* tmp == 3 */ { prev -= 3; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */ word22 = *((uint32*)(prev + width)); word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((uint32*)(prev + width)); word12 >>= 24; /* 0 0 0 b4 */ word22 >>= 24; word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */ word22 = word22 | (word2 << 8); word3 = word12 | word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((uint32*)(pred_block += offset2)) = word12; word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((uint32*)(prev + width)); word1 >>= 24; /* 0 0 0 b8 */ word2 >>= 24; word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */ word2 = word2 | (word22 << 8); word3 = word1 | word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */ word22 = *((uint32*)(prev + width)); word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((uint32*)(prev + width)); word12 >>= 24; /* 0 0 0 b4 */ word22 >>= 24; word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */ word22 = word22 | (word2 << 8); word3 = word12 & word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((uint32*)(pred_block += offset2)) = word12; word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((uint32*)(prev + width)); word1 >>= 24; /* 0 0 0 b8 */ word2 >>= 24; word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */ word2 = word2 | (word22 << 
8); word3 = word1 & word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((uint32*)(pred_block += 4)) = word1; prev += offset; } return 1; } /* rnd */ } /* tmp */ } /**********************************************************************************/ int GetPredAdvancedBy1x1( uint8 *prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ) { uint i; /* loop variable */ int offset, offset2; uint32 x1, x2, x1m, x2m, y1, y2, y1m, y2m; /* new way */ int tmp; int rnd1, rnd2; uint32 mask; /* initialize offset to adjust pixel counter */ /* the next row; full-pel resolution */ offset = width - B_SIZE; /* offset for prev */ offset2 = (pred_width_rnd >> 1) - 8; /* offset for pred_block */ rnd1 = pred_width_rnd & 1; rnd2 = rnd1 + 1; rnd2 |= (rnd2 << 8); rnd2 |= (rnd2 << 16); mask = 0x3F; mask |= (mask << 8); mask |= (mask << 16); /* 0x3f3f3f3f */ tmp = (uint32)prev & 3; pred_block -= 4; /* preset */ if (tmp == 0) /* word-aligned */ { for (i = B_SIZE; i > 0; i--) { x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */ x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */ y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 
*/ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ y2m = x1m >> 8; y2 = x1 >> 8; y2m |= (y1m << 24); /* a4+b4, a3+b3, a2+b2, a1+b1 */ y2 |= (y1 << 24); x1m += y2m; /* a3+b3+a4+b4, ....., a0+b0+a1+b1 */ x1 += y2; x1 += rnd2; x1 &= (mask << 2); x1m += (x1 >> 2); *((uint32*)(pred_block += 4)) = x1m; /* save x1m */ y2m = y1m >> 8; y2 = y1 >> 8; y2m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */ y2 |= (x2 << 24); y1m += y2m; /* a7+b7+a8+b8, ....., a4+b4+a5+b5 */ y1 += y2; y1 += rnd2; y1 &= (mask << 2); y1m += (y1 >> 2); *((uint32*)(pred_block += 4)) = y1m; /* save y1m */ pred_block += offset2; prev += offset; } return 1; } else if (tmp == 1) { prev--; /* to word-aligned */ for (i = B_SIZE; i > 0; i--) { x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */ x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */ y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ x1m >>= 8 ; x1 >>= 8; x1m |= (y1m << 24); /* a4+b4, a3+b3, a2+b2, a1+b1 */ x1 |= (y1 << 24); y2m = (y1m << 16); y2 = (y1 << 16); y2m |= (x1m >> 8); /* a5+b5, a4+b4, a3+b3, a2+b2 */ y2 |= (x1 >> 8); x1 += rnd2; x1m += y2m; /* a4+b4+a5+b5, ....., a1+b1+a2+b2 */ x1 += y2; x1 &= (mask << 2); x1m += (x1 >> 2); 
*((uint32*)(pred_block += 4)) = x1m; /* save x1m */ y1m >>= 8; y1 >>= 8; y1m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */ y1 |= (x2 << 24); y2m = (x2m << 16); y2 = (x2 << 16); y2m |= (y1m >> 8); /* a9+b9, a8+b8, a7+b7, a6+b6,*/ y2 |= (y1 >> 8); y1 += rnd2; y1m += y2m; /* a8+b8+a9+b9, ....., a5+b5+a6+b6 */ y1 += y2; y1 &= (mask << 2); y1m += (y1 >> 2); *((uint32*)(pred_block += 4)) = y1m; /* save y1m */ pred_block += offset2; prev += offset; } return 1; } else if (tmp == 2) { prev -= 2; /* to word-aligned */ for (i = B_SIZE; i > 0; i--) { x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */ x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */ y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ x1m >>= 16 ; x1 >>= 16; x1m |= (y1m << 16); /* a5+b5, a4+b4, a3+b3, a2+b2 */ x1 |= (y1 << 16); y2m = (y1m << 8); y2 = (y1 << 8); y2m |= (x1m >> 8); /* a6+b6, a5+b5, a4+b4, a3+b3 */ y2 |= (x1 >> 8); x1 += rnd2; x1m += y2m; /* a5+b5+a6+b6, ....., a2+b2+a3+b3 */ x1 += y2; x1 &= (mask << 2); x1m += (x1 >> 2); *((uint32*)(pred_block += 4)) = x1m; /* save x1m */ y1m >>= 16; y1 >>= 16; y1m |= (x2m << 16); /* a9+b9, a8+b8, a7+b7, a6+b6 */ y1 |= (x2 << 16); y2m = (x2m << 8); y2 = (x2 << 
8); y2m |= (y1m >> 8); /* a10+b10, a9+b9, a8+b8, a7+b7,*/ y2 |= (y1 >> 8); y1 += rnd2; y1m += y2m; /* a9+b9+a10+b10, ....., a6+b6+a7+b7 */ y1 += y2; y1 &= (mask << 2); y1m += (y1 >> 2); *((uint32*)(pred_block += 4)) = y1m; /* save y1m */ pred_block += offset2; prev += offset; } return 1; } else /* tmp == 3 */ { prev -= 3; /* to word-aligned */ for (i = B_SIZE; i > 0; i--) { x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */ x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */ y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ x1m >>= 24 ; x1 >>= 24; x1m |= (y1m << 8); /* a6+b6, a5+b5, a4+b4, a3+b3 */ x1 |= (y1 << 8); x1m += y1m; /* a6+b6+a7+b7, ....., a3+b3+a4+b4 */ x1 += y1; x1 += rnd2; x1 &= (mask << 2); x1m += (x1 >> 2); *((uint32*)(pred_block += 4)) = x1m; /* save x1m */ y1m >>= 24; y1 >>= 24; y1m |= (x2m << 8); /* a10+b10, a9+b9, a8+b8, a7+b7 */ y1 |= (x2 << 8); y1m += x2m; /* a10+b10+a11+b11, ....., a7+b7+a8+b8 */ y1 += x2; y1 += rnd2; y1 &= (mask << 2); y1m += (y1 >> 2); *((uint32*)(pred_block += 4)) = y1m; /* save y1m */ pred_block += offset2; prev += offset; } return 1; } } ================================================ FILE: 
RtspCamera/jni/m4v_h263/dec/src/get_pred_outside.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*
------------------------------------------------------------------------------
 INPUT AND OUTPUT DEFINITIONS

 Inputs:
    xpos   = x half-pixel coordinate within a VOP; motion compensated
             coordinate; native data type
    ypos   = y half-pixel coordinate within a VOP; motion compensated
             coordinate; native data type
    comp   = pointer to 8-bit compensated prediction values within a VOP;
             computed by this module (i/o); full-pel resolution; 8-bit data
    c_prev = pointer to previous 8-bit prediction values within a VOP;
             values range from (0-255); full-pel resolution; 8-bit data
    sh_d   = pointer to residual values used to compensate the predicted
             value; values range from (-512 to 511); full-pel resolution;
             native data type
    width  = width of the VOP in pixels (x axis); full-pel resolution
    height = height of the VOP in pixels (y axis); full-pel resolution
    rnd1   = rounding value for the case when one dimension uses half-pel
             resolution
    rnd2   = rounding value for the case when two dimensions use half-pel
             resolution

    NOTE(review): the parameter list above predates the current signature
    (xpos, ypos, c_prev, pred_block, width, height, rnd1, pred_width);
    comp, sh_d and rnd2 no longer exist as parameters — confirm against
    callers before relying on this description.

 Outputs:
    returns 1

 Pointers and Buffers Modified:
    comp = buffer contains newly computed compensated prediction values

------------------------------------------------------------------------------
 FUNCTION DESCRIPTION

 This function performs motion compensated prediction for the case where
 the motion vector points to a block outside the VOP.  The function
 interpolates the pixels that are outside the VOP using the boundary
 pixels for the block.  Once the values are interpolated, the pixel values
 are computed for a block in the current VOP.  The prediction values are
 generated by averaging pixel values in the previous VOP; the block
 position in the previous frame is computed from the current block's
 motion vector.  The computed pixel values are calculated by adding the
 prediction values to the block residual values.

 Details: first, the function determines which VOP boundary(ies) the
 motion vector is outside, i.e., left, right, top, bottom; xpos is
 compared to the left and right boundaries, ypos to the top and bottom
 boundaries.  Any pixels lying outside a boundary are then interpolated
 from the boundary pixels.  The interpolation used is a simple column-wise
 or row-wise copy of the boundary pixels, depending on which boundary the
 block is outside (row-wise for the portion outside an x boundary,
 column-wise for the portion outside a y boundary, and a constant corner
 fill where both apply).  Once the pixel interpolation is complete, the
 motion compensated output values are calculated from the padded
 prediction and handed to the half-pel interpolator.
*/

/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "mp4dec_lib.h"
#include "motion_comp.h"

/* All macros below operate on a 16x16, word-aligned scratch buffer with a
   stride of 16 bytes (hence ptr += 16 advances one row; (8 << 4) is 8 rows).
   `temp`/`temp2` and `ptr`/`prev` are expected in the caller's scope. */

/* Fill an 8x8 corner region with the single byte *prev replicated
   (byte smeared across a word, written two words per row, 8 rows). */
#define PAD_CORNER  { temp = *prev; \
                      temp |= (temp<<8); \
                      temp |= (temp<<16); \
                      *((uint32*)ptr) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; }

/* Replicate the 8-byte row at prev downward into 8 rows of the scratch. */
#define PAD_ROW     { temp = *((uint32*)prev); \
                      temp2 = *((uint32*)(prev+4)); \
                      *((uint32*)ptr) = temp;\
                      *((uint32*)(ptr+4)) = temp2; \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;\
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;\
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;\
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;\
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;\
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;\
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp2;}

/* Replicate the extra 4-byte word at prev+8 downward into 8 rows
   (used when a 12-byte-wide copy was needed for word alignment). */
#define PAD_EXTRA_4x8  { temp = *((uint32*)(prev+8)); \
                         *((uint32*)ptr) = temp; \
                         *((uint32*)(ptr+=16)) = temp; \
                         *((uint32*)(ptr+=16)) = temp; \
                         *((uint32*)(ptr+=16)) = temp; \
                         *((uint32*)(ptr+=16)) = temp; \
                         *((uint32*)(ptr+=16)) = temp; \
                         *((uint32*)(ptr+=16)) = temp; \
                         *((uint32*)(ptr+=16)) = temp; }

/* For each of 8 rows, smear the single boundary byte at prev sideways
   across 8 bytes of the scratch (prev walks down the scratch, stride 16). */
#define PAD_COL     { temp = *prev; \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)ptr) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp; \
                      temp = *(prev+=16); \
                      temp|=(temp<<8); temp|=(temp<<16); \
                      *((uint32*)(ptr+=16)) = temp; \
                      *((uint32*)(ptr+4)) = temp;}

/* Copy an 8x8 block from the frame (stride `width`) into the scratch
   (stride 16), two words per row. */
#define COPY_BLOCK  { *((uint32*)ptr) = *((uint32*)prev); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); }

/* Copy a 12x8 block (three words per row) — used when the source had to be
   rounded down to a word boundary and the extra 4 columns are still needed. */
#define COPY_12x8   { *((uint32*)ptr) = *((uint32*)prev); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \
                      *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \
                      *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \
                      *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); }

/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
int GetPredOutside(
    int xpos,           /* i: x position, half-pel units */
    int ypos,           /* i: y position, half-pel units */
    uint8 *c_prev,      /* i: reference frame */
    uint8 *pred_block,  /* i/o: output prediction block */
    int width,          /* i: frame width, full-pel */
    int height,         /* i: frame height, full-pel */
    int rnd1,           /* i: rounding control bit */
    int pred_width      /* i: stride of pred_block */
)
{
    /*------------------------------------------------------------------------
    ; Define all local variables
    ------------------------------------------------------------------------*/
    uint8 *prev;        /* source pointer walked by the COPY_/PAD_ macros */
    uint8 pred[256];    /* 16x16 scratch (stride 16) holding the copied
                           block plus the replicated out-of-frame padding */
    uint8 *ptr;         /* destination pointer walked by the macros */
    int xoffset;
    uint32 temp, temp2; /* scratch words used inside the macros */

    /*------------------------------------------------------------------------
    ; Function body here
    ------------------------------------------------------------------------*/
    /* saturate xpos and ypos so the reference never strays more than one
       block outside the frame (coordinates are in half-pel units) */
    if (xpos < -16) xpos = -16;
    if (xpos > ((width - 1) << 1)) xpos = (width - 1) << 1;
    if (ypos < -16) ypos = -16;
    if (ypos > ((height - 1) << 1)) ypos = (height - 1) << 1;

    if (xpos < 0)
    {
        if (ypos < 0) /* pad top left of frame */
        {
            /* copy the block into the bottom-right quadrant (row 8, col 8) */
            ptr = pred + (8 << 4) + 8;
            prev = c_prev;
            COPY_BLOCK
            /* pad the corner from the block's top-left pixel */
            ptr = pred;
            prev = pred + (8 << 4) + 8;
            PAD_CORNER
            /* pad top from the block's first row */
            ptr = pred + 8;
            prev = pred + (8 << 4) + 8;
            PAD_ROW
            /* pad left from the block's first column */
            ptr = pred + (8 << 4);
            prev = pred + (8 << 4) + 8;
            PAD_COL
            /* hand the padded area to the half-pel interpolator; index
               (ypos&1, xpos&1) presumably selects full/half-pel variants —
               confirm against GetPredAdvBTable in motion_comp */
            ptr = pred + (((ypos >> 1) + 8) << 4) + (xpos >> 1) + 8;
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
        else if ((ypos >> 1) < (height - B_SIZE)) /* pad left of frame */
        {
            /* copy block into the right half (col 8) */
            ptr = pred + 8;
            prev = c_prev + (ypos >> 1) * width;
            COPY_BLOCK
            /* copy extra line (9th row, needed for vertical half-pel) */
            *((uint32*)(ptr += 16)) = *((uint32*)(prev += width));
            *((uint32*)(ptr + 4)) = *((uint32*)(prev + 4));
            /* pad left */
            ptr = pred;
            prev = pred + 8;
            PAD_COL
            /* pad extra line */
            temp = *(prev += 16);
            temp |= (temp << 8);
            temp |= (temp << 16);
            *((uint32*)(ptr += 16)) = temp;
            *((uint32*)(ptr + 4)) = temp;
            ptr = pred + 8 + (xpos >> 1);
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
        else /* pad bottom left of frame */
        {
            /* copy the block */
            ptr = pred + 8; /* point to the center */
            prev = c_prev + width * (height - 8);
            COPY_BLOCK
            /* pad the corner */
            ptr = pred + (8 << 4);
            prev = ptr - 8;
            PAD_CORNER
            /* pad bottom */
            ptr = pred + (8 << 4) + 8;
            prev = ptr - 16;
            PAD_ROW
            /* pad left */
            ptr = pred ;
            prev = ptr + 8;
            PAD_COL
            ptr = pred + 8 + (((ypos >> 1) - (height - 8)) << 4) + (xpos >> 1);
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
    }
    else if ((xpos >> 1) < (width - B_SIZE))
    {
        if (ypos < 0) /* pad top of frame */
        {
            xoffset = xpos >> 1;
            xoffset = xoffset & 0x3; /* word align ptr */
            /* copy block (rounded down to a word boundary) */
            ptr = pred + (8 << 4);
            prev = c_prev + (xpos >> 1) - xoffset;
            if (xoffset || (xpos&1)) /* copy extra 4x8 */
            {
                COPY_12x8
            }
            else
            {
                COPY_BLOCK
            }
            /* pad top */
            ptr = pred;
            prev = pred + (8 << 4);
            PAD_ROW
            if (xoffset || (xpos&1)) /* pad extra 4x8 */
            {
                ptr = pred + 8;
                PAD_EXTRA_4x8
            }
            ptr = pred + (((ypos >> 1) + 8) << 4) + xoffset;
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
        else /* pad bottom of frame */
        {
            xoffset = xpos >> 1;
            xoffset = xoffset & 0x3; /* word align ptr */
            /* copy block */
            ptr = pred ;
            prev = c_prev + width * (height - 8) + (xpos >> 1) - xoffset;
            if (xoffset || (xpos&1))
            {
                COPY_12x8
            }
            else
            {
                COPY_BLOCK
            }
            /* pad bottom */
            ptr = pred + (8 << 4);
            prev = ptr - 16;
            PAD_ROW
            if (xoffset || (xpos&1))
            {
                ptr = pred + (8 << 4) + 8;
                PAD_EXTRA_4x8
            }
            ptr = pred + (((ypos >> 1) - (height - 8)) << 4) + xoffset;
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
    }
    else
    {
        if (ypos < 0) /* pad top right of frame */
        {
            /* copy block */
            ptr = pred + (8 << 4);
            prev = c_prev + width - 8;
            COPY_BLOCK
            /* pad top-right from the block's top-right pixel */
            ptr = pred + 8;
            prev = pred + (8 << 4) + 7;
            PAD_CORNER
            /* pad top */
            ptr = pred ;
            prev = pred + (8 << 4);
            PAD_ROW;
            /* pad right */
            ptr = pred + (8 << 4) + 8;
            prev = ptr - 1;
            PAD_COL;
            ptr = pred + ((8 + (ypos >> 1)) << 4) + (8 - (width - (xpos >> 1)));
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
        else if ((ypos >> 1) < (height - B_SIZE)) /* pad right of frame */
        {
            /* copy block */
            ptr = pred;
            prev = c_prev + (ypos >> 1) * width + width - 8;
            COPY_BLOCK
            /* copy extra line */
            *((uint32*)(ptr += 16)) = *((uint32*)(prev += width));
            *((uint32*)(ptr + 4)) = *((uint32*)(prev + 4));
            /* pad right */
            ptr = pred + 8;
            prev = ptr - 1;
            PAD_COL;
            /* pad extra line */
            temp = *(prev += 16);
            temp |= (temp << 8);
            temp |= (temp << 16);
            *((uint32*)(ptr += 16)) = temp;
            *((uint32*)(ptr + 4)) = temp;
            ptr = pred + 8 - (width - (xpos >> 1));
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
        else /* pad bottom right of frame */
        {
            /* copy block */
            ptr = pred;
            prev = c_prev + width * (height - 8) + width - 8;
            COPY_BLOCK
            /* pad bottom-right from the block's bottom-right pixel */
            ptr = pred + (8 << 4) + 8;
            prev = ptr - 17;
            PAD_CORNER
            /* pad right */
            ptr = pred + 8;
            prev = ptr - 1;
            PAD_COL
            /* pad bottom */
            ptr = pred + (8 << 4);
            prev = ptr - 16;
            PAD_ROW
            ptr = pred + 8 - (width - (xpos >> 1)) + ((8 - (height - (ypos >> 1))) << 4);
            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);
            return 1;
        }
    }
}

================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/idct.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*
------------------------------------------------------------------------------
 MODULE DESCRIPTION

 This file contains the functions that transform an 8x8 image block from
 dequantized DCT coefficients to spatial domain pixel values by calculating
 inverse discrete cosine transform (IDCT).
------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "idct.h" #include "motion_comp.h" #ifndef FAST_IDCT /* ------------------------------------------------------------------------------ FUNCTION NAME: idct ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS FOR idct Inputs: blk = pointer to the buffer containing the dequantized DCT coefficients of type int for an 8r8 image block; values range from (-2048, 2047) which defined as standard. Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Outputs: None Pointers and Buffers Modified: blk points to the found IDCT values for an 8r8 image block. Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION FOR idct This function transforms an 8r8 image block from dequantized DCT coefficients (F(u,v)) to spatial domain pirel values (f(r,y)) by performing the two dimensional inverse discrete cosine transform (IDCT). _7_ _7_ C(u) C(v) f(r,y) = \ \ F(u,v)---- ----cos[(2r+1)*u*pi/16]cos[(2y+1)*v*pi/16] /__ /__ 2 2 u=0 v=0 where C(i) = 1/sqrt(2) if i=0 C(i) = 1 otherwise 2-D IDCT can be separated as horizontal(row-wise) and vertical(column-wise) 1-D IDCTs. Therefore, 2-D IDCT values are found by the following two steps: 1. Find horizontal 1-D IDCT values for each row from 8r8 dequantized DCT coefficients by row IDCT operation. _7_ C(u) g(r,v) = \ F(u,v) ---- cos[(2r+1)*u*pi/16] /__ 2 u=0 2. Find vertical 1-D IDCT values for each column from the results of 1 by column IDCT operation. 
_7_ C(v) f(r,y) = \ g(r,v) ---- cos[(2y+1)*v*pi/16] /__ 2 v=0 ------------------------------------------------------------------------------ REQUIREMENTS FOR idct None ------------------------------------------------------------------------------ */ /* REFERENCES FOR idct */ /* idct.c, inverse fast discrete cosine transform inverse two dimensional DCT, Chen-Wang algorithm (cf. IEEE ASSP-32, pp. 803-816, Aug. 1984) 32-bit integer arithmetic (8 bit coefficients) 11 mults, 29 adds per DCT sE, 18.8.91 coefficients ertended to 12 bit for IEEE1180-1990 compliance sE, 2.1.94 */ /*---------------------------------------------------------------------------- ; Function Code FOR idct ----------------------------------------------------------------------------*/ void idct_intra( int *blk, uint8 *comp, int width ) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int i; int32 tmpBLK[64]; int32 *tmpBLK32 = &tmpBLK[0]; int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ int32 a; int offset = width - 8; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* two dimensional inverse discrete cosine transform */ /* column (vertical) IDCT */ for (i = B_SIZE - 1; i >= 0; i--) { /* initialize butterfly nodes at first stage */ r1 = blk[B_SIZE * 4 + i] << 11; /* since row IDCT results have net left shift by 3 */ /* this left shift by 8 gives net left shift by 11 */ /* in order to maintain the same scale as that of */ /* coefficients Wi */ r2 = blk[B_SIZE * 6 + i]; r3 = blk[B_SIZE * 2 + i]; r4 = blk[B_SIZE * 1 + i]; r5 = blk[B_SIZE * 7 + i]; r6 = blk[B_SIZE * 5 + i]; r7 = blk[B_SIZE * 3 + i]; if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7)) { /* shortcut */ /* execute if values of g(r,1) to g(r,7) in a column*/ /* are all 
zeros */ /* make output of IDCT >>3 or scaled by 1/8 and */ /* with the proper rounding */ a = (blk[B_SIZE * 0 + i]) << 3; tmpBLK32[B_SIZE * 0 + i] = a; tmpBLK32[B_SIZE * 1 + i] = a; tmpBLK32[B_SIZE * 2 + i] = a; tmpBLK32[B_SIZE * 3 + i] = a; tmpBLK32[B_SIZE * 4 + i] = a; tmpBLK32[B_SIZE * 5 + i] = a; tmpBLK32[B_SIZE * 6 + i] = a; tmpBLK32[B_SIZE * 7 + i] = a; } else { r0 = (blk[8 * 0 + i] << 11) + 128; /* first stage */ r8 = W7 * (r4 + r5); r4 = (r8 + (W1 - W7) * r4); /* Multiplication with Wi increases the net left */ /* shift from 11 to 14,we have to shift back by 3*/ r5 = (r8 - (W1 + W7) * r5); r8 = W3 * (r6 + r7); r6 = (r8 - (W3 - W5) * r6); r7 = (r8 - (W3 + W5) * r7); /* second stage */ r8 = r0 + r1; r0 -= r1; r1 = W6 * (r3 + r2); r2 = (r1 - (W2 + W6) * r2); r3 = (r1 + (W2 - W6) * r3); r1 = r4 + r6; r4 -= r6; r6 = r5 + r7; r5 -= r7; /* third stage */ r7 = r8 + r3; r8 -= r3; r3 = r0 + r2; r0 -= r2; r2 = (181 * (r4 + r5) + 128) >> 8; /* rounding */ r4 = (181 * (r4 - r5) + 128) >> 8; /* fourth stage */ /* net shift of IDCT is >>3 after the following */ /* shift operation, it makes output of 2-D IDCT */ /* scaled by 1/8, that is scaled twice by */ /* 1/(2*sqrt(2)) for row IDCT and column IDCT. */ /* see detail analysis in design doc. */ tmpBLK32[0 + i] = (r7 + r1) >> 8; tmpBLK32[(1<<3) + i] = (r3 + r2) >> 8; tmpBLK32[(2<<3) + i] = (r0 + r4) >> 8; tmpBLK32[(3<<3) + i] = (r8 + r6) >> 8; tmpBLK32[(4<<3) + i] = (r8 - r6) >> 8; tmpBLK32[(5<<3) + i] = (r0 - r4) >> 8; tmpBLK32[(6<<3) + i] = (r3 - r2) >> 8; tmpBLK32[(7<<3) + i] = (r7 - r1) >> 8; } } /* row (horizontal) IDCT */ for (i = 0 ; i < B_SIZE; i++) { /* initialize butterfly nodes at the first stage */ r1 = ((int32)tmpBLK32[4+(i<<3)]) << 8; /* r1 left shift by 11 is to maintain the same */ /* scale as that of coefficients (W1,...W7) */ /* since blk[4] won't multiply with Wi. */ /* see detail diagram in design document. 
*/ r2 = tmpBLK32[6+(i<<3)]; r3 = tmpBLK32[2+(i<<3)]; r4 = tmpBLK32[1+(i<<3)]; r5 = tmpBLK32[7+(i<<3)]; r6 = tmpBLK32[5+(i<<3)]; r7 = tmpBLK32[3+(i<<3)]; if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7)) { /* shortcut */ /* execute if values of F(1,v) to F(7,v) in a row*/ /* are all zeros */ /* output of row IDCT scaled by 8 */ a = (((int32)tmpBLK32[0+(i<<3)] + 32) >> 6); CLIP_RESULT(a) *comp++ = a; *comp++ = a; *comp++ = a; *comp++ = a; *comp++ = a; *comp++ = a; *comp++ = a; *comp++ = a; comp += offset; } else { /* for proper rounding in the fourth stage */ r0 = (((int32)tmpBLK32[0+(i<<3)]) << 8) + 8192; /* first stage */ r8 = W7 * (r4 + r5) + 4; r4 = (r8 + (W1 - W7) * r4) >> 3; r5 = (r8 - (W1 + W7) * r5) >> 3; r8 = W3 * (r6 + r7) + 4; r6 = (r8 - (W3 - W5) * r6) >> 3; r7 = (r8 - (W3 + W5) * r7) >> 3; /* second stage */ r8 = r0 + r1; r0 -= r1; r1 = W6 * (r3 + r2) + 4; r2 = (r1 - (W2 + W6) * r2) >> 3; r3 = (r1 + (W2 - W6) * r3) >> 3; r1 = r4 + r6; r4 -= r6; r6 = r5 + r7; r5 -= r7; /* third stage */ r7 = r8 + r3; r8 -= r3; r3 = r0 + r2; r0 -= r2; r2 = (181 * (r4 + r5) + 128) >> 8; /* rounding */ r4 = (181 * (r4 - r5) + 128) >> 8; /* fourth stage */ /* net shift of this function is <<3 after the */ /* following shift operation, it makes output of */ /* row IDCT scaled by 8 to retain 3 bits precision*/ a = ((r7 + r1) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r3 + r2) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r0 + r4) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r8 + r6) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r8 - r6) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r0 - r4) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r3 - r2) >> 14); CLIP_RESULT(a) *comp++ = a; a = ((r7 - r1) >> 14); CLIP_RESULT(a) *comp++ = a; comp += offset; } } /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } void idct( int *blk, uint8 *pred, uint8 *dst, int 
width) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int i; int32 tmpBLK[64]; int32 *tmpBLK32 = &tmpBLK[0]; int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ int32 a; int res; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* two dimensional inverse discrete cosine transform */ /* column (vertical) IDCT */ for (i = B_SIZE - 1; i >= 0; i--) { /* initialize butterfly nodes at first stage */ r1 = blk[B_SIZE * 4 + i] << 11; /* since row IDCT results have net left shift by 3 */ /* this left shift by 8 gives net left shift by 11 */ /* in order to maintain the same scale as that of */ /* coefficients Wi */ r2 = blk[B_SIZE * 6 + i]; r3 = blk[B_SIZE * 2 + i]; r4 = blk[B_SIZE * 1 + i]; r5 = blk[B_SIZE * 7 + i]; r6 = blk[B_SIZE * 5 + i]; r7 = blk[B_SIZE * 3 + i]; if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7)) { /* shortcut */ /* execute if values of g(r,1) to g(r,7) in a column*/ /* are all zeros */ /* make output of IDCT >>3 or scaled by 1/8 and */ /* with the proper rounding */ a = (blk[B_SIZE * 0 + i]) << 3; tmpBLK32[B_SIZE * 0 + i] = a; tmpBLK32[B_SIZE * 1 + i] = a; tmpBLK32[B_SIZE * 2 + i] = a; tmpBLK32[B_SIZE * 3 + i] = a; tmpBLK32[B_SIZE * 4 + i] = a; tmpBLK32[B_SIZE * 5 + i] = a; tmpBLK32[B_SIZE * 6 + i] = a; tmpBLK32[B_SIZE * 7 + i] = a; } else { r0 = (blk[8 * 0 + i] << 11) + 128; /* first stage */ r8 = W7 * (r4 + r5); r4 = (r8 + (W1 - W7) * r4); /* Multiplication with Wi increases the net left */ /* shift from 11 to 14,we have to shift back by 3*/ r5 = (r8 - (W1 + W7) * r5); r8 = W3 * (r6 + r7); r6 = (r8 - (W3 - W5) * r6); r7 = (r8 - (W3 + W5) * r7); /* second stage */ r8 = r0 + r1; r0 -= r1; r1 = W6 * (r3 + r2); r2 = (r1 - (W2 + W6) * r2); r3 = (r1 + (W2 - W6) * r3); r1 = r4 + r6; r4 -= 
r6; r6 = r5 + r7; r5 -= r7; /* third stage */ r7 = r8 + r3; r8 -= r3; r3 = r0 + r2; r0 -= r2; r2 = (181 * (r4 + r5) + 128) >> 8; /* rounding */ r4 = (181 * (r4 - r5) + 128) >> 8; /* fourth stage */ /* net shift of IDCT is >>3 after the following */ /* shift operation, it makes output of 2-D IDCT */ /* scaled by 1/8, that is scaled twice by */ /* 1/(2*sqrt(2)) for row IDCT and column IDCT. */ /* see detail analysis in design doc. */ tmpBLK32[0 + i] = (r7 + r1) >> 8; tmpBLK32[(1<<3) + i] = (r3 + r2) >> 8; tmpBLK32[(2<<3) + i] = (r0 + r4) >> 8; tmpBLK32[(3<<3) + i] = (r8 + r6) >> 8; tmpBLK32[(4<<3) + i] = (r8 - r6) >> 8; tmpBLK32[(5<<3) + i] = (r0 - r4) >> 8; tmpBLK32[(6<<3) + i] = (r3 - r2) >> 8; tmpBLK32[(7<<3) + i] = (r7 - r1) >> 8; } } /* row (horizontal) IDCT */ for (i = B_SIZE - 1; i >= 0; i--) { /* initialize butterfly nodes at the first stage */ r1 = ((int32)tmpBLK32[4+(i<<3)]) << 8; /* r1 left shift by 11 is to maintain the same */ /* scale as that of coefficients (W1,...W7) */ /* since blk[4] won't multiply with Wi. */ /* see detail diagram in design document. 
*/ r2 = tmpBLK32[6+(i<<3)]; r3 = tmpBLK32[2+(i<<3)]; r4 = tmpBLK32[1+(i<<3)]; r5 = tmpBLK32[7+(i<<3)]; r6 = tmpBLK32[5+(i<<3)]; r7 = tmpBLK32[3+(i<<3)]; if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7)) { /* shortcut */ /* execute if values of F(1,v) to F(7,v) in a row*/ /* are all zeros */ /* output of row IDCT scaled by 8 */ a = (tmpBLK32[0+(i<<3)] + 32) >> 6; blk[0+(i<<3)] = a; blk[1+(i<<3)] = a; blk[2+(i<<3)] = a; blk[3+(i<<3)] = a; blk[4+(i<<3)] = a; blk[5+(i<<3)] = a; blk[6+(i<<3)] = a; blk[7+(i<<3)] = a; } else { /* for proper rounding in the fourth stage */ r0 = (((int32)tmpBLK32[0+(i<<3)]) << 8) + 8192; /* first stage */ r8 = W7 * (r4 + r5) + 4; r4 = (r8 + (W1 - W7) * r4) >> 3; r5 = (r8 - (W1 + W7) * r5) >> 3; r8 = W3 * (r6 + r7) + 4; r6 = (r8 - (W3 - W5) * r6) >> 3; r7 = (r8 - (W3 + W5) * r7) >> 3; /* second stage */ r8 = r0 + r1; r0 -= r1; r1 = W6 * (r3 + r2) + 4; r2 = (r1 - (W2 + W6) * r2) >> 3; r3 = (r1 + (W2 - W6) * r3) >> 3; r1 = r4 + r6; r4 -= r6; r6 = r5 + r7; r5 -= r7; /* third stage */ r7 = r8 + r3; r8 -= r3; r3 = r0 + r2; r0 -= r2; r2 = (181 * (r4 + r5) + 128) >> 8; /* rounding */ r4 = (181 * (r4 - r5) + 128) >> 8; /* fourth stage */ /* net shift of this function is <<3 after the */ /* following shift operation, it makes output of */ /* row IDCT scaled by 8 to retain 3 bits precision*/ blk[0+(i<<3)] = (r7 + r1) >> 14; blk[1+(i<<3)] = (r3 + r2) >> 14; blk[2+(i<<3)] = (r0 + r4) >> 14; blk[3+(i<<3)] = (r8 + r6) >> 14; blk[4+(i<<3)] = (r8 - r6) >> 14; blk[5+(i<<3)] = (r0 - r4) >> 14; blk[6+(i<<3)] = (r3 - r2) >> 14; blk[7+(i<<3)] = (r7 - r1) >> 14; } /* add with prediction , 08/03/05 */ res = (*pred++ + block[0+(i<<3)]); CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[1+(i<<3)]); CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[2+(i<<3)]); CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[3+(i<<3)]); CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[4+(i<<3)]); CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[5+(i<<3)]); 
CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[6+(i<<3)]); CLIP_RESULT(res); *dst++ = res; res = (*pred++ + block[7+(i<<3)]); CLIP_RESULT(res); *dst++ = res; pred += 8; dst += (width - 8); } /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return; } #endif /*---------------------------------------------------------------------------- ; End Function: idct ----------------------------------------------------------------------------*/ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/idct.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef idct_h #define idct_h /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. ----------------------------------------------------------------------------*/ #define INTEGER_IDCT #ifdef FAST_IDCT #ifndef INTEGER_IDCT #define INTEGER_IDCT #endif #endif #ifdef FAST_IDCT #ifdef __cplusplus extern "C" { #endif void idctrow0(int16 *blk, uint8 *pred, uint8 *dst, int width); void idctrow1(int16 *blk, uint8 *pred, uint8 *dst, int width); void idctrow2(int16 *blk, uint8 *pred, uint8 *dst, int width); void idctrow3(int16 *blk, uint8 *pred, uint8 *dst, int width); void idctrow4(int16 *blk, uint8 *pred, uint8 *dst, int width); void idctcol0(int16 *blk); void idctcol1(int16 *blk); void idctcol2(int16 *blk); void idctcol3(int16 *blk); void idctcol4(int16 *blk); void idctrow0_intra(int16 *blk, PIXEL *comp, int width); void idctrow1_intra(int16 *blk, PIXEL *comp, int width); void idctrow2_intra(int16 *blk, PIXEL *comp, int width); void idctrow3_intra(int16 *blk, PIXEL *comp, int width); void idctrow4_intra(int16 *blk, PIXEL *comp, int width); #ifdef __cplusplus } #endif #endif /* this code assumes ">>" to be a two's-complement arithmetic */ /* right shift: (-2)>>1 == -1 , (-3)>>1 == -2 */ /* a positive real constant is converted to an integer scaled by 2048 */ /* or equivalent to left shift by 11 */ #define W1 2841 /* 2048*sqrt(2)*cos(1*pi/16) */ #define W2 2676 /* 2048*sqrt(2)*cos(2*pi/16) */ #define W3 2408 /* 
2048*sqrt(2)*cos(3*pi/16) */ #define W5 1609 /* 2048*sqrt(2)*cos(5*pi/16) */ #define W6 1108 /* 2048*sqrt(2)*cos(6*pi/16) */ #define W7 565 /* 2048*sqrt(2)*cos(7*pi/16) */ #define W1mW7 2276 #define W1pW7 3406 #define W5mW3 -799 #define mW3mW5 -4017 #define mW2mW6 -3784 #define W2mW6 1568 /* left shift by 11 is to maintain the accuracy of the decimal point */ /* for the transform coefficients (W1,...W7) */ /*---------------------------------------------------------------------------- ; EXTERNAL VARIABLES REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; SIMPLE TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; ENUMERATED TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; STRUCTURES TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/idct_vca.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "mp4def.h"
#include "idct.h"
#include "motion_comp.h"

#ifdef FAST_IDCT

/****************************************************************
*       vca_idct.c : created 6/1/99 for several options
*  of hard-coded reduced idct function (using nz_coefs)
*
*  Each idctrowN/idctcolN variant below is a reduced inverse DCT
*  specialized for an 8x8 block in which only the first N
*  coefficients of each row (or column) are nonzero; the caller
*  selects the variant from the nonzero-coefficient count.  The
*  "row" variants also add the motion-compensated prediction
*  (pred) and store the clipped result into dst.
*
*  NOTE(review): CLIP_RESULT and ADD_AND_CLIP1..4 are macros from
*  the decoder's shared headers; ADD_AND_CLIPn appear to combine
*  the result with the matching byte of pred_word and saturate,
*  presumably to the 0..255 pixel range -- confirm in
*  motion_comp.h before relying on that here.
******************************************************************/
/*****************************************************/
//pretested version

/* All-zero block: nothing to reconstruct, every argument unused. */
void idctrow0(int16 *blk, uint8 *pred, uint8 *dst, int width)
{
    OSCL_UNUSED_ARG(blk);
    OSCL_UNUSED_ARG(width);
    OSCL_UNUSED_ARG(dst);
    OSCL_UNUSED_ARG(pred);

    return ;
}

/* All-zero column: no work needed. */
void idctcol0(int16 *blk)
{
    OSCL_UNUSED_ARG(blk);

    return ;
}

/* DC-only rows: each output pixel of a row is the rounded, scaled DC
 * term added to the prediction.  Reads only blk[0] of each row and
 * zeroes it for the next block.  pred/dst are written 4 bytes at a
 * time through uint32 stores (assumes 4-byte alignment). */
void idctrow1(int16 *blk, uint8 *pred, uint8 *dst, int width)
{
    /* shortcut */
    int tmp;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage of the
       pre-offset addressing mode in the loop below */
    width -= 4;
    dst -= width;
    pred -= 12;
    blk -= 8;

    while (i--)
    {
        /* rounded DC value for this whole row */
        tmp = (*(blk += 8) + 32) >> 6;
        *blk = 0;  /* clear coefficient for the next block */

        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */
        res = tmp + (pred_word & 0xFF);
        CLIP_RESULT(res);
        res2 = tmp + ((pred_word >> 8) & 0xFF);
        CLIP_RESULT(res2);
        dst_word = (res2 << 8) | res;
        res = tmp + ((pred_word >> 16) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 16);
        res = tmp + ((pred_word >> 24) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */
        res = tmp + (pred_word & 0xFF);
        CLIP_RESULT(res);
        res2 = tmp + ((pred_word >> 8) & 0xFF);
        CLIP_RESULT(res2);
        dst_word = (res2 << 8) | res;
        res = tmp + ((pred_word >> 16) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 16);
        res = tmp + ((pred_word >> 24) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */
    }
    return;
}

/* DC-only column: replicate the scaled DC term down the column. */
void idctcol1(int16 *blk)
{
    /* shortcut */
    blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56] =
                                              blk[0] << 3;
    return;
}

/* Rows with only the first two coefficients (blk[8k], blk[8k+1])
 * nonzero.  Performs the reduced fixed-point IDCT stages, adds the
 * prediction via ADD_AND_CLIP1..4 and stores 4 packed bytes at a time. */
void idctrow2(int16 *blk, uint8 *pred, uint8 *dst, int width)
{
    int32 x0, x1, x2, x4, x5;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage of the
       pre-offset addressing mode in the loop below */
    width -= 4;
    dst -= width;
    pred -= 12;
    blk -= 8;

    while (i--)
    {
        /* shortcut */
        x4 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;

        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage: combine with prediction and pack 4 pixels/word */
        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */
        res = (x0 + x4) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 + x5) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */
        res = (x0 - x5) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 - x1) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x4) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Column IDCT for columns whose first two coefficients are nonzero
 * (reads blk[0] and blk[8]); results stay in blk, column stride 8. */
void idctcol2(int16 *blk)
{
    int32 x0, x1, x3, x5, x7;//, x8;

    x1 = blk[8];
    x0 = ((int32)blk[0] << 11) + 128;
    /* both upper and lower*/

    x7 = W7 * x1;
    x1 = W1 * x1;

    x3 = x7;
    x5 = (181 * (x1 - x7) + 128) >> 8;
    x7 = (181 * (x1 + x7) + 128) >> 8;

    blk[0] = (x0 + x1) >> 8;
    blk[8] = (x0 + x7) >> 8;
    blk[16] = (x0 + x5) >> 8;
    blk[24] = (x0 + x3) >> 8;
    blk[56] = (x0 - x1) >> 8;
    blk[48] = (x0 - x7) >> 8;
    blk[40] = (x0 - x5) >> 8;
    blk[32] = (x0 - x3) >> 8;

    return ;
}

/* Rows with the first three coefficients (blk[8k..8k+2]) nonzero. */
void idctrow3(int16 *blk, uint8 *pred, uint8 *dst, int width)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage of the
       pre-offset addressing mode in the loop below */
    width -= 4;
    dst -= width;
    pred -= 12;
    blk -= 8;

    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;
        x7 = (181 * (x1 + x7) + 128) >> 8;

        /* combine with prediction and pack 4 pixels per uint32 store */
        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */
        res = (x2 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Column IDCT for columns whose first three coefficients are nonzero
 * (reads blk[0], blk[8], blk[16]). */
void idctcol3(int16 *blk)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

    x2 = blk[16];
    x1 = blk[8];
    x0 = ((int32)blk[0] << 11) + 128;

    x4 = x0;
    x6 = W6 * x2;
    x2 = W2 * x2;
    x8 = x0 - x2;
    x0 += x2;
    x2 = x8;
    x8 = x4 - x6;
    x4 += x6;
    x6 = x8;

    x7 = W7 * x1;
    x1 = W1 * x1;
    x3 = x7;
    x5 = (181 * (x1 - x7) + 128) >> 8;
    x7 = (181 * (x1 + x7) + 128) >> 8;

    blk[0] = (x0 + x1) >> 8;
    blk[8] = (x4 + x7) >> 8;
    blk[16] = (x6 + x5) >> 8;
    blk[24] = (x2 + x3) >> 8;
    blk[56] = (x0 - x1) >> 8;
    blk[48] = (x4 - x7) >> 8;
    blk[40] = (x6 - x5) >> 8;
    blk[32] = (x2 - x3) >> 8;

    return;
}

/* Rows with the first four coefficients (blk[8k..8k+3]) nonzero. */
void idctrow4(int16 *blk, uint8 *pred, uint8 *dst, int width)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage of the
       pre-offset addressing mode in the loop below */
    width -= 4;
    dst -= width;
    pred -= 12;
    blk -= 8;

    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x3 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x5 = (W3 * x3 + 4) >> 3;
        x3 = (- W5 * x3 + 4) >> 3;
        x8 = x1 - x5;
        x1 += x5;
        x5 = x8;
        x8 = x7 - x3;
        x3 += x7;
        x7 = (181 * (x5 + x8) + 128) >> 8;
        x5 = (181 * (x5 - x8) + 128) >> 8;

        /* combine with prediction and pack 4 pixels per uint32 store */
        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */
        res = (x2 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}
/* Column IDCT for columns whose first four coefficients are nonzero
 * (reads blk[0], blk[8], blk[16], blk[24]); results stay in blk. */
void idctcol4(int16 *blk)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

    x2 = blk[16];
    x1 = blk[8];
    x3 = blk[24];
    x0 = ((int32)blk[0] << 11) + 128;

    x4 = x0;
    x6 = W6 * x2;
    x2 = W2 * x2;
    x8 = x0 - x2;
    x0 += x2;
    x2 = x8;
    x8 = x4 - x6;
    x4 += x6;
    x6 = x8;

    x7 = W7 * x1;
    x1 = W1 * x1;
    x5 = W3 * x3;
    x3 = -W5 * x3;
    x8 = x1 - x5;
    x1 += x5;
    x5 = x8;
    x8 = x7 - x3;
    x3 += x7;
    x7 = (181 * (x5 + x8) + 128) >> 8;
    x5 = (181 * (x5 - x8) + 128) >> 8;

    blk[0] = (x0 + x1) >> 8;
    blk[8] = (x4 + x7) >> 8;
    blk[16] = (x6 + x5) >> 8;
    blk[24] = (x2 + x3) >> 8;
    blk[56] = (x0 - x1) >> 8;
    blk[48] = (x4 - x7) >> 8;
    blk[40] = (x6 - x5) >> 8;
    blk[32] = (x2 - x3) >> 8;

    return ;
}

/* Intra variants: same reduced row IDCTs but with no motion-compensated
 * prediction to add -- the clipped result is written directly to the
 * component buffer `comp` with row stride `width`. */

/* All-zero intra block: nothing to write, every argument unused. */
void idctrow0_intra(int16 *blk, PIXEL * comp, int width)
{
    OSCL_UNUSED_ARG(blk);
    OSCL_UNUSED_ARG(comp);
    OSCL_UNUSED_ARG(width);

    return ;
}

/* DC-only intra rows: replicate the rounded, clipped DC byte across
 * all 8 pixels of each row via two packed uint32 stores. */
void idctrow1_intra(int16 *blk, PIXEL *comp, int width)
{
    /* shortcut */
    int32 tmp;
    int i = 8;
    int offset = width;
    uint32 word;

    comp -= offset;   /* pre-offset for the (comp += offset) addressing below */
    while (i--)
    {
        tmp = ((blk[0] + 32) >> 6);
        blk[0] = 0;   /* clear coefficient for the next block */
        CLIP_RESULT(tmp)
        /* replicate the byte into all four lanes of the word */
        word = (tmp << 8) | tmp;
        word = (word << 16) | word;
        *((uint32*)(comp += offset)) = word;
        *((uint32*)(comp + 4)) = word;
        blk += B_SIZE;
    }
    return;
}

/* Intra rows with the first two coefficients (blk[0], blk[1]) nonzero. */
void idctrow2_intra(int16 *blk, PIXEL *comp, int width)
{
    int32 x0, x1, x2, x4, x5, temp;
    int i = 8;
    int offset = width;
    int32 word;

    comp -= offset;   /* pre-offset for the (comp += offset) addressing below */
    while (i--)
    {
        /* shortcut */
        x4 = blk[1];
        blk[1] = 0;
        x0 = ((int32)blk[0] << 8) + 8192;
        blk[0] = 0;   /* for proper rounding in the fourth stage */

        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;

        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage: clip and pack 4 pixels per word */
        word = ((x0 + x4) >> 14);
        CLIP_RESULT(word)
        temp = ((x0 + x2) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x0 + x1) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x0 + x5) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp += offset)) = word;

        word = ((x0 - x5) >> 14);
        CLIP_RESULT(word)
        temp = ((x0 - x1) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x0 - x2) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x0 - x4) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp + 4)) = word;

        blk += B_SIZE;
    }
    return ;
}

/* Intra rows with the first three coefficients (blk[0..2]) nonzero. */
void idctrow3_intra(int16 *blk, PIXEL *comp, int width)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8, temp;
    int i = 8;
    int offset = width;
    int32 word;

    comp -= offset;   /* pre-offset for the (comp += offset) addressing below */
    while (i--)
    {
        x2 = blk[2];
        blk[2] = 0;
        x1 = blk[1];
        blk[1] = 0;
        x0 = ((int32)blk[0] << 8) + 8192;
        blk[0] = 0;   /* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;
        x7 = (181 * (x1 + x7) + 128) >> 8;

        /* clip and pack 4 pixels per word */
        word = ((x0 + x1) >> 14);
        CLIP_RESULT(word)
        temp = ((x4 + x7) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x6 + x5) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x2 + x3) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp += offset)) = word;

        word = ((x2 - x3) >> 14);
        CLIP_RESULT(word)
        temp = ((x6 - x5) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x4 - x7) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x0 - x1) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp + 4)) = word;

        blk += B_SIZE;
    }
    return ;
}

/* Intra rows with the first four coefficients (blk[0..3]) nonzero. */
void idctrow4_intra(int16 *blk, PIXEL *comp, int width)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8, temp;
    int i = 8;
    int offset = width;
    int32 word;

    comp -= offset;   /* pre-offset for the (comp += offset) addressing below */
    while (i--)
    {
        x2 = blk[2];
        blk[2] = 0;
        x1 = blk[1];
        blk[1] = 0;
        x3 = blk[3];
        blk[3] = 0;
        x0 = ((int32)blk[0] << 8) + 8192;
        blk[0] = 0;   /* for proper rounding in the fourth stage */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x5 = (W3 * x3 + 4) >> 3;
        x3 = (- W5 * x3 + 4) >> 3;
        x8 = x1 - x5;
        x1 += x5;
        x5 = x8;
        x8 = x7 - x3;
        x3 += x7;
        x7 = (181 * (x5 + x8) + 128) >> 8;
        x5 = (181 * (x5 - x8) + 128) >> 8;

        /* clip and pack 4 pixels per word */
        word = ((x0 + x1) >> 14);
        CLIP_RESULT(word)
        temp = ((x4 + x7) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x6 + x5) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x2 + x3) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp += offset)) = word;

        word = ((x2 - x3) >> 14);
        CLIP_RESULT(word)
        temp = ((x6 - x5) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 8);
        temp = ((x4 - x7) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 16);
        temp = ((x0 - x1) >> 14);
        CLIP_RESULT(temp)
        word = word | (temp << 24);
        *((int32*)(comp + 4)) = word;

        blk += B_SIZE;
    }
    return ;
}
#endif



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/max_level.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ /* * ------------------------------------------------------------------- * * MPEG-4 Simple Profile Video Decoder * * ------------------------------------------------------------------- * * * This software module was originally developed by * * Michael Wollborn (TUH / ACTS-MoMuSyS) * * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard. * This software module is an implementation of a part of one or more MPEG-4 * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC * 14496-2) standard. * * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free * license to this software module or modifications thereof for use in hardware * or software products claiming conformance to the MPEG-4 Video (ISO/IEC * 14496-2) standard. * * Those intending to use this software module in hardware or software products * are advised that its use may infringe existing patents. The original * developer of this software module and his/her company, the subsequent * editors and their companies, and ISO/IEC have no liability for use of this * software module or modifications thereof in an implementation. Copyright is * not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming * products. * * ACTS-MoMuSys partners retain full right to use the code for his/her own * purpose, assign or donate the code to a third party and to inhibit third * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard * conforming products. This copyright notice must be included in all copies or * derivative works. * * Copyright (c) 1997 * ***************************************************************************** This is a header file for "vlc_decode.c". The table data actually resides in "vlc_tab.c". 
------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; CONTINUE ONLY IF NOT ALREADY DEFINED ----------------------------------------------------------------------------*/ #ifndef max_level_H #define max_level_H /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4def.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL VARIABLES REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ #ifdef __cplusplus extern "C" { #endif extern const int intra_max_level[2][NCOEFF_BLOCK]; extern const int inter_max_level[2][NCOEFF_BLOCK]; extern const int intra_max_run0[28]; extern const int intra_max_run1[9]; extern const int inter_max_run0[13]; extern const int inter_max_run1[4]; /*---------------------------------------------------------------------------- ; SIMPLE TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; ENUMERATED TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; STRUCTURES TYPEDEF'S 
----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #ifdef __cplusplus } #endif #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/mb_motion_comp.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: video = pointer to structure of type VideoDecData Local Stores/Buffers/Pointers Needed: roundtab16 = rounding table Global Stores/Buffers/Pointers Needed: None Outputs: None Pointers and Buffers Modified: video->currVop->yChan contents are the newly calculated luminance data video->currVop->uChan contents are the newly calculated chrominance b data video->currVop->vChan contents are the newly calculated chrominance r data video->pstprcTypCur contents are the updated semaphore propagation values Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This function performs high level motion compensation on the luminance and chrominance data. It sets up all the parameters required by the functions that perform luminance and chrominance prediction and it initializes the pointer to the post processing semaphores of a given block. It also checks the motion compensation mode in order to determine which luminance or chrominance prediction functions to call and determines how the post processing semaphores are updated. */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "motion_comp.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. 
----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL STORE/BUFFER/POINTER DEFINITIONS ; Variable declaration - defined here and used outside this module ----------------------------------------------------------------------------*/ /* 09/29/2000 bring this from mp4def.h */ // const static int roundtab4[] = {0,1,1,1}; // const static int roundtab8[] = {0,0,1,1,1,1,1,2}; /*** 10/30 for TPS */ // const static int roundtab12[] = {0,0,0,1,1,1,1,1,1,1,2,2}; /* 10/30 for TPS ***/ const static int roundtab16[] = {0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2}; /*---------------------------------------------------------------------------- ; EXTERNAL FUNCTION REFERENCES ; Declare functions defined elsewhere and referenced in this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; FUNCTION CODE ----------------------------------------------------------------------------*/ /** modified 3 August 2005 to do prediction and put the results in video->mblock->pred_block, no adding with residue */ void MBMotionComp( VideoDecData *video, int CBP ) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ /* Previous Video Object Plane */ Vop *prev = 
video->prevVop; /* Current Macroblock (MB) in the VOP */ int mbnum = video->mbnum; /* Number of MB per data row */ int MB_in_width = video->nMBPerRow; int ypos, xpos; PIXEL *c_comp, *c_prev; PIXEL *cu_comp, *cu_prev; PIXEL *cv_comp, *cv_prev; int height, width, pred_width; int imv, mvwidth; int32 offset; uint8 mode; uint8 *pred_block, *pred; /* Motion vector (dx,dy) in half-pel resolution */ int dx, dy; MOT px[4], py[4]; int xpred, ypred; int xsum; int round1; #ifdef PV_POSTPROC_ON // 2/14/2001 /* Total number of pixels in the VOL */ int32 size = (int32) video->nTotalMB << 8; uint8 *pp_dec_y, *pp_dec_u; int ll[4]; int tmp = 0; uint8 msk_deblock = 0; #endif /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* Set rounding type */ /* change from array to single 09/29/2000 */ round1 = (int)(1 - video->currVop->roundingType); /* width of luminance data in pixels (y axis) */ width = video->width; /* heigth of luminance data in pixels (x axis) */ height = video->height; /* number of blocks per row */ mvwidth = MB_in_width << 1; /* starting y position in current MB; origin of MB */ ypos = video->mbnum_row << 4 ; /* starting x position in current MB; origin of MB */ xpos = video->mbnum_col << 4 ; /* offset to (x,y) position in current luminance MB */ /* in pixel resolution */ /* ypos*width -> row, +x -> column */ offset = (int32)ypos * width + xpos; /* get mode for current MB */ mode = video->headerInfo.Mode[mbnum]; /* block index */ /* imv = (xpos/8) + ((ypos/8) * mvwidth) */ imv = (offset >> 6) - (xpos >> 6) + (xpos >> 3); if (mode & INTER_1VMASK) { dx = px[0] = px[1] = px[2] = px[3] = video->motX[imv]; dy = py[0] = py[1] = py[2] = py[3] = video->motY[imv]; if ((dx & 3) == 0) { dx = dx >> 1; } else { /* x component of MV is or'ed for rounding (?) 
*/ dx = (dx >> 1) | 1; } /* y component of motion vector; divide by 2 for to */ /* convert to full-pel resolution. */ if ((dy & 3) == 0) { dy = dy >> 1; } else { /* y component of MV is or'ed for rounding (?) */ dy = (dy >> 1) | 1; } } else { px[0] = video->motX[imv]; px[1] = video->motX[imv+1]; px[2] = video->motX[imv+mvwidth]; px[3] = video->motX[imv+mvwidth+1]; xsum = px[0] + px[1] + px[2] + px[3]; dx = PV_SIGN(xsum) * (roundtab16[(PV_ABS(xsum)) & 0xF] + (((PV_ABS(xsum)) >> 4) << 1)); py[0] = video->motY[imv]; py[1] = video->motY[imv+1]; py[2] = video->motY[imv+mvwidth]; py[3] = video->motY[imv+mvwidth+1]; xsum = py[0] + py[1] + py[2] + py[3]; dy = PV_SIGN(xsum) * (roundtab16[(PV_ABS(xsum)) & 0xF] + (((PV_ABS(xsum)) >> 4) << 1)); } /* Pointer to previous luminance frame */ c_prev = prev->yChan; pred_block = video->mblock->pred_block; /* some blocks have no residue or INTER4V */ /*if (mode == MODE_INTER4V) 05/08/15 */ /* Motion Compensation for an 8x8 block within a MB */ /* (4 MV per MB) */ /* Call function that performs luminance prediction */ /* luminance_pred_mode_inter4v(xpos, ypos, px, py, c_prev, video->mblock->pred_block, width, height, round1, mvwidth, &xsum, &ysum);*/ c_comp = video->currVop->yChan + offset; xpred = (int)((xpos << 1) + px[0]); ypred = (int)((ypos << 1) + py[0]); if ((CBP >> 5)&1) { pred = pred_block; pred_width = 16; } else { pred = c_comp; pred_width = width; } /* check whether the MV points outside the frame */ if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) && ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE))) { /*****************************/ /* (x,y) is inside the frame */ /*****************************/ ; GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width), pred, width, (pred_width << 1) | round1); } else { /******************************/ /* (x,y) is outside the frame */ /******************************/ GetPredOutside(xpred, ypred, c_prev, pred, width, height, round1, pred_width); } /* Compute 
prediction values over current luminance MB */ /* (blocks 1); add motion vector prior to input; */ /* add 8 to x_pos to advance to next block */ xpred = (int)(((xpos + B_SIZE) << 1) + px[1]); ypred = (int)((ypos << 1) + py[1]); if ((CBP >> 4)&1) { pred = pred_block + 8; pred_width = 16; } else { pred = c_comp + 8; pred_width = width; } /* check whether the MV points outside the frame */ if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) && ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE))) { /*****************************/ /* (x,y) is inside the frame */ /*****************************/ GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width), pred, width, (pred_width << 1) | round1); } else { /******************************/ /* (x,y) is outside the frame */ /******************************/ GetPredOutside(xpred, ypred, c_prev, pred, width, height, round1, pred_width); } /* Compute prediction values over current luminance MB */ /* (blocks 2); add motion vector prior to input */ /* add 8 to y_pos to advance to block on next row */ xpred = (int)((xpos << 1) + px[2]); ypred = (int)(((ypos + B_SIZE) << 1) + py[2]); if ((CBP >> 3)&1) { pred = pred_block + 128; pred_width = 16; } else { pred = c_comp + (width << 3); pred_width = width; } /* check whether the MV points outside the frame */ if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) && ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE))) { /*****************************/ /* (x,y) is inside the frame */ /*****************************/ GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width), pred, width, (pred_width << 1) | round1); } else { /******************************/ /* (x,y) is outside the frame */ /******************************/ GetPredOutside(xpred, ypred, c_prev, pred, width, height, round1, pred_width); } /* Compute prediction values over current luminance MB */ /* (blocks 3); add motion vector prior to input; */ /* add 8 to x_pos and y_pos to 
advance to next block */ /* on next row */ xpred = (int)(((xpos + B_SIZE) << 1) + px[3]); ypred = (int)(((ypos + B_SIZE) << 1) + py[3]); if ((CBP >> 2)&1) { pred = pred_block + 136; pred_width = 16; } else { pred = c_comp + (width << 3) + 8; pred_width = width; } /* check whether the MV points outside the frame */ if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) && ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE))) { /*****************************/ /* (x,y) is inside the frame */ /*****************************/ GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width), pred, width, (pred_width << 1) | round1); } else { /******************************/ /* (x,y) is outside the frame */ /******************************/ GetPredOutside(xpred, ypred, c_prev, pred, width, height, round1, pred_width); } /* Call function to set de-blocking and de-ringing */ /* semaphores for luminance */ #ifdef PV_POSTPROC_ON if (video->postFilterType != PV_NO_POST_PROC) { if (mode&INTER_1VMASK) { pp_dec_y = video->pstprcTypCur + imv; ll[0] = 1; ll[1] = mvwidth - 1; ll[2] = 1; ll[3] = -mvwidth - 1; msk_deblock = pp_semaphore_luma(xpred, ypred, pp_dec_y, video->pstprcTypPrv, ll, &tmp, px[0], py[0], mvwidth, width, height); pp_dec_u = video->pstprcTypCur + (size >> 6) + ((imv + (xpos >> 3)) >> 2); pp_semaphore_chroma_inter(xpred, ypred, pp_dec_u, video->pstprcTypPrv, dx, dy, mvwidth, height, size, tmp, msk_deblock); } else { /* Post-processing mode (MBM_INTER8) */ /* deblocking and deringing) */ pp_dec_y = video->pstprcTypCur + imv; *pp_dec_y = 4; *(pp_dec_y + 1) = 4; *(pp_dec_y + mvwidth) = 4; *(pp_dec_y + mvwidth + 1) = 4; pp_dec_u = video->pstprcTypCur + (size >> 6) + ((imv + (xpos >> 3)) >> 2); *pp_dec_u = 4; pp_dec_u[size>>8] = 4; } } #endif /* xpred and ypred calculation for Chrominance is */ /* in full-pel resolution. 
*/ /* Chrominance */ /* width of chrominance data in pixels (y axis) */ width >>= 1; /* heigth of chrominance data in pixels (x axis) */ height >>= 1; /* Pointer to previous chrominance b frame */ cu_prev = prev->uChan; /* Pointer to previous chrominance r frame */ cv_prev = prev->vChan; /* x position in prediction data offset by motion vector */ /* xpred calculation for Chrominance is in full-pel */ /* resolution. */ xpred = xpos + dx; /* y position in prediction data offset by motion vector */ /* ypred calculation for Chrominance is in full-pel */ /* resolution. */ ypred = ypos + dy; cu_comp = video->currVop->uChan + (offset >> 2) + (xpos >> 2); cv_comp = video->currVop->vChan + (offset >> 2) + (xpos >> 2); /* Call function that performs chrominance prediction */ /* chrominance_pred(xpred, ypred, cu_prev, cv_prev, pred_block, width_uv, height_uv, round1);*/ if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) && ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE))) { /*****************************/ /* (x,y) is inside the frame */ /*****************************/ if ((CBP >> 1)&1) { pred = pred_block + 256; pred_width = 16; } else { pred = cu_comp; pred_width = width; } /* Compute prediction for Chrominance b (block[4]) */ GetPredAdvBTable[ypred&1][xpred&1](cu_prev + (xpred >> 1) + ((ypred >> 1)*width), pred, width, (pred_width << 1) | round1); if (CBP&1) { pred = pred_block + 264; pred_width = 16; } else { pred = cv_comp; pred_width = width; } /* Compute prediction for Chrominance r (block[5]) */ GetPredAdvBTable[ypred&1][xpred&1](cv_prev + (xpred >> 1) + ((ypred >> 1)*width), pred, width, (pred_width << 1) | round1); return ; } else { /******************************/ /* (x,y) is outside the frame */ /******************************/ if ((CBP >> 1)&1) { pred = pred_block + 256; pred_width = 16; } else { pred = cu_comp; pred_width = width; } /* Compute prediction for Chrominance b (block[4]) */ GetPredOutside(xpred, ypred, cu_prev, pred, width, height, round1, 
pred_width);

        if (CBP&1)
        {
            pred = pred_block + 264;
            pred_width = 16;
        }
        else
        {
            pred = cv_comp;
            pred_width = width;
        }

        /* Compute prediction for Chrominance r (block[5]) */
        GetPredOutside(xpred, ypred, cv_prev,
                       pred, width, height, round1, pred_width);

        return ;
    }
}

/*** special function for skipped macroblock,  Aug 15, 2005 */
/*
 * SkippedMBMotionComp
 *
 * Reconstructs the current macroblock of a "skipped" MB by copying the
 * co-located 16x16 luminance block and both co-located 8x8 chrominance
 * blocks straight from the previous VOP (i.e. zero motion vector and no
 * residual data are applied).
 *
 * When post-processing is compiled in (PV_POSTPROC_ON) and enabled at
 * run time, the per-block deblocking/deringing semaphore entries for
 * this MB are likewise copied from the previous frame's semaphore map.
 *
 * Input : video - decoder state; uses prevVop/currVop pixel planes and
 *                 mbnum_row/mbnum_col to locate the macroblock.
 * Output: pixels written into video->currVop (and pstprcTypCur flags
 *         when post-processing is active).  Returns nothing.
 */
void SkippedMBMotionComp(
    VideoDecData *video
)
{
    Vop *prev = video->prevVop;
    Vop *comp;
    int ypos, xpos;
    PIXEL *c_comp, *c_prev;
    PIXEL *cu_comp, *cu_prev;
    PIXEL *cv_comp, *cv_prev;
    int width, width_uv;
    int32 offset;

#ifdef PV_POSTPROC_ON // 2/14/2001
    int imv;
    int32 size = (int32) video->nTotalMB << 8;
    uint8 *pp_dec_y, *pp_dec_u;
    uint8 *pp_prev1;
    int mvwidth = video->nMBPerRow << 1;
#endif

    width = video->width;
    width_uv = width >> 1;
    /* top-left pixel position of this MB; mbnum_row/mbnum_col are in
       macroblock units, hence the <<4 (16 pixels per MB). */
    ypos = video->mbnum_row << 4 ;
    xpos = video->mbnum_col << 4 ;
    offset = (int32)ypos * width + xpos;

    /* zero motion compensation for previous frame */
    /*mby*width + mbx;*/
    c_prev  = prev->yChan + offset;
    /*by*width_uv + bx;*/
    cu_prev = prev->uChan + (offset >> 2) + (xpos >> 2);
    /*by*width_uv + bx;*/
    cv_prev = prev->vChan + (offset >> 2) + (xpos >> 2);

    comp = video->currVop;

    c_comp  = comp->yChan + offset;
    cu_comp = comp->uChan + (offset >> 2) + (xpos >> 2);
    cv_comp = comp->vChan + (offset >> 2) + (xpos >> 2);

    /* Copy previous reconstructed frame into the current frame */
    PutSKIPPED_MB(c_comp,  c_prev, width);
    PutSKIPPED_B(cu_comp, cu_prev, width_uv);
    PutSKIPPED_B(cv_comp, cv_prev, width_uv);

    /*  10/24/2000 post_processing semaphore generation */
#ifdef PV_POSTPROC_ON // 2/14/2001
    if (video->postFilterType != PV_NO_POST_PROC)
    {
        /* index of this MB's entry in the 8x8-block semaphore map */
        imv = (offset >> 6) - (xpos >> 6) + (xpos >> 3);
        /* Post-processing mode (copy previous MB) */
        /* copy all four luma 8x8-block flags from the previous map */
        pp_prev1 = video->pstprcTypPrv + imv;
        pp_dec_y = video->pstprcTypCur + imv;
        *pp_dec_y = *pp_prev1;
        *(pp_dec_y + 1) = *(pp_prev1 + 1);
        *(pp_dec_y + mvwidth) = *(pp_prev1 + mvwidth);
        *(pp_dec_y + mvwidth + 1) = *(pp_prev1 + mvwidth + 1);

        /* chrominance */
        /*4*MB_in_width*MB_in_height*/
        /* U flag at (size>>6) offset; V flag a further (size>>8) away */
        pp_prev1 = video->pstprcTypPrv + (size >> 6) + ((imv + (xpos >> 3)) >> 2);
        pp_dec_u = video->pstprcTypCur + (size >> 6) + ((imv + (xpos >> 3)) >> 2);
        *pp_dec_u = *pp_prev1;
        pp_dec_u[size>>8] = pp_prev1[size>>8];
    }
#endif
    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}

================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/mb_utils.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #include "mp4dec_lib.h" /* ====================================================================== / Function : PutSKIPPED_MB() Date : 04/03/2000 / ====================================================================== */ void PutSKIPPED_MB(uint8 *comp, uint8 *prev, int width) { int32 *temp0, *temp1; int row; row = MB_SIZE; while (row) { temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; temp1[2] = temp0[2]; temp1[3] = temp0[3]; comp += width; prev += width; temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; temp1[2] = temp0[2]; temp1[3] = temp0[3]; comp += width; prev += width; temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; temp1[2] = temp0[2]; temp1[3] = temp0[3]; comp += width; prev += width; temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; temp1[2] = temp0[2]; temp1[3] = temp0[3]; comp += width; prev += width; row -= 4; } } /* ====================================================================== / Function : PutSKIPPED_B() Date : 04/03/2000 / ====================================================================== */ void PutSKIPPED_B(uint8 *comp, uint8 *prev, int width) { int32 *temp0, *temp1; int row; row = B_SIZE; while (row) { temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; comp += width; prev += width; temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; comp += width; prev += width; temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; comp += width; prev += width; temp0 = (int32 *)prev; temp1 = (int32 *)comp; temp1[0] = temp0[0]; temp1[1] = temp0[1]; comp += width; prev += width; row -= 4; } } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/mbtype_mode.h 
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
/* Lookup table mapping a decoded macroblock-type index to the decoder's
   internal MODE_* constants (defined in mp4def.h).  NOTE(review):
   presumably indexed by the MB-type value parsed from the bitstream —
   confirm against the callers; the extra MODE_INTER4V_Q entry exists
   only when H.263 Annex I/J/K/T support is compiled in. */
const static int MBtype_mode[] =
{
    MODE_INTER,
    MODE_INTER_Q,
    MODE_INTER4V,
    MODE_INTRA,
    MODE_INTRA_Q,
#ifdef PV_ANNEX_IJKT_SUPPORT
    MODE_INTER4V_Q,
#endif
    MODE_SKIPPED
};

#ifdef PV_ANNEX_IJKT_SUPPORT
/* Quantizer-related lookup tables for H.263 Annex T (modified
   quantization), each indexed by a 5-bit value (0..31).  Exact
   semantics are not visible from this file — see the Annex T decode
   path that reads them. */
const static int16 DQ_tab_Annex_T_10[32] =
{
    0, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3
};

const static int16 DQ_tab_Annex_T_11[32] =
{
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, -5
};

/* Luma QP -> chroma QP mapping table (0..31 in, clamped toward 15). */
const static int16 MQ_chroma_QP_table[32] =
{
    0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 11,
    12, 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15
};
#endif

================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/motion_comp.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef motion_comp_h #define motion_comp_h /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. 
----------------------------------------------------------------------------*/ /* CBP Mask defines used in chrominance prediction */ #define CBP_MASK_CHROMA_BLK4 0x2 #define CBP_MASK_CHROMA_BLK5 0x1 /* CBP Mask defines used in luminance prediction (MODE_INTER4V) */ #define CBP_MASK_BLK0_MODE_INTER4V 0x20 #define CBP_MASK_BLK1_MODE_INTER4V 0x10 #define CBP_MASK_BLK2_MODE_INTER4V 0x08 #define CBP_MASK_BLK3_MODE_INTER4V 0x04 /* CBP Mask defines used in luminance prediction (MODE_INTER or MODE_INTER_Q) */ #define CBP_MASK_MB_MODE_INTER 0x3c /*---------------------------------------------------------------------------- ; EXTERNAL VARIABLES REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ #ifdef __cplusplus extern "C" { #endif #define CLIP_RESULT(x) if(x & -256){x = 0xFF & (~(x>>31));} #define ADD_AND_CLIP1(x) x += (pred_word&0xFF); CLIP_RESULT(x); #define ADD_AND_CLIP2(x) x += ((pred_word>>8)&0xFF); CLIP_RESULT(x); #define ADD_AND_CLIP3(x) x += ((pred_word>>16)&0xFF); CLIP_RESULT(x); #define ADD_AND_CLIP4(x) x += ((pred_word>>24)&0xFF); CLIP_RESULT(x); #define ADD_AND_CLIP(x,y) { x9 = ~(x>>8); \ if(x9!=-1){ \ x9 = ((uint32)x9)>>24; \ y = x9|(y<<8); \ } \ else \ { \ y = x|(y<<8); \ } \ } static int (*const GetPredAdvBTable[2][2])(uint8*, uint8*, int, int) = { {&GetPredAdvancedBy0x0, &GetPredAdvancedBy0x1}, {&GetPredAdvancedBy1x0, &GetPredAdvancedBy1x1} }; /*---------------------------------------------------------------------------- ; SIMPLE TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; ENUMERATED TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; STRUCTURES TYPEDEF'S 
----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #endif #ifdef __cplusplus } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/mp4dec_lib.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef _MP4DECLIB_H_ #define _MP4DECLIB_H_ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "oscl_mem.h" #include "mp4def.h" /* typedef */ #include "mp4lib_int.h" /* main video structure */ /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL VARIABLES REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; SIMPLE TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; ENUMERATED TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; STRUCTURES TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /* defined in pvdec_api.c, these function are not supposed to be */ /* exposed to programmers outside PacketVideo. 
08/15/2000. */ uint VideoDecoderErrorDetected(VideoDecData *video); #ifdef ENABLE_LOG void m4vdec_dprintf(char *format, ...); #define mp4dec_log(message) m4vdec_dprintf(message) #else #define mp4dec_log(message) #endif /*--------------------------------------------------------------------------*/ /* defined in frame_buffer.c */ PV_STATUS FillFrameBufferNew(BitstreamDecVideo *stream); PV_STATUS FillFrameBuffer(BitstreamDecVideo *stream, int short_header); /*--------------------------------------------------------------------------*/ /* defined in dc_ac_pred.c */ int cal_dc_scaler(int QP, int type); PV_STATUS PV_DecodePredictedIntraDC(int compnum, BitstreamDecVideo *stream, int16 *IntraDC_delta); void doDCACPrediction(VideoDecData *video, int comp, int16 *q_block, int *direction); #ifdef PV_ANNEX_IJKT_SUPPORT void doDCACPrediction_I(VideoDecData *video, int comp, int16 *q_block); #endif /*--------------------------------------------------------------------------*/ /* defined in block_idct.c */ void MBlockIDCTAdd(VideoDecData *video, int nz_coefs[]); void BlockIDCT(uint8 *dst, uint8 *pred, int16 *blk, int width, int nzcoefs, uint8 *bitmapcol, uint8 bitmaprow); void MBlockIDCT(VideoDecData *video); void BlockIDCT_intra(MacroBlock *mblock, PIXEL *c_comp, int comp, int width_offset); /*--------------------------------------------------------------------------*/ /* defined in combined_decode.c */ PV_STATUS DecodeFrameCombinedMode(VideoDecData *video); PV_STATUS GetMBheader(VideoDecData *video, int16 *QP); PV_STATUS GetMBData(VideoDecData *video); /*--------------------------------------------------------------------------*/ /* defined in datapart_decode.c */ PV_STATUS DecodeFrameDataPartMode(VideoDecData *video); PV_STATUS GetMBheaderDataPart_DQUANT_DC(VideoDecData *video, int16 *QP); PV_STATUS GetMBheaderDataPart_P(VideoDecData *video); PV_STATUS DecodeDataPart_I_VideoPacket(VideoDecData *video, int slice_counter); PV_STATUS DecodeDataPart_P_VideoPacket(VideoDecData 
*video, int slice_counter); PV_STATUS GetMBData_DataPart(VideoDecData *video); /*--------------------------------------------------------------------------*/ /* defined in packet_util.c */ PV_STATUS PV_ReadVideoPacketHeader(VideoDecData *video, int *next_MB); PV_STATUS RecoverPacketError(BitstreamDecVideo *stream, int marker_length, int32 *nextVop); PV_STATUS RecoverGOBError(BitstreamDecVideo *stream, int marker_length, int32 *vopPos); PV_STATUS PV_GobHeader(VideoDecData *video); #ifdef PV_ANNEX_IJKT_SUPPORT PV_STATUS PV_H263SliceHeader(VideoDecData *videoInt, int *next_MB); #endif /*--------------------------------------------------------------------------*/ /* defined in motion_comp.c */ void MBMotionComp(VideoDecData *video, int CBP); void SkippedMBMotionComp(VideoDecData *video); /*--------------------------------------------------------------------------*/ /* defined in chrominance_pred.c */ void chrominance_pred( int xpred, /* i */ int ypred, /* i */ uint8 *cu_prev, /* i */ uint8 *cv_prev, /* i */ uint8 *pred_block, /* i */ int width_uv, /* i */ int height_uv, /* i */ int round1 ); /*--------------------------------------------------------------------------*/ /* defined in luminance_pred_mode_inter.c */ void luminance_pred_mode_inter( int xpred, /* i */ int ypred, /* i */ uint8 *c_prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int height, /* i */ int round1 ); /*--------------------------------------------------------------------------*/ /* defined in luminance_pred_mode_inter4v.c */ void luminance_pred_mode_inter4v( int xpos, /* i */ int ypos, /* i */ MOT *px, /* i */ MOT *py, /* i */ uint8 *c_prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int height, /* i */ int round1, /* i */ int mvwidth, /* i */ int *xsum_ptr, /* i/o */ int *ysum_ptr /* i/o */ ); /*--------------------------------------------------------------------------*/ /* defined in pp_semaphore_chroma_inter.c */ #ifdef PV_POSTPROC_ON void pp_semaphore_chroma_inter( int 
xpred, /* i */ int ypred, /* i */ uint8 *pp_dec_u, /* i/o */ uint8 *pstprcTypPrv, /* i */ int dx, /* i */ int dy, /* i */ int mvwidth, /* i */ int height, /* i */ int32 size, /* i */ int mv_loc, /* i */ uint8 msk_deblock /* i */ ); /*--------------------------------------------------------------------------*/ /* defined in pp_semaphore_luma.c */ uint8 pp_semaphore_luma( int xpred, /* i */ int ypred, /* i */ uint8 *pp_dec_y, /* i/o */ uint8 *pstprcTypPrv, /* i */ int *ll, /* i */ int *mv_loc, /* i/o */ int dx, /* i */ int dy, /* i */ int mvwidth, /* i */ int width, /* i */ int height /* i */ ); #endif /*--------------------------------------------------------------------------*/ /* defined in get_pred_adv_mb_add.c */ int GetPredAdvancedMB( int xpos, int ypos, uint8 *c_prev, uint8 *pred_block, int width, int rnd1 ); /*--------------------------------------------------------------------------*/ /* defined in get_pred_adv_b_add.c */ int GetPredAdvancedBy0x0( uint8 *c_prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ); int GetPredAdvancedBy0x1( uint8 *c_prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ); int GetPredAdvancedBy1x0( uint8 *c_prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ); int GetPredAdvancedBy1x1( uint8 *c_prev, /* i */ uint8 *pred_block, /* i */ int width, /* i */ int pred_width_rnd /* i */ ); /*--------------------------------------------------------------------------*/ /* defined in get_pred_outside.c */ int GetPredOutside( int xpos, int ypos, uint8 *c_prev, uint8 *pred_block, int width, int height, int rnd1, int pred_width ); /*--------------------------------------------------------------------------*/ /* defined in find_pmvsErrRes.c */ void mv_prediction(VideoDecData *video, int block, MOT *mvx, MOT *mvy); /*--------------------------------------------------------------------------*/ 
/*--------------------------------------------------------------------------*/ /* defined in mb_utils.c */ void Copy_MB_into_Vop(uint8 *comp, int yChan[][NCOEFF_BLOCK], int width); void Copy_B_into_Vop(uint8 *comp, int cChan[], int width); void PutSKIPPED_MB(uint8 *comp, uint8 *c_prev, int width); void PutSKIPPED_B(uint8 *comp, uint8 *c_prev, int width); /*--------------------------------------------------------------------------*/ /* defined in vop.c */ PV_STATUS DecodeGOVHeader(BitstreamDecVideo *stream, uint32 *time_base); PV_STATUS DecodeVOLHeader(VideoDecData *video, int layer); PV_STATUS DecodeVOPHeader(VideoDecData *video, Vop *currVop, Bool use_ext_tiemstamp); PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop); PV_STATUS DecodeH263Header(VideoDecData *video, Vop *currVop); PV_STATUS PV_DecodeVop(VideoDecData *video); uint32 CalcVopDisplayTime(Vol *currVol, Vop *currVop, int shortVideoHeader); /*--------------------------------------------------------------------------*/ /* defined in post_proc.c */ #ifdef PV_ANNEX_IJKT_SUPPORT void H263_Deblock(uint8 *rec, int width, int height, int16 *QP_store, uint8 *mode, int chr, int T); #endif int PostProcSemaphore(int16 *q_block); void PostFilter(VideoDecData *video, int filer_type, uint8 *output); void FindMaxMin(uint8 *ptr, int *min, int *max, int incr); void DeringAdaptiveSmoothMMX(uint8 *img, int incr, int thres, int mxdf); void AdaptiveSmooth_NoMMX(uint8 *Rec_Y, int v0, int h0, int v_blk, int h_blk, int thr, int width, int max_diff); void Deringing_Luma(uint8 *Rec_Y, int width, int height, int16 *QP_store, int Combined, uint8 *pp_mod); void Deringing_Chroma(uint8 *Rec_C, int width, int height, int16 *QP_store, int Combined, uint8 *pp_mod); void CombinedHorzVertFilter(uint8 *rec, int width, int height, int16 *QP_store, int chr, uint8 *pp_mod); void CombinedHorzVertFilter_NoSoftDeblocking(uint8 *rec, int width, int height, int16 *QP_store, int chr, uint8 *pp_mod); void CombinedHorzVertRingFilter(uint8 
*rec, int width, int height, int16 *QP_store, int chr, uint8 *pp_mod); /*--------------------------------------------------------------------------*/ /* defined in conceal.c */ void ConcealTexture_I(VideoDecData *video, int32 startFirstPartition, int mb_start, int mb_stop, int slice_counter); void ConcealTexture_P(VideoDecData *video, int mb_start, int mb_stop, int slice_counter); void ConcealPacket(VideoDecData *video, int mb_start, int mb_stop, int slice_counter); void CopyVopMB(Vop *curr, uint8 *prev, int mbnum, int width, int height); /* define in vlc_dequant.c , 09/18/2000*/ #ifdef PV_SUPPORT_MAIN_PROFILE int VlcDequantMpegIntraBlock(void *video, int comp, int switched, uint8 *bitmapcol, uint8 *bitmaprow); int VlcDequantMpegInterBlock(void *video, int comp, uint8 *bitmapcol, uint8 *bitmaprow); #endif int VlcDequantH263IntraBlock(VideoDecData *video, int comp, int switched, uint8 *bitmapcol, uint8 *bitmaprow); int VlcDequantH263IntraBlock_SH(VideoDecData *video, int comp, uint8 *bitmapcol, uint8 *bitmaprow); int VlcDequantH263InterBlock(VideoDecData *video, int comp, uint8 *bitmapcol, uint8 *bitmaprow); #ifdef __cplusplus } #endif /* __cplusplus */ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/mp4def.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef _PVDECDEF_H_ #define _PVDECDEF_H_ #include "mp4dec_api.h" typedef enum { PV_SUCCESS, PV_FAIL, PV_MB_STUFFING, /* hit Macroblock_Stuffing */ PV_END_OF_VOP, /* hit End_of_Video_Object_Plane */ PV_END_OF_MB /* hit End_of_Macroblock */ #ifdef PV_TOLERATE_VOL_ERRORS , PV_BAD_VOLHEADER #endif } PV_STATUS; typedef uint8 PIXEL; typedef int16 MOT; /* : "int" type runs faster on RISC machine */ #define TRUE 1 #define FALSE 0 #define PV_ABS(x) (((x)<0)? -(x) : (x)) #define PV_SIGN(x) (((x)<0)? -1 : 1) #define PV_SIGN0(a) (((a)<0)? -1 : (((a)>0) ? 1 : 0)) #define PV_MAX(a,b) ((a)>(b)? (a):(b)) #define PV_MIN(a,b) ((a)<(b)? (a):(b)) #define PV_MEDIAN(A,B,C) ((A) > (B) ? ((A) < (C) ? (A) : (B) > (C) ? (B) : (C)): (B) < (C) ? (B) : (C) > (A) ? (C) : (A)) /* You don't want to use ((x>UB)?UB:(xUB) x = UB // Setting up the default values if not already defined by CML2 #define PV_MPEG4 0x0 #define PV_H263 0x1 #define PV_FLV1 0x2 #define MODE_INTRA 0x08 //01000 #define MODE_INTRA_Q 0x09 //01001 #define MODE_SKIPPED 0x10 //10000 #define MODE_INTER4V 0x14 //10100 #define MODE_INTER 0x16 //10110 #define MODE_INTER_Q 0x17 //10111 #define MODE_INTER4V_Q 0x15 //10101 #define INTER_1VMASK 0x2 #define Q_MASK 0x1 #define INTRA_MASK 0x8 #define INTER_MASK 0x4 #define I_VOP 0 #define P_VOP 1 #define B_VOP 2 #define LUMINANCE_DC_TYPE 1 #define CHROMINANCE_DC_TYPE 2 #define START_CODE_LENGTH 32 /* 11/30/98 */ #define NoMarkerFound -1 #define FoundRM 1 /* Resync Marker */ #define FoundVSC 2 /* VOP_START_CODE. 
*/ #define FoundGSC 3 /* GROUP_START_CODE */ #define FoundEOB 4 /* EOB_CODE */ /* PacketVideo "absolution timestamp" object. 06/13/2000 */ #define PVTS_START_CODE 0x01C4 #define PVTS_START_CODE_LENGTH 32 /* session layer and vop layer start codes */ #define VISUAL_OBJECT_SEQUENCE_START_CODE 0x01B0 #define VISUAL_OBJECT_SEQUENCE_END_CODE 0x01B1 #define VISUAL_OBJECT_START_CODE 0x01B5 #define VO_START_CODE 0x8 #define VO_HEADER_LENGTH 32 /* lengtho of VO header: VO_START_CODE + VO_ID */ #define SOL_START_CODE 0x01BE #define SOL_START_CODE_LENGTH 32 #define VOL_START_CODE 0x12 #define VOL_START_CODE_LENGTH 28 #define VOP_START_CODE 0x1B6 #define VOP_START_CODE_LENGTH 32 #define GROUP_START_CODE 0x01B3 #define GROUP_START_CODE_LENGTH 32 #define VOP_ID_CODE_LENGTH 5 #define VOP_TEMP_REF_CODE_LENGTH 16 #define USER_DATA_START_CODE 0x01B2 #define USER_DATA_START_CODE_LENGTH 32 #define START_CODE_PREFIX 0x01 #define START_CODE_PREFIX_LENGTH 24 #define SHORT_VIDEO_START_MARKER 0x20 #define SHORT_VIDEO_START_MARKER_LENGTH 22 #define SHORT_VIDEO_END_MARKER 0x3F #define FLV1_VIDEO_START_MARKER 0x10 #define FLV1_VIDEO_START_MARKER_LENGTH 21 #define GOB_RESYNC_MARKER 0x01 #define GOB_RESYNC_MARKER_LENGTH 17 /* motion and resync markers used in error resilient mode */ #define DC_MARKER 438273 #define DC_MARKER_LENGTH 19 #define MOTION_MARKER_COMB 126977 #define MOTION_MARKER_COMB_LENGTH 17 #define MOTION_MARKER_SEP 81921 #define MOTION_MARKER_SEP_LENGTH 17 #define RESYNC_MARKER 1 #define RESYNC_MARKER_LENGTH 17 #define SPRITE_NOT_USED 0 #define STATIC_SPRITE 1 #define ONLINE_SPRITE 2 #define GMC_SPRITE 3 /* macroblock and block size */ #define MB_SIZE 16 #define NCOEFF_MB (MB_SIZE*MB_SIZE) #define B_SIZE 8 #define NCOEFF_BLOCK (B_SIZE*B_SIZE) #define NCOEFF_Y NCOEFF_MB #define NCOEFF_U NCOEFF_BLOCK #define NCOEFF_V NCOEFF_BLOCK #define BLK_PER_MB 4 /* Number of blocks per MB */ /* VLC decoding related definitions */ #define VLC_ERROR (-1) #define VLC_ESCAPE 7167 /* macro utility 
*/
#define ZERO_OUT_64BYTES(x) { *((uint32*)x) = *(((uint32*)(x))+1) = \
        *(((uint32*)(x))+2) = *(((uint32*)(x))+3) = \
        *(((uint32*)(x))+4) = *(((uint32*)(x))+5) = \
        *(((uint32*)(x))+6) = *(((uint32*)(x))+7) = \
        *(((uint32*)(x))+8) = *(((uint32*)(x))+9) = \
        *(((uint32*)(x))+10) = *(((uint32*)(x))+11) = \
        *(((uint32*)(x))+12) = *(((uint32*)(x))+13) = \
        *(((uint32*)(x))+14) = *(((uint32*)(x))+15) = 0; }

#endif /* _PVDECDEF_H_ */


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/mp4lib_int.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef _MP4LIB_INT_H_
#define _MP4LIB_INT_H_

#include "mp4def.h"
#include "mp4dec_api.h" // extra structure

#undef ENABLE_LOG

/* number of frames over which the output bitrate/framerate is averaged */
#define BITRATE_AVERAGE_WINDOW 4
#define FRAMERATE_SCALE ((BITRATE_AVERAGE_WINDOW-1)*10000L)

#define FAST_IDCT /* , for fast Variable complexity IDCT */
//#define PV_DEC_EXTERNAL_IDCT /* for separate IDCT (i.e. no direct access to output frame) */
#define PV_ANNEX_IJKT_SUPPORT
#define mid_gray 1024

/* Input bitstream state: bits are served from two cached 32-bit words
 * (curr_word/next_word) that are refilled from bitstreamBuffer. */
typedef struct tagBitstream
{
    /* function that retrieves data from outside the library.   04/11/2000 */
    /* In frame-based decoding mode, this shall be NULL.   08/29/2000 */
    uint32 curr_word;
    uint32 next_word;
    uint8 *bitstreamBuffer;      /* pointer to buffer memory */
    int32 read_point;            /* starting point in the buffer to be read to cache */
    int incnt;                   /* bit left in cached */
    int incnt_next;
    uint32 bitcnt;               /* total bit read so-far (from inbfr)*/
    int32 data_end_pos;          /* should be added ,  06/07/2000 */
    int searched_frame_boundary;
} BitstreamDecVideo, *LPBitstreamDecVideo;

/* complexity estimation parameters */
typedef struct tagComplexity_Est
{
    uint8 text_1;  /* texture_complexity_estimation_set_1 */
    uint8 text_2;  /* texture_complexity_estimation_set_2 */
    uint8 mc;      /* motion_compensation_complexity */
} Complexity_Est;

/* One decoded Video Object Plane (a frame) plus its header syntax elements. */
typedef struct tagVop
{
    PIXEL *yChan;        /* The Y component */
    PIXEL *uChan;        /* The U component */
    PIXEL *vChan;        /* The V component */
    uint32 timeStamp;    /* Vop TimeStamp in msec */

    /* Actual syntax elements for VOP (standard) */
    int predictionType;  /* VOP prediction type */
    uint timeInc;        /* VOP time increment (relative to last mtb) */
    int vopCoded;
    int roundingType;
    int intraDCVlcThr;
    int16 quantizer;     /* VOP quantizer */
    int fcodeForward;    /* VOP dynamic range of motion vectors */
    int fcodeBackward;   /* VOP dynamic range of motion vectors */
    int refSelectCode;   /* enhancement layer reference select code */

    /* H.263 parameters */
    int gobNumber;
    int gobFrameID;
    int temporalRef;     /* temporal reference, roll over at 256 */
    int ETR;
} Vop;

/* Per-Video-Object-Layer configuration decoded from the VOL header. */
typedef struct tagVol
{
    int volID;                    /* VOL identifier (for tracking) */
    uint timeIncrementResolution; /* VOL time increment */
    int nbitsTimeIncRes;          /* number of bits for time increment */
    uint timeInc_offset;          /* timeInc offset for multiple VOP in a packet */
    uint32 moduloTimeBase;        /* internal decoder clock */
    int fixedVopRate;
    BitstreamDecVideo *bitstream; /* library bitstream buffer (input buffer) */

    int complexity_estDisable;    /* VOL disable complexity estimation */
    int complexity_estMethod;     /* VOL complexity estimation method */
    Complexity_Est complexity;    /* complexity estimation flags */

    /* Error Resilience Flags */
    int errorResDisable;          /* VOL disable error resilience mode */
    /* (Use Resynch markers) */
    int useReverseVLC;            /* VOL reversible VLCs */
    int dataPartitioning;         /* VOL data partitioning */

    /* Bit depth */
    uint bitsPerPixel;
//    int mid_gray;               /* 2^(bits_per_pixel+2) */

    /* Quantization related parameters */
    int quantPrecision;           /* Quantizer precision */
    uint quantType;               /* MPEG-4 or H.263 Quantization Type */
    /* Added loaded quant mat,  05/22/2000 */
    int loadIntraQuantMat;        /* Load intra quantization matrix */
    int loadNonIntraQuantMat;     /* Load nonintra quantization matrix */
    int iqmat[64];                /* Intra quant.matrix */
    int niqmat[64];               /* Non-intra quant.matrix */

    /* Parameters used for scalability */
    int scalability;              /* VOL scalability (flag) */
    int scalType;                 /* temporal = 0, spatial = 1, both = 2 */
    int refVolID;                 /* VOL id of reference VOL */
    int refSampDir;               /* VOL resol. of ref. VOL */
    int horSamp_n;                /* VOL hor. resampling of ref. VOL given by */
    int horSamp_m;                /* sampfac = hor_samp_n/hor_samp_m */
    int verSamp_n;                /* VOL ver. resampling of ref. VOL given by */
    int verSamp_m;                /* sampfac = ver_samp_n/ver_samp_m */
    int enhancementType;          /* VOL type of enhancement layer */

    /* profile and level */
    int32 profile_level_id;       /* 8-bit profile and level */ //  6/17/04
} Vol;

typedef int16 typeMBStore[6][NCOEFF_BLOCK];

/* Scratch storage for one macroblock: 4 luma + 2 chroma coefficient blocks. */
typedef struct tagMacroBlock
{
    typeMBStore block;       /* blocks */
    /* ACDC */
    uint8 pred_block[384];   /* prediction block,  Aug 3,2005 */
    uint8 bitmapcol[6][8];
    uint8 bitmaprow[6];
    int no_coeff[6];
    int DCScalarLum;         /* Luminance DC Scalar */
    int DCScalarChr;         /* Chrominance DC Scalar */
#ifdef PV_ANNEX_IJKT_SUPPORT
    int direction;
#endif
} MacroBlock;

/* Per-macroblock header arrays (one entry per MB of the frame). */
typedef struct tagHeaderInfoDecVideo
{
    uint8 *Mode;  /* Modes INTRA/INTER/etc. */
    uint8 *CBP;   /* MCBPC/CBPY stuff */
} HeaderInfoDecVideo;


/************************************************************/
/*                  VLC structures                          */
/************************************************************/
/* One decoded transform-coefficient event (LAST/RUN/LEVEL/SIGN). */
typedef struct tagTcoef
{
    uint last;
    uint run;
    int level;
    uint sign;
} Tcoef, *LPTcoef;

typedef struct tagVLCtab
{
    int32 val;
    int32 len;
} VLCtab, *LPVLCtab;

typedef struct tagVLCshorttab
{
    int16 val;
    int16 len;
} VLCshorttab, *LPVLCshorttab ; /* for space saving, Antoine Nguyen*/

typedef struct tagVLCtab2
{
    uint8 run;
    uint8 level;
    uint8 last;
    uint8 len;
} VLCtab2, *LPVLCtab2;  /*  10/24/2000 */

/* This type is designed for fast access of DC/AC */
/* prediction data.  If the compiler is smart */
/* enough, it will use shifting for indexing. */
/*  04/14/2000. */
typedef int16 typeDCStore[6];     /* ACDC */
typedef int16 typeDCACStore[4][8];

/* Global structure that can be passed around */
typedef struct tagVideoDecData
{
    BitstreamDecVideo *bitstream;  /* library bitstream buffer (input buffer) */
    /* Data For Layers (Scalability) */
    Vol **vol;                     /* Data stored for each VOL */
    /* Data used for reconstructing frames */
    Vop *currVop;                  /* Current VOP (frame) */
    Vop *prevVop;                  /* Previous VOP (frame) */
    /* Data used to facilitate multiple layer decoding.  05/04/2000 */
    Vop *prevEnhcVop;              /* New change to rid of memcpy().  04/24/2001 */
    Vop **vopHeader;               /* one for each layer.  08/29/2000 */

    /* I/O structures */
    MacroBlock *mblock;            /* Macroblock data structure */
    uint8 *acPredFlag;             /*  */

    /* scratch memory used in data partitioned mode */
    typeDCStore *predDC;           /* The DC coeffs for each MB */
    typeDCACStore *predDCAC_row;
    typeDCACStore *predDCAC_col;
    int usePrevQP;                 /* running QP decision switch */

    uint8 *sliceNo;                /* Slice indicator for each MB */
                                   /* changed this to a 1D */
                                   /* array for optimization */
    MOT *motX;                     /* Motion vector in X direction */
    MOT *motY;                     /* Motion vector in Y direction */
    HeaderInfoDecVideo headerInfo; /* MB Header information */
    int16 *QPMB;                   /* Quantizer value for each MB */

    uint8 *pstprcTypCur;           /* Postprocessing type for current frame */
    uint8 *pstprcTypPrv;           /* Postprocessing type for previous frame */

    /* scratch memory used in all modes */
    int mbnum;                     /* Macroblock number */
    uint mbnum_row;
    int mbnum_col;
    /* I added these variables since they are used a lot.  04/13/2000 */
    int nMBPerRow, nMBPerCol;      /* number of MBs in each row & column */
    int nTotalMB;
    /* for short video header */
    int nMBinGOB;                  /* number of MBs in GOB,  05/22/00 */
    int nGOBinVop;                 /* number of GOB in Vop  05/22/00 */

    /* VOL Dimensions */
    int width;                     /* Width */
    int height;                    /* Height */
    int displayWidth;              /* Handle image whose size is not a multiple of 16. */
    int displayHeight;             /* This is the actual size.  08/09/2000 */
    int32 size;

    /* Miscellaneous data points to be passed */
    int frame_idx;                 /* Current frame ID */
    int frameRate;                 /* Output frame Rate (over 10 seconds) */
    int32 duration;
    uint32 currTimestamp;
    int currLayer;                 /* Current frame layer */
    int shortVideoHeader;          /* shortVideoHeader mode */
    int intra_acdcPredDisable;     /* VOL disable INTRA DC prediction */
    int numberOfLayers;            /* Number of Layers */

    /* Frame to be used for concealment  07/07/2001 */
    uint8 *concealFrame;
    int vop_coding_type;

    /* framerate and bitrate statistics counters.  08/23/2000 */
    int32 nBitsPerVop[BITRATE_AVERAGE_WINDOW];
    uint32 prevTimestamp[BITRATE_AVERAGE_WINDOW];
    int nBitsForMBID;              /* how many bits required for MB number? */

    /* total data memory used by the decoder library.  08/23/2000 */
    int32 memoryUsage;

    /* flag to turn on/off error concealment or soft decoding */
    int errorConcealment;

    /* Application controls */
    VideoDecControls *videoDecControls;
    int postFilterType;            /* Postfilter mode  04/25/00 */

    /* dispatch pointers selected per bitstream mode (e.g. RVLC vs VLC) */
    PV_STATUS(*vlcDecCoeffIntra)(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra_luma*/);
    PV_STATUS(*vlcDecCoeffInter)(BitstreamDecVideo *stream, Tcoef *pTcoef);
    int initialized;

    /* Annex IJKT */
    int deblocking;
    int slice_structure;
    int modified_quant;
    int advanced_INTRA;
    int16 QP_CHR;                  /* ANNEX_T */
} VideoDecData;

/* for fast VLC+Dequant  10/12/2000*/
typedef int (*VlcDequantBlockFuncP)(void *video, int comp, int switched,
                                    uint8 *bitmaprow, uint8 *bitmapcol);

//////////////////////////////////////////////////////////////
//                   Decoder structures                     //
//////////////////////////////////////////////////////////////
#endif /* _MP4LIB_INT_H_ */


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/packet_util.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "mp4dec_lib.h"
#include "vlc_decode.h"
#include "bitstream.h"

/***********************************************************CommentBegin******
*  04/13/2000 : initial modification to the new PV-Decoder
*               Lib format.
*  04/16/2001 : Removed PV_END_OF_BUFFER case, error resilience
***********************************************************CommentEnd********/

/* Parse an MPEG-4 video packet (resync) header at the current bitstream
 * position.
 *
 * On finding a resync marker: byte-aligns, consumes the marker, reads the
 * macroblock number (returned through *next_MB, clamped into a valid range),
 * reads quant_scale into currVop->quantizer, and, when the HEC bit is set,
 * skips the redundant VOP header fields.
 * Returns PV_END_OF_VOP when a start-code prefix is found instead of a
 * resync marker, PV_FAIL on an illegal header (e.g. quantizer == 0), the
 * bitstream error status on a read failure, PV_SUCCESS otherwise. */
PV_STATUS PV_ReadVideoPacketHeader(VideoDecData *video, int *next_MB)
{
    PV_STATUS status;
    Vol *currVol = video->vol[video->currLayer];
    Vop *currVop = video->currVop;
    BitstreamDecVideo *stream = video->bitstream;

    int fcode_forward;
    int resync_marker_length;
    int nbits = video->nBitsForMBID;
    uint32 tmpvar32;
    uint tmpvar16;
    int16 quantizer;
    int nTotalMB = video->nTotalMB;

    fcode_forward = currVop->fcodeForward;
    /* resync marker is 17 bits for I-VOPs, 16+fcode for P/B-VOPs */
    resync_marker_length = 17;
    if (currVop->predictionType != I_VOP) resync_marker_length = 16 + fcode_forward;

    status = PV_BitstreamShowBitsByteAlign(stream, resync_marker_length, &tmpvar32);
    /* if (status != PV_SUCCESS && status != PV_END_OF_BUFFER) return status; */

    if (tmpvar32 == RESYNC_MARKER)
    {
        //  DecNextStartCode(stream);
        PV_BitstreamByteAlign(stream);
        BitstreamReadBits32(stream, resync_marker_length);

        *next_MB = (int) BitstreamReadBits16(stream, nbits);
        //  if (*next_MB <= video->mbnum)   /* needs more investigation */
        //      *next_MB = video->mbnum+1;
        if (*next_MB >= nTotalMB)  /* fix  04/05/01 */
        {
            /* out-of-range MB number: resume at the next MB instead */
            *next_MB = video->mbnum + 1;
            if (*next_MB >= nTotalMB)  /* this check is needed */
                *next_MB = nTotalMB - 1;
        }
        quantizer = (int16) BitstreamReadBits16(stream, currVol->quantPrecision);
        if (quantizer == 0) return PV_FAIL;   /*  04/03/01 */
        currVop->quantizer = quantizer;

        /* if we have HEC, read some redundant VOP header information */
        /* this part needs improvement  04/05/01 */
        if (BitstreamRead1Bits(stream))
        {
            int time_base = -1;

            /* modulo_time_base (? bits) */
            do
            {
                time_base++;
                tmpvar16 = BitstreamRead1Bits(stream);
            }
            while (tmpvar16 == 1);

            /* marker bit */
            BitstreamRead1Bits(stream);

            /* vop_time_increment (1-15 bits) */
            BitstreamReadBits16(stream, currVol->nbitsTimeIncRes);

            /* marker bit */
            BitstreamRead1Bits(stream);

            /* vop_prediction_type (2 bits) */
            BitstreamReadBits16(stream, 2);

            /* Added intra_dc_vlc_thr reading */
            BitstreamReadBits16(stream, 3);

            /* fcodes */
            if (currVop->predictionType != I_VOP)
            {
                fcode_forward = (int) BitstreamReadBits16(stream, 3);
                if (currVop->predictionType == B_VOP)
                {
                    BitstreamReadBits16(stream, 3);
                }
            }
        }
    }
    else
    {
        /* no resync marker: either end of VOP (a start code follows)
         * or a corrupted stream */
        PV_BitstreamByteAlign(stream);   /*  */
        status = BitstreamCheckEndBuffer(stream);   /* return end_of_VOP  03/30/01 */
        if (status != PV_SUCCESS)
        {
            return status;
        }
        status = BitstreamShowBits32HC(stream, &tmpvar32);    /*  07/07/01 */
        /* -16 = 0xFFFFFFF0*/
        if ((tmpvar32 & 0xFFFFFFF0) == VISUAL_OBJECT_SEQUENCE_START_CODE) /* start code mask 00 00 01 */
        {
            /* we don't have to check for legal stuffing here.  05/08/2000 */
            return PV_END_OF_VOP;
        }
        else
        {
            return PV_FAIL;
        }
    }
    return PV_SUCCESS;
}

/***********************************************************CommentBegin******
*  3/10/00  : initial modification to the
*             new PV-Decoder Lib format.
*  04/17/01 : remove PV_END_OF_BUFFER, error checking
***********************************************************CommentEnd********/

/* Parse an H.263 GOB (Group Of Blocks) header at the current position.
 * Accepts the GOB resync marker either at the current bit position or at
 * the next byte-aligned position.  Updates currVop->gobNumber, gobFrameID
 * and quantizer.  Returns PV_END_OF_VOP when the GN field signals end of
 * picture (0 or 31), PV_FAIL on a bad marker, GOB number, or zero
 * quantizer. */
PV_STATUS PV_GobHeader(VideoDecData *video)
{
    uint32 tmpvar;
    Vop *currVop = video->currVop;
    BitstreamDecVideo *stream = video->bitstream;
    int quantPrecision = 5;
    int16 quantizer;

    BitstreamShowBits32(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);

    if (tmpvar != GOB_RESYNC_MARKER)
    {
        PV_BitstreamShowBitsByteAlign(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);
        if (tmpvar != GOB_RESYNC_MARKER)
        {
            return PV_FAIL;
        }
        else
            PV_BitstreamByteAlign(stream);
        /* if bytealigned GOBHEADER search is performed */
        /* then no more noforcestuffing */
    }

    /* we've got a GOB header info here */
    BitstreamShowBits32(stream, GOB_RESYNC_MARKER_LENGTH + 5, &tmpvar);
    tmpvar &= 0x1F;   /* keep only the 5-bit GOB number (GN) */

    if (tmpvar == 0)
    {
        return PV_END_OF_VOP;
    }

    if (tmpvar == 31)
    {
        PV_BitstreamFlushBits(stream, GOB_RESYNC_MARKER_LENGTH + 5);
        BitstreamByteAlignNoForceStuffing(stream);
        return PV_END_OF_VOP;
    }

    PV_BitstreamFlushBits(stream, GOB_RESYNC_MARKER_LENGTH + 5);
    currVop->gobNumber = (int) tmpvar;
    if (currVop->gobNumber >= video->nGOBinVop) return PV_FAIL;
    currVop->gobFrameID = (int) BitstreamReadBits16(stream, 2);
    quantizer = (int16) BitstreamReadBits16(stream, quantPrecision);
    if (quantizer == 0) return PV_FAIL;   /*  04/03/01 */
    currVop->quantizer = quantizer;
    return PV_SUCCESS;
}

#ifdef PV_ANNEX_IJKT_SUPPORT
/* Parse an H.263 Annex K slice header at the current position.
 * On a resync marker: reads the slice emulation prevention bits, the MB
 * address at which decoding resumes (returned through *next_MB, clamped
 * into a valid range), the quantizer and the GOB frame ID.  Returns
 * PV_END_OF_VOP when a short-video start marker follows instead, PV_FAIL
 * on malformed fields. */
PV_STATUS PV_H263SliceHeader(VideoDecData *video, int *next_MB)
{
    PV_STATUS status;
    uint32 tmpvar;
    Vop *currVop = video->currVop;
    BitstreamDecVideo *stream = video->bitstream;
    int nTotalMB = video->nTotalMB;
    int16 quantizer;

    PV_BitstreamShowBitsByteAlignNoForceStuffing(stream, 17, &tmpvar);
    if (tmpvar == RESYNC_MARKER)
    {
        BitstreamByteAlignNoForceStuffing(stream);
        PV_BitstreamFlushBits(stream, 17);

        /* sebp1 must be 1 */
        if (!BitstreamRead1Bits(stream))
        {
            return PV_FAIL;
        }
        *next_MB = BitstreamReadBits16(stream, video->nBitsForMBID);
        if (*next_MB >= nTotalMB)  /* fix  04/05/01 */
        {
            *next_MB = video->mbnum + 1;
            if (*next_MB >= nTotalMB)  /* this check is needed */
                *next_MB = nTotalMB - 1;
        }
        /* we will not parse sebp2 for large pictures 3GPP */
        quantizer = (int16) BitstreamReadBits16(stream, 5);
        if (quantizer == 0) return PV_FAIL;
        currVop->quantizer = quantizer;

        /* sebp3 must be 1 */
        if (!BitstreamRead1Bits(stream))
        {
            return PV_FAIL;
        }
        currVop->gobFrameID = (int) BitstreamReadBits16(stream, 2);
    }
    else
    {
        status = BitstreamCheckEndBuffer(stream);   /* return end_of_VOP  03/30/01 */
        if (status != PV_SUCCESS)
        {
            return status;
        }
        PV_BitstreamShowBitsByteAlign(stream, SHORT_VIDEO_START_MARKER_LENGTH, &tmpvar);
        if (tmpvar == SHORT_VIDEO_START_MARKER)
        {
            /* we don't have to check for legal stuffing here.  05/08/2000 */
            return PV_END_OF_VOP;
        }
        else
        {
            return PV_FAIL;
        }
    }
    return PV_SUCCESS;
}
#endif


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/post_filter.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "mp4dec_lib.h"

#ifdef PV_ANNEX_IJKT_SUPPORT
#include "motion_comp.h"
#include "mbtype_mode.h"
/* maps a quantizer value to a deblocking filter strength */
const static int STRENGTH_tab[] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12};
#endif

#ifdef PV_POSTPROC_ON
/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
/* Copy the decoded YUV420 frame into `output` and apply the requested
 * post-filters (deblocking and/or deringing, per the PV_DEBLOCK/PV_DERING
 * bits of filter_type) to the luma plane and both chroma planes in place.
 * filter_type == 0 means plain copy.  Finally swaps the current/previous
 * postprocessing-type buffers. */
void PostFilter(
    VideoDecData *video,
    int filter_type,
    uint8 *output)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    uint8 *pp_mod;
    int16 *QP_store;
    int combined_with_deblock_filter;
    int nTotalMB = video->nTotalMB;
    int width, height;
    int32 size;
    int softDeblocking;
    uint8 *decodedFrame = video->videoDecControls->outputFrame;

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/
    width = video->width;
    height = video->height;
    size = (int32)width * height;

    /* copy Y, U, V planes (YUV420: each chroma plane is size/4 bytes) */
    oscl_memcpy(output, decodedFrame, size);
    oscl_memcpy(output + size, decodedFrame + size, (size >> 2));
    oscl_memcpy(output + size + (size >> 2), decodedFrame + size + (size >> 2), (size >> 2));

    if (filter_type == 0)
        return;

    /* The softDecoding cutoff corresponds to ~93000 bps for QCIF 15fps clip */
    if (PVGetDecBitrate(video->videoDecControls) > (100*video->frameRate*(size >> 12))) // MC_sofDeblock
        softDeblocking = FALSE;
    else
        softDeblocking = TRUE;

    combined_with_deblock_filter = filter_type & PV_DEBLOCK;

    QP_store = video->QPMB;

    /* Luma */
    pp_mod = video->pstprcTypCur;

    if ((filter_type & PV_DEBLOCK) && (filter_type & PV_DERING))
    {
        CombinedHorzVertRingFilter(output, width, height, QP_store, 0, pp_mod);
    }
    else
    {
        if (filter_type & PV_DEBLOCK)
        {
            if (softDeblocking)
            {
                CombinedHorzVertFilter(output, width, height,
                                       QP_store, 0, pp_mod);
            }
            else
            {
                CombinedHorzVertFilter_NoSoftDeblocking(output, width, height,
                                                        QP_store, 0, pp_mod);
            }
        }
        if (filter_type & PV_DERING)
        {
            Deringing_Luma(output, width, height, QP_store,
                           combined_with_deblock_filter, pp_mod);
        }
    }

    /* Chroma */
    /* luma uses 4 pp_mod entries per MB; chroma planes use 1 each */
    pp_mod = video->pstprcTypCur + (nTotalMB << 2);
    output += size;

    if ((filter_type & PV_DEBLOCK) && (filter_type & PV_DERING))
    {
        CombinedHorzVertRingFilter(output, (int)(width >> 1), (int)(height >> 1), QP_store, (int) 1, pp_mod);
    }
    else
    {
        if (filter_type & PV_DEBLOCK)
        {
            if (softDeblocking)
            {
                CombinedHorzVertFilter(output, (int)(width >> 1),
                                       (int)(height >> 1), QP_store, (int) 1, pp_mod);
            }
            else
            {
                CombinedHorzVertFilter_NoSoftDeblocking(output, (int)(width >> 1),
                        (int)(height >> 1), QP_store, (int) 1, pp_mod);
            }
        }
        if (filter_type & PV_DERING)
        {
            Deringing_Chroma(output, (int)(width >> 1),
                             (int)(height >> 1), QP_store,
                             combined_with_deblock_filter, pp_mod);
        }
    }

    pp_mod += nTotalMB;
    output += (size >> 2);

    if ((filter_type & PV_DEBLOCK) && (filter_type & PV_DERING))
    {
        CombinedHorzVertRingFilter(output, (int)(width >> 1), (int)(height >> 1), QP_store, (int) 1, pp_mod);
    }
    else
    {
        if (filter_type & PV_DEBLOCK)
        {
            if (softDeblocking)
            {
                CombinedHorzVertFilter(output, (int)(width >> 1),
                                       (int)(height >> 1), QP_store, (int) 1, pp_mod);
            }
            else
            {
                CombinedHorzVertFilter_NoSoftDeblocking(output, (int)(width >> 1),
                        (int)(height >> 1), QP_store, (int) 1, pp_mod);
            }
        }
        if (filter_type & PV_DERING)
        {
            Deringing_Chroma(output, (int)(width >> 1),
                             (int)(height >> 1), QP_store,
                             combined_with_deblock_filter, pp_mod);
        }
    }

    /* swap current pp_mod to prev_frame pp_mod */
    pp_mod = video->pstprcTypCur;
    video->pstprcTypCur = video->pstprcTypPrv;
    video->pstprcTypPrv = pp_mod;

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}
#endif


#ifdef PV_ANNEX_IJKT_SUPPORT
/* In-place deblocking filter for one H.263 plane (Annex J-style; NOTE(review):
 * exact spec conformance not verified here).  Filters the four pixels
 * straddling each 8x8/16x16 block edge, first all vertical (horizontal-edge)
 * positions, then all horizontal ones, skipping edges where both adjacent
 * macroblocks are MODE_SKIPPED.  `chr` selects chroma (8x8 blocks) vs luma
 * (16x16); `annex_T` selects the modified-quant chroma QP mapping.
 * CLIP_RESULT presumably clamps to the valid pixel range (defined in
 * mp4def.h). */
void H263_Deblock(uint8 *rec,
                  int width,
                  int height,
                  int16 *QP_store,
                  uint8 *mode,
                  int chr, int annex_T)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int i, j, k;
    uint8 *rec_y;
    int tmpvar;
    int mbnum, strength, A_D, d1_2, d1, d2, A, B, C, D, b_size;
    int d, offset, nMBPerRow, nMBPerCol, width2 = (width << 1);

    /* MAKE SURE I-VOP INTRA MACROBLOCKS ARE SET TO NON-SKIPPED MODE*/
    mbnum = 0;

    if (chr)
    {
        nMBPerRow = width >> 3;
        nMBPerCol = height >> 3;
        b_size = 8;
    }
    else
    {
        nMBPerRow = width >> 4;
        nMBPerCol = height >> 4;
        b_size = 16;
    }


    /********************************* VERTICAL FILTERING ****************************/
    /* vertical filtering of mid sections no need to check neighboring QP's etc */
    if (!chr)
    {
        /* luma only: the internal 8x8 edge inside each 16x16 MB */
        rec_y = rec + (width << 3);
        for (i = 0; i < (height >> 4); i++)
        {
            for (j = 0; j < (width >> 4); j++)
            {
                if (mode[mbnum] != MODE_SKIPPED)
                {
                    k = 16;
                    strength = STRENGTH_tab[QP_store[mbnum]];
                    while (k--)
                    {
                        /* A,B above the edge; C,D below it */
                        A =  *(rec_y - width2);
                        D = *(rec_y + width);
                        A_D = A - D;
                        C = *rec_y;
                        B = *(rec_y - width);
                        d = (((C - B) << 2) + A_D);

                        if (d < 0)
                        {
                            d1 = -(-d >> 3);
                            if (d1 < -(strength << 1))
                            {
                                d1 = 0;
                            }
                            else if (d1 < -strength)
                            {
                                d1 = -d1 - (strength << 1);
                            }
                            d1_2 = -d1 >> 1;
                        }
                        else
                        {
                            d1 = d >> 3;
                            if (d1 > (strength << 1))
                            {
                                d1 = 0;
                            }
                            else if (d1 > strength)
                            {
                                d1 = (strength << 1) - d1;
                            }
                            d1_2 = d1 >> 1;
                        }

                        if (A_D < 0)
                        {
                            d2 = -(-A_D >> 2);
                            if (d2 < -d1_2)
                            {
                                d2 = -d1_2;
                            }
                        }
                        else
                        {
                            d2 = A_D >> 2;
                            if (d2 > d1_2)
                            {
                                d2 = d1_2;
                            }
                        }

                        *(rec_y - width2) = A - d2;
                        tmpvar = B + d1;
                        CLIP_RESULT(tmpvar)
                        *(rec_y - width) = tmpvar;
                        tmpvar = C - d1;
                        CLIP_RESULT(tmpvar)
                        *rec_y = tmpvar;
                        *(rec_y + width) = D + d2;
                        rec_y++;
                    }
                }
                else
                {
                    rec_y += b_size;
                }
                mbnum++;
            }
            rec_y += (15 * width);
        }
    }

    /* VERTICAL boundary blocks */
    rec_y = rec + width * b_size;
    mbnum = nMBPerRow;
    for (i = 0; i < nMBPerCol - 1; i++)
    {
        for (j = 0; j < nMBPerRow; j++)
        {
            if (mode[mbnum] != MODE_SKIPPED || mode[mbnum - nMBPerRow] != MODE_SKIPPED)
            {
                k = b_size;
                /* take the strength of whichever neighbor is coded */
                if (mode[mbnum] != MODE_SKIPPED)
                {
                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum]] : QP_store[mbnum])];
                }
                else
                {
                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum - nMBPerRow]] : QP_store[mbnum - nMBPerRow])];
                }

                while (k--)
                {
                    A =  *(rec_y - width2);
                    D = *(rec_y + width);
                    A_D = A - D;
                    C = *rec_y;
                    B = *(rec_y - width);
                    d = (((C - B) << 2) + A_D);

                    if (d < 0)
                    {
                        d1 = -(-d >> 3);
                        if (d1 < -(strength << 1))
                        {
                            d1 = 0;
                        }
                        else if (d1 < -strength)
                        {
                            d1 = -d1 - (strength << 1);
                        }
                        d1_2 = -d1 >> 1;
                    }
                    else
                    {
                        d1 = d >> 3;
                        if (d1 > (strength << 1))
                        {
                            d1 = 0;
                        }
                        else if (d1 > strength)
                        {
                            d1 = (strength << 1) - d1;
                        }
                        d1_2 = d1 >> 1;
                    }

                    if (A_D < 0)
                    {
                        d2 = -(-A_D >> 2);
                        if (d2 < -d1_2)
                        {
                            d2 = -d1_2;
                        }
                    }
                    else
                    {
                        d2 = A_D >> 2;
                        if (d2 > d1_2)
                        {
                            d2 = d1_2;
                        }
                    }

                    *(rec_y - width2) = A - d2;
                    tmpvar = B + d1;
                    CLIP_RESULT(tmpvar)
                    *(rec_y - width) = tmpvar;
                    tmpvar = C - d1;
                    CLIP_RESULT(tmpvar)
                    *rec_y = tmpvar;
                    *(rec_y + width) = D + d2;
                    rec_y++;
                }
            }
            else
            {
                rec_y += b_size;
            }
            mbnum++;
        }
        rec_y += ((b_size - 1) * width);
    }

    /***************************HORIZONTAL FILTERING ********************************************/
    mbnum = 0;
    /* HORIZONTAL INNER */
    if (!chr)
    {
        /* luma only: the internal vertical 8x8 edge inside each MB */
        rec_y = rec + 8;
        offset = width * b_size - b_size;

        for (i = 0; i < nMBPerCol; i++)
        {
            for (j = 0; j < nMBPerRow; j++)
            {
                if (mode[mbnum] != MODE_SKIPPED)
                {
                    k = 16;
                    strength = STRENGTH_tab[QP_store[mbnum]];
                    while (k--)
                    {
                        /* A,B left of the edge; C,D right of it */
                        A = *(rec_y - 2);
                        D = *(rec_y + 1);
                        A_D = A - D;
                        C = *rec_y;
                        B = *(rec_y - 1);
                        d = (((C - B) << 2) + A_D);

                        if (d < 0)
                        {
                            d1 = -(-d >> 3);
                            if (d1 < -(strength << 1))
                            {
                                d1 = 0;
                            }
                            else if (d1 < -strength)
                            {
                                d1 = -d1 - (strength << 1);
                            }
                            d1_2 = -d1 >> 1;
                        }
                        else
                        {
                            d1 = d >> 3;
                            if (d1 > (strength << 1))
                            {
                                d1 = 0;
                            }
                            else if (d1 > strength)
                            {
                                d1 = (strength << 1) - d1;
                            }
                            d1_2 = d1 >> 1;
                        }

                        if (A_D < 0)
                        {
                            d2 = -(-A_D >> 2);
                            if (d2 < -d1_2)
                            {
                                d2 = -d1_2;
                            }
                        }
                        else
                        {
                            d2 = A_D >> 2;
                            if (d2 > d1_2)
                            {
                                d2 = d1_2;
                            }
                        }

                        *(rec_y - 2) = A - d2;
                        tmpvar = B + d1;
                        CLIP_RESULT(tmpvar)
                        *(rec_y - 1) = tmpvar;
                        tmpvar = C - d1;
                        CLIP_RESULT(tmpvar)
                        *rec_y = tmpvar;
                        *(rec_y + 1) = D + d2;
                        rec_y += width;
                    }
                    rec_y -= offset;
                }
                else
                {
                    rec_y += b_size;
                }
                mbnum++;
            }
            rec_y += (15 * width);
        }
    }

    /* HORIZONTAL EDGE */
    rec_y = rec + b_size;
    offset = width * b_size - b_size;
    mbnum = 1;
    for (i = 0; i < nMBPerCol; i++)
    {
        for (j = 0; j < nMBPerRow - 1; j++)
        {
            if (mode[mbnum] != MODE_SKIPPED || mode[mbnum-1] != MODE_SKIPPED)
            {
                k = b_size;
                if (mode[mbnum] != MODE_SKIPPED)
                {
                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum]] : QP_store[mbnum])];
                }
                else
                {
                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum - 1]] : QP_store[mbnum - 1])];
                }

                while (k--)
                {
                    A = *(rec_y - 2);
                    D = *(rec_y + 1);
                    A_D = A - D;
                    C = *rec_y;
                    B = *(rec_y - 1);
                    d = (((C - B) << 2) + A_D);

                    if (d < 0)
                    {
                        d1 = -(-d >> 3);
                        if (d1 < -(strength << 1))
                        {
                            d1 = 0;
                        }
                        else if (d1 < -strength)
                        {
                            d1 = -d1 - (strength << 1);
                        }
                        d1_2 = -d1 >> 1;
                    }
                    else
                    {
                        d1 = d >> 3;
                        if (d1 > (strength << 1))
                        {
                            d1 = 0;
                        }
                        else if (d1 > strength)
                        {
                            d1 = (strength << 1) - d1;
                        }
                        d1_2 = d1 >> 1;
                    }

                    if (A_D < 0)
                    {
                        d2 = -(-A_D >> 2);
                        if (d2 < -d1_2)
                        {
                            d2 = -d1_2;
                        }
                    }
                    else
                    {
                        d2 = A_D >> 2;
                        if (d2 > d1_2)
                        {
                            d2 = d1_2;
                        }
                    }

                    *(rec_y - 2) = A - d2;
                    tmpvar = B + d1;
                    CLIP_RESULT(tmpvar)
                    *(rec_y - 1) = tmpvar;
                    tmpvar = C - d1;
                    CLIP_RESULT(tmpvar)
                    *rec_y = tmpvar;
                    *(rec_y + 1) = D + d2;
                    rec_y += width;
                }
                rec_y -= offset;
            }
            else
            {
                rec_y += b_size;
            }
            mbnum++;
        }
        rec_y += ((width * (b_size - 1)) + b_size);
        mbnum++;
    }
    return;
}
#endif


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/post_proc.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef post_proc_H
#define post_proc_H

/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "mp4dec_lib.h"

/*----------------------------------------------------------------------------
; MACROS
; Define module specific macros here
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; DEFINES
; Include all pre-processor statements here.
----------------------------------------------------------------------------*/
/* update running max/min with sample p */
#define UPDATE_PV_MAXPV_MIN(p,max,min) if ((p) > max) max=(p); else if ((p) < min) min = (p);
/* binarize x against a threshold */
#define INDEX(x,thr) (((x)>=thr)?1:0)
#define BLKSIZE 8
#define MBSIZE 16
#define DERING_THR 16

/* version for fast Deblock filtering*/
#define KTh 4 /*threshold for soft filtering*/
#define KThH 4 /*threshold for hard filtering */

#define NoMMX

/*----------------------------------------------------------------------------
; EXTERNAL VARIABLES REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; SIMPLE TYPEDEF'S
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; ENUMERATED TYPEDEF'S
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; STRUCTURES TYPEDEF'S
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; GLOBAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; END
----------------------------------------------------------------------------*/
#endif


================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/post_proc_semaphore.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the
License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: q_block = pointer to buffer of inverse quantized DCT coefficients of type int for intra-VOP mode or buffer of residual data of type int for inter-VOP mode Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Outputs: postmode = post processing semaphore with the vertical deblocking, horizontal deblocking, and deringing bits set up accordingly Pointers and Buffers Modified: None Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This function sets up the postmode semaphore based on the contents of the buffer pointed to by q_block. The function starts out with the assumption that all entries of q_block, except for the first entry (q_block[0]), are zero. This case can induce horizontal and vertical blocking artifacts, therefore, both horizontal and vertical deblocking bits are enabled. The following conditions are tested when setting up the horizontal/vertical deblocking and deringing bits: 1. When only the elements of the top row of the B_SIZE x B_SIZE block (q_block[n], n = 0,..., B_SIZE-1) are non-zero, vertical blocking artifacts may result, therefore, only the vertical deblocking bit is enabled. Otherwise, the vertical deblocking bit is disabled. 2. 
When only the elements of the far left column of the B_SIZE x B_SIZE block (q_block[n*B_SIZE], n = 0, ..., B_SIZE-1) are non-zero, horizontal blocking artifacts may result, therefore, only the horizontal deblocking bit is enabled. Otherwise, the horizontal deblocking bit is disabled. 3. If any non-zero elements exist in positions other than q_block[0], q_block[1], or q_block[B_SIZE], the deringing bit is enabled. Otherwise, it is disabled. The 3 least significant bits of postmode defines vertical or horizontal deblocking and deringing. The valid values are shown below: ------------------------------------------------------- | Type | Enabled | Disabled | ------------------------------------------------------- | Vertical Deblocking (Bit #0) | 1 | 0 | ------------------------------------------------------- | Horizontal Deblocking (Bit #1) | 1 | 0 | ------------------------------------------------------- | Deringing (Bit #2) | 1 | 0 | ------------------------------------------------------- */ /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4dec_lib.h" #include "mp4def.h" #include "post_proc.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. 
----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL STORE/BUFFER/POINTER DEFINITIONS ; Variable declaration - defined here and used outside this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL FUNCTION REFERENCES ; Declare functions defined elsewhere and referenced in this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ #ifdef PV_POSTPROC_ON /*---------------------------------------------------------------------------- ; FUNCTION CODE ----------------------------------------------------------------------------*/ int PostProcSemaphore( int16 *q_block) { /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ int i, j; /* Set default value to vertical and horizontal deblocking enabled */ /* Initial assumption is that only q_block[0] element is non-zero, */ /* therefore, vertical and horizontal deblocking bits are set to 1 */ int postmode = 0x3; /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /* Vertical deblocking bit is enabled when only the entire top 
row of */ /* the B_SIZE x B_SIZE block, i.e., q_block[n], n = 0,..., B_SIZE-1, */ /* are non-zero. Since initial assumption is that all elements, except */ /* q_block[0], is zero, we need to check the remaining elements in the */ /* top row to determine if all or some are non-zero. */ if (q_block[1] != 0) { /* At this point, q_block[0] and q_block[1] are non-zero, while */ /* q_block[n], n = 2,..., B_SIZE-1, are zero. Therefore, we */ /* need to disable vertical deblocking */ postmode &= 0xE; } for (i = 2; i < B_SIZE; i++) { if (q_block[i]) { /* Check if q_block[n], n = 2,..., B_SIZE-1, are non-zero.*/ /* If any of them turn out to be non-zero, we need to */ /* disable vertical deblocking. */ postmode &= 0xE; /* Deringing is enabled if any nonzero elements exist in */ /* positions other than q_block[0], q_block[1] or */ /* q_block[B_SIZE]. */ postmode |= 0x4; break; } } /* Horizontal deblocking bit is enabled when only the entire far */ /* left column, i.e., q_block[n*B_SIZE], n = 0, ..., B_SIZE-1, */ /* are non-zero. Since initial assumption is that all elements, */ /* except q_block[0], is zero, we need to check the remaining */ /* elements in the far left column to determine if all or some */ /* are non-zero. */ if (q_block[B_SIZE]) { /* At this point, only q_block[0] and q_block[B_SIZE] are non-zero, */ /* while q_block[n*B_SIZE], n = 2, 3,..., B_SIZE-1, are zero. */ /* Therefore, we need to disable horizontal deblocking. */ postmode &= 0xD; } for (i = 16; i < NCOEFF_BLOCK; i += B_SIZE) { if (q_block[i]) { /* Check if q_block[n], n = 2*B_SIZE,...,(B_SIZE-1)*B_SIZE, */ /* are non-zero. If any of them turn out to be non-zero, */ /* we need to disable horizontal deblocking. */ postmode &= 0xD; /* Deringing is enabled if any nonzero elements exist in */ /* positions other than q_block[0], q_block[1] or */ /* q_block[B_SIZE]. */ postmode |= 0x4; break; } } /* At this point, only the first row and far left column elements */ /* have been tested. 
If deringing bit is still not set at this */ /* point, check the rest of q_block to determine if the elements */ /* are non-zero. If all elements, besides q_block[0], q_block[1], */ /* or q_block[B_SIZE] are non-zero, deringing bit must be set */ if ((postmode & 0x4) == 0) { for (i = 1; i < B_SIZE; i++) { for (j = 1; j < B_SIZE; j++) { if (q_block[(i<<3)+j]) { /* At this point, q_block[0] and another q_block */ /* element are non-zero, therefore, we need to */ /* disable vertical and horizontal deblocking */ postmode &= 0xC; /* Deringing is enabled if any nonzero elements exist in */ /* positions other than q_block[0], q_block[1] or */ /* q_block[B_SIZE]. */ postmode |= 0x4; /* Set outer FOR loop count to B_SIZE to get out of */ /* outer FOR loop */ i = B_SIZE; /* Get out of inner FOR loop */ break; } } } } /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ return (postmode); } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/pp_semaphore_chroma_inter.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: xpred = x-axis coordinate of the block used for prediction (int) ypred = y-axis coordinate of the block used for prediction (int) pp_dec_u = pointer to the post processing semaphore for chrominance (uint8) pstprcTypPrv = pointer the previous frame's post processing type (uint8) dx = horizontal component of the motion vector (int) dy = vertical component of the motion vector (int) mvwidth = number of blocks per row in the luminance VOP (int) height = luminance VOP height in pixels (int) size = total number of pixel in the current luminance VOP (int) mv_loc = flag indicating location of the motion compensated (x,y) position with respect to the luminance MB (int); 0 -> inside MB, 1 -> outside MB msk_deblock = flag indicating whether to perform deblocking (msk_deblock = 0) or not (msk_deblock = 1) (uint8) Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Outputs: None Pointers and Buffers Modified: pp_dec_u contents are the updated semaphore propagation data Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This functions performs post processing semaphore propagation processing after chrominance prediction in interframe processing mode. 
*/

/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "mp4dec_api.h"
#include "mp4def.h"

/*----------------------------------------------------------------------------
; MACROS
; Define module specific macros here
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; DEFINES
; Include all pre-processor statements here. Include conditional
; compile variables also.
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL STORE/BUFFER/POINTER DEFINITIONS
; Variable declaration - defined here and used outside this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL FUNCTION REFERENCES
; Declare functions defined elsewhere and referenced in this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/

#ifdef PV_POSTPROC_ON
#ifdef __cplusplus
extern "C"
{
#endif
/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
/* Propagate post-processing semaphore bits (deringing, or deblocking reset)
 * from the previous frame's semaphore map into the current frame's
 * chrominance semaphores (Cb first, then Cr at an offset of size>>8) after
 * inter-frame chrominance prediction.  See the file header above for the
 * role of each argument. */
void pp_semaphore_chroma_inter(
    int xpred,              /* i : x coord of prediction block (half-pel units) */
    int ypred,              /* i : y coord of prediction block (half-pel units) */
    uint8 *pp_dec_u,        /* i/o : current Cb semaphore; Cr follows at +size/256 */
    uint8 *pstprcTypPrv,    /* i : previous frame's post-processing semaphores */
    int dx,                 /* i : MV horizontal component */
    int dy,                 /* i : MV vertical component */
    int mvwidth,            /* i : luminance blocks per row */
    int height,             /* i : luminance VOP height in pixels */
    int32 size,             /* i : luminance pixels per VOP */
    int mv_loc,             /* i : 1 => MC position is outside the MB (clamp needed) */
    uint8 msk_deblock       /* i : 0 => deblocking requested, semaphores cleared */
)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int mmvy, mmvx, nmvy, nmvx;
    uint8 *pp_prev1, *pp_prev2, *pp_prev3, *pp_prev4;

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/

    /* 09/28/2000, modify semaphore propagation to */
    /* accommodate smart indexing */
    mmvx = xpred >> 4;      /* block x coor */
    nmvx = mmvx;

    mmvy = ypred >> 4;      /* block y coor */
    nmvy = mmvy;

    /* Check if MV is outside the frame */
    /* NOTE(review): clamping is applied only when mv_loc == 1; for
     * mv_loc == 0 correctness relies on the caller having verified the
     * prediction block is inside the frame — confirm against
     * pp_semaphore_luma. */
    if (mv_loc == 1)
    {
        /* Perform boundary check */
        if (nmvx < 0)
        {
            nmvx = 0;
        }
        else if (nmvx > mvwidth - 1)
        {
            nmvx = mvwidth - 1;
        }

        if (nmvy < 0)
        {
            nmvy = 0;
        }
        else if (nmvy > (height >> 4) - 1)
        {
            nmvy = (height >> 4) - 1;
        }
    }

    /* Calculate pointer to first chrominance b semaphores in     */
    /* pstprcTypPrv, i.e., first chrominance b semaphore is in    */
    /* (pstprcTypPrv + (size>>6)).                                */
    /* Since total number of chrominance blocks per row in a VOP  */
    /* is half of the total number of luminance blocks per row in */
    /* a VOP, we use (mvwidth >> 1) when calculating the row      */
    /* offset.                                                    */
    pp_prev1 = pstprcTypPrv + (size >> 6) + nmvx + nmvy * (mvwidth >> 1) ;

    /* Check if MV is a multiple of 16 */
    /*  1/5/01, make sure it doesn't go out of bound */
    if (((dy&0xF) != 0) && (mmvy + 1 < (height >> 4) - 1))
    {   /* dy is not a multiple of 16 */

        /* pp_prev3 is the block below pp_prev1 block */
        pp_prev3 = pp_prev1 + (mvwidth >> 1);
    }
    else
    {   /* dy is a multiple of 16 */

        pp_prev3 = pp_prev1;
    }

    /*  1/5/01, make sure it doesn't go out of bound */
    if (((dx&0xF) != 0) && (mmvx + 1 < (mvwidth >> 1) - 1))
    {   /* dx is not a multiple of 16 */

        /* pp_prev2 is the block to the right of pp_prev1 block */
        pp_prev2 = pp_prev1 + 1;

        /* pp_prev4 is the block to the right of the block */
        /* below pp_prev1 block                            */
        pp_prev4 = pp_prev3 + 1;
    }
    else
    {   /* dx is a multiple of 16 */

        pp_prev2 = pp_prev1;
        pp_prev4 = pp_prev3;
    }

    /* Advance offset to location of first Chrominance R semaphore in */
    /* pstprcTypPrv. Since the number of pixels in a Chrominance VOP  */
    /* is (number of pixels in Luminance VOP/4), and there are 64     */
    /* pixels in an 8x8 Chrominance block, the offset can be          */
    /* calculated as:                                                 */
    /*      mv_loc = (number of pixels in Luminance VOP/(4*64))       */
    /*             = size/256 = size>>8                               */
    /* (mv_loc is deliberately reused as the Cb->Cr offset from here on.) */
    mv_loc = (size >> 8);

    /*  11/3/00, change the propagation for deblocking */
    if (msk_deblock == 0)
    {
        /* Deblocking semaphore propagation for Chrominance */
        /* b semaphores                                     */
        *(pp_dec_u) = 0;

        /* Advance offset to point to Chrominance r semaphores */
        pp_dec_u += mv_loc;

        /* Deblocking semaphore propagation for Chrominance */
        /* r semaphores                                     */
        *(pp_dec_u) = 0;
    }
    else
    {
        /* Deringing semaphore propagation for Chrominance B block */
        if ((*(pp_dec_u)&4) == 0)
        {
            *(pp_dec_u) |= ((*(pp_prev1) | *(pp_prev2) | *(pp_prev3) | *(pp_prev4)) & 0x4);
        }

        /* Advance offset to point to Chrominance r semaphores */
        pp_dec_u += mv_loc;
        pp_prev1 += mv_loc;
        pp_prev2 += mv_loc;
        pp_prev3 += mv_loc;
        pp_prev4 += mv_loc;

        /* Deringing semaphore propagation for Chrominance R */
        if ((*(pp_dec_u)&4) == 0)
        {
            *(pp_dec_u) |= ((*(pp_prev1) | *(pp_prev2) | *(pp_prev3) | *(pp_prev4)) & 0x4);
        }
    }

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}
#ifdef __cplusplus
}
#endif
#endif



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/pp_semaphore_luma.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ /* ------------------------------------------------------------------------------ INPUT AND OUTPUT DEFINITIONS Inputs: xpred = x-axis coordinate of the MB used for prediction (int) ypred = y-axis coordinate of the MB used for prediction (int) pp_dec_y = pointer to the post processing semaphore for current luminance frame (uint8) pstprcTypPrv = pointer the previous frame's post processing type (uint8) ll = pointer to the buffer (int) mv_loc = flag indicating location of the motion compensated (x,y) position with respect to the luminance MB (int); 0 -> inside MB, 1 -> outside MB dx = horizontal component of the motion vector (int) dy = vertical component of the motion vector (int) mvwidth = number of blocks per row (int) width = luminance VOP width in pixels (int) height = luminance VOP height in pixels (int) Local Stores/Buffers/Pointers Needed: None Global Stores/Buffers/Pointers Needed: None Outputs: msk_deblock = flag that indicates whether deblocking is to be performed (msk_deblock = 0) or not (msk_deblock = 1) (uint8) Pointers and Buffers Modified: pp_dec_y contents are the updated semapohore propagation data Local Stores Modified: None Global Stores Modified: None ------------------------------------------------------------------------------ FUNCTION DESCRIPTION This functions performs post processing semaphore propagation processing after luminance prediction. 
*/

/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "mp4dec_api.h"
#include "mp4def.h"

/*----------------------------------------------------------------------------
; MACROS
; Define module specific macros here
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; DEFINES
; Include all pre-processor statements here. Include conditional
; compile variables also.
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL STORE/BUFFER/POINTER DEFINITIONS
; Variable declaration - defined here and used outside this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL FUNCTION REFERENCES
; Declare functions defined elsewhere and referenced in this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/

#ifdef PV_POSTPROC_ON
#ifdef __cplusplus
extern "C"
{
#endif
/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
/* Propagate post-processing semaphores for the 4 luminance blocks of one MB
 * after inter prediction.  Returns msk_deblock: 0 => deblocking should be
 * performed (semaphores were cleared), nonzero => skip deblocking.  See the
 * file header above for the role of each argument. */
uint8 pp_semaphore_luma(
    int xpred,              /* i : x coord of prediction MB (half-pel units) */
    int ypred,              /* i : y coord of prediction MB (half-pel units) */
    uint8 *pp_dec_y,        /* i/o : current luminance semaphores for this MB */
    uint8 *pstprcTypPrv,    /* i : previous frame's post-processing semaphores */
    int *ll,                /* i : per-block pointer increments within the MB */
    int *mv_loc,            /* i/o : set to 0 (inside frame) or 1 (outside) */
    int dx,                 /* i : MV horizontal component */
    int dy,                 /* i : MV vertical component */
    int mvwidth,            /* i : number of blocks per row */
    int width,              /* i : luminance VOP width in pixels */
    int height              /* i : luminance VOP height in pixels */
)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    int kk, mmvy, mmvx, nmvx, nmvy;
    uint8 *pp_prev1, *pp_prev2, *pp_prev3, *pp_prev4;
    uint8 msk_deblock = 0;          /*  11/3/00 */

    /*----------------------------------------------------------------------------
    ; Function body here
    ----------------------------------------------------------------------------*/
    /* Interframe Processing - 1 MV per MB */

    /* check whether the MV points outside the frame */
    if (xpred >= 0 && xpred <= ((width << 1) - (2*MB_SIZE)) && ypred >= 0 &&
            ypred <= ((height << 1) - (2*MB_SIZE)))
    {
        /*****************************/
        /* (x,y) is inside the frame */
        /*****************************/

        /*  10/24/2000 post_processing semaphore generation */
        /*  10/23/2000 no boundary checking */
        *mv_loc = 0;

        /* Calculate block x coordinate. Divide by 16 is for */
        /* converting half-pixel resolution to block         */
        mmvx = xpred >> 4;

        /* Calculate block y coordinate. Divide by 16 is for */
        /* converting half-pixel resolution to block         */
        mmvy = ypred >> 4;

        /* Find post processing semaphore location for block */
        /* used for prediction, i.e.,                        */
        /* pp_prev1 = &pstprcTypPrv[mmvy*mvwidth][mmvx]      */
        pp_prev1 = pstprcTypPrv + mmvx + mmvy * mvwidth;

        /* Check if MV is a multiple of 16 */
        if ((dx&0xF) != 0)
        {
            /* dx is not a multiple of 16: pp_prev2 is the */
            /* block to the right of pp_prev1 block        */
            pp_prev2 = pp_prev1 + 1;

            if ((dy&0xF) != 0)
            {
                /* dy is not a multiple of 16: pp_prev3 is */
                /* the block below pp_prev1 block          */
                pp_prev3 = pp_prev1 + mvwidth;
            }
            else
            {
                /* dy is a multiple of 16 */
                pp_prev3 = pp_prev1;
            }

            /* pp_prev4 is the block to the right of pp_prev3 block. */
            pp_prev4 = pp_prev3 + 1;
        }
        else
        {
            /* dx is a multiple of 16 */
            pp_prev2 = pp_prev1;

            if ((dy&0xF) != 0)
            {
                /* dy is not a multiple of 16: pp_prev3 is */
                /* the block below pp_prev1 block.         */
                pp_prev3 = pp_prev1 + mvwidth;
            }
            else
            {
                /* dy is a multiple of 16: prediction is block-aligned, */
                /* so deblocking can be skipped (msk_deblock = 0x3).    */
                pp_prev3 = pp_prev1;
                msk_deblock = 0x3;
            }

            pp_prev4 = pp_prev3;
        }

        /* Perform post processing semaphore propagation for each */
        /* of the 4 blocks in a MB.                               */
        for (kk = 0; kk < 4; kk++)
        {
            /* Deringing semaphore propagation */
            if ((*(pp_dec_y) & 4) == 0)
            {
                *(pp_dec_y) |= ((*(pp_prev1) | *(pp_prev2) | *(pp_prev3) | *(pp_prev4)) & 0x4);
            }
            /* Deblocking semaphore propagation */
            /*  11/3/00, change the propagation for deblocking */
            if (msk_deblock == 0)
            {
                *(pp_dec_y) = 0;
            }

            pp_dec_y += ll[kk];
            pp_prev1 += ll[kk];
            pp_prev2 += ll[kk];
            pp_prev3 += ll[kk];
            pp_prev4 += ll[kk];
        }
    }
    else
    {
        /******************************/
        /* (x,y) is outside the frame */
        /******************************/

        /*  10/24/2000 post_processing semaphore generation */
        /*  10/23/2000 boundary checking */
        *mv_loc = 1;

        /* Perform post processing semaphore propagation for each */
        /* of the 4 blocks in a MB.                               */
        /* NOTE(review): msk_deblock set in one iteration persists for the */
        /* remaining blocks of this MB — intentional as coded; confirm.    */
        for (kk = 0; kk < 4; kk++)
        {
            /* Calculate block x coordinate and round (?).  */
            /* Divide by 16 is for converting half-pixel    */
            /* resolution to block.                         */
            mmvx = (xpred + ((kk & 1) << 3)) >> 4;
            nmvx = mmvx;

            /* Calculate block y coordinate and round (?).  */
            /* Divide by 16 is for converting half-pixel    */
            /* resolution to block.                         */
            mmvy = (ypred + ((kk & 2) << 2)) >> 4;
            nmvy = mmvy;

            /* Perform boundary checking */
            if (nmvx < 0)
            {
                nmvx = 0;
            }
            else if (nmvx > mvwidth - 1)
            {
                nmvx = mvwidth - 1;
            }

            if (nmvy < 0)
            {
                nmvy = 0;
            }
            else if (nmvy > (height >> 3) - 1)
            {
                nmvy = (height >> 3) - 1;
            }

            /* Find post processing semaphore location for block */
            /* used for prediction, i.e.,                        */
            /* pp_prev1 = &pstprcTypPrv[nmvy*mvwidth][nmvx]      */
            pp_prev1 = pstprcTypPrv + nmvx + nmvy * mvwidth;

            /* Check if x component of MV is a multiple of 16    */
            /* and check if block x coordinate is out of bounds  */
            if (((dx&0xF) != 0) && (mmvx + 1 < mvwidth - 1))
            {
                /* dx is not a multiple of 16 and the block   */
                /* x coordinate is within the bounds:         */
                /* pp_prev2 is the block right of pp_prev1    */
                pp_prev2 = pp_prev1 + 1;

                /* Check if y component of MV is a multiple   */
                /* of 16 and check if block y coordinate is   */
                /* out of bounds                              */
                if (((dy&0xF) != 0) && (mmvy + 1 < (height >> 3) - 1))
                {
                    /* dy is not a multiple of 16 and the block y  */
                    /* coordinate is within bounds: pp_prev3 is    */
                    /* the block below pp_prev1                    */
                    pp_prev3 = pp_prev1 + mvwidth;

                    /* all prediction are from different blocks */
                    msk_deblock = 0x3;
                }
                else
                {
                    /* dy is a multiple of 16 or the block */
                    /* y coordinate is out of bounds       */
                    pp_prev3 = pp_prev1;
                }

                /* pp_prev4 is the block to the right of pp_prev3 block. */
                pp_prev4 = pp_prev3 + 1;
            }
            else
            {
                /* dx is a multiple of 16 or the block x */
                /* coordinate is out of bounds           */
                pp_prev2 = pp_prev1;

                /* Check if y component of MV is a multiple   */
                /* of 16 and check if block y coordinate is   */
                /* out of bounds                              */
                if (((dy&0xF) != 0) && (mmvy + 1 < (height >> 3) - 1))
                {
                    /* dy is not a multiple of 16 and the block y */
                    /* coordinate is within bounds: pp_prev3 is   */
                    /* the block below pp_prev1 block.            */
                    pp_prev3 = pp_prev1 + mvwidth;
                }
                else
                {
                    /* dy is a multiple of 16 or the block */
                    /* y coordinate is out of bounds       */
                    pp_prev3 = pp_prev1;
                }

                pp_prev4 = pp_prev3;
            }

            /* Deringing semaphore propagation */
            if ((*(pp_dec_y)&4) == 0)
            {
                *(pp_dec_y) |= ((*(pp_prev1) | *(pp_prev2) | *(pp_prev3) | *(pp_prev4)) & 0x4);
            }
            /* Deblocking semaphore propagation */
            /*  11/3/00, change the propagation for deblocking */
            if (msk_deblock == 0)
            {
                *(pp_dec_y) = 0;
            }

            pp_dec_y += ll[kk];
        }
    }

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return (msk_deblock);
}
#ifdef __cplusplus
}
#endif
#endif



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/pvdec_api.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "vlc_decode.h" #include "bitstream.h" #define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT #include "osclconfig_compiler_warnings.h" #ifdef DEC_INTERNAL_MEMORY_OPT #define QCIF_MBS 99 #define QCIF_BS (4*QCIF_MBS) #define QCIF_MB_ROWS 11 extern uint8 IMEM_sliceNo[QCIF_MBS]; extern uint8 IMEM_acPredFlag[QCIF_MBS]; extern uint8 IMEM_headerInfo_Mode[QCIF_MBS]; extern uint8 IMEM_headerInfo_CBP[QCIF_MBS]; extern int IMEM_headerInfo_QPMB[QCIF_MBS]; extern MacroBlock IMEM_mblock; extern MOT IMEM_motX[QCIF_BS]; extern MOT IMEM_motY[QCIF_BS]; extern BitstreamDecVideo IMEM_BitstreamDecVideo[4]; extern typeDCStore IMEM_predDC[QCIF_MBS]; extern typeDCACStore IMEM_predDCAC_col[QCIF_MB_ROWS+1]; extern VideoDecData IMEM_VideoDecData[1]; extern Vop IMEM_currVop[1]; extern Vop IMEM_prevVop[1]; extern PIXEL IMEM_currVop_yChan[QCIF_MBS*128*3]; extern PIXEL IMEM_prevVop_yChan[QCIF_MBS*128*3]; extern uint8 IMEM_pstprcTypCur[6*QCIF_MBS]; extern uint8 IMEM_pstprcTypPrv[6*QCIF_MBS]; extern Vop IMEM_vopHEADER[2]; extern Vol IMEM_VOL[2]; extern Vop IMEM_vopHeader[2][1]; extern Vol IMEM_vol[2][1]; #endif /* ======================================================================== */ /* Function : PVInitVideoDecoder() */ /* Date : 04/11/2000, 08/29/2000 */ /* Purpose : Initialization of the MPEG-4 video decoder library. */ /* The return type is Bool instead of PV_STATUS because */ /* we don't want to expose PV_STATUS to (outside) programmers */ /* that use our decoder library SDK. */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/* Modified :                                                               */
/* ======================================================================== */
/* Initialize the MPEG-4 / H.263 / FLV1 video decoder library: allocate the
 * decoder state (VideoDecData), per-layer Vol/Vop structures and bitstream
 * buffers, and (in MPEG4_MODE) decode the VOL headers from volbuf[].  On any
 * failure the partially-built state is torn down via PVCleanUpVideoDecoder.
 * Returns PV_TRUE on success, PV_FALSE on failure. */
OSCL_EXPORT_REF Bool PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf[],
                                        int32 *volbuf_size, int nLayers, int width, int height, MP4DecodingMode mode)
{
    /* NOTE(review): this initial read of decCtrl->videoDecoderData is dead —
     * decCtrl is memset to zero below and 'video' is unconditionally
     * reassigned before first use. */
    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;
    Bool status = PV_TRUE;
    int idx;
    BitstreamDecVideo *stream;

    oscl_memset(decCtrl, 0, sizeof(VideoDecControls)); /* fix a size bug.   03/28/2001 */
    decCtrl->nLayers = nLayers;
    for (idx = 0; idx < nLayers; idx++)
    {
        decCtrl->volbuf[idx] = volbuf[idx];
        decCtrl->volbuf_size[idx] = volbuf_size[idx];
    }

    /* memory allocation & initialization */
#ifdef DEC_INTERNAL_MEMORY_OPT
    video = IMEM_VideoDecData;
#else
    video = (VideoDecData *) oscl_malloc(sizeof(VideoDecData));
#endif
    if (video != NULL)
    {
        oscl_memset(video, 0, sizeof(VideoDecData));
        video->memoryUsage = sizeof(VideoDecData);
        video->numberOfLayers = nLayers;
#ifdef DEC_INTERNAL_MEMORY_OPT
        video->vol = (Vol **) IMEM_VOL;
#else
        video->vol = (Vol **) oscl_malloc(nLayers * sizeof(Vol *));
#endif
        if (video->vol == NULL) status = PV_FALSE;
        video->memoryUsage += nLayers * sizeof(Vol *);

        /* we need to setup this pointer for the application to */
        /* pass it around.                                      */
        decCtrl->videoDecoderData = (void *) video;
        video->videoDecControls = decCtrl;  /* yes. we have a cyclic */
        /* references here :) */

        /* Allocating Vop space, this has to change when we add */
        /* spatial scalability to the decoder                   */
#ifdef DEC_INTERNAL_MEMORY_OPT
        video->currVop = IMEM_currVop;
        if (video->currVop == NULL) status = PV_FALSE;
        else oscl_memset(video->currVop, 0, sizeof(Vop));
        video->prevVop = IMEM_prevVop;
        if (video->prevVop == NULL) status = PV_FALSE;
        else oscl_memset(video->prevVop, 0, sizeof(Vop));
        video->memoryUsage += (sizeof(Vop) * 2);
        video->vopHeader = (Vop **) IMEM_vopHEADER;
#else

        video->currVop = (Vop *) oscl_malloc(sizeof(Vop));
        if (video->currVop == NULL) status = PV_FALSE;
        else oscl_memset(video->currVop, 0, sizeof(Vop));
        video->prevVop = (Vop *) oscl_malloc(sizeof(Vop));
        if (video->prevVop == NULL) status = PV_FALSE;
        else oscl_memset(video->prevVop, 0, sizeof(Vop));
        video->memoryUsage += (sizeof(Vop) * 2);

        video->vopHeader = (Vop **) oscl_malloc(sizeof(Vop *) * nLayers);
#endif
        if (video->vopHeader == NULL) status = PV_FALSE;
        else oscl_memset(video->vopHeader, 0, sizeof(Vop *)*nLayers);
        video->memoryUsage += (sizeof(Vop *) * nLayers);

        video->initialized = PV_FALSE;
        /* Decode the header to get all information to allocate data */
        if (status == PV_TRUE)
        {
            /* initialize decoded frame counter.   04/24/2001 */
            video->frame_idx = -1;

            for (idx = 0; idx < nLayers; idx++)
            {

#ifdef DEC_INTERNAL_MEMORY_OPT
                video->vopHeader[idx] = IMEM_vopHeader[idx];
#else
                video->vopHeader[idx] = (Vop *) oscl_malloc(sizeof(Vop));
#endif
                if (video->vopHeader[idx] == NULL)
                {
                    status = PV_FALSE;
                    break;
                }
                else
                {
                    oscl_memset(video->vopHeader[idx], 0, sizeof(Vop));
                    video->vopHeader[idx]->timeStamp = 0;
                    video->memoryUsage += (sizeof(Vop));
                }
#ifdef DEC_INTERNAL_MEMORY_OPT
                video->vol[idx] = IMEM_vol[idx];
                video->memoryUsage += sizeof(Vol);
                oscl_memset(video->vol[idx], 0, sizeof(Vol));
                if (video->vol[idx] == NULL) status = PV_FALSE;
                stream = IMEM_BitstreamDecVideo;
#else
                video->vol[idx] = (Vol *) oscl_malloc(sizeof(Vol));
                if (video->vol[idx] == NULL)
                {
                    status = PV_FALSE;
                    break;
                }
                else
                {
                    video->memoryUsage += sizeof(Vol);
                    oscl_memset(video->vol[idx], 0, sizeof(Vol));
                }
                stream = (BitstreamDecVideo *) oscl_malloc(sizeof(BitstreamDecVideo));
#endif
                video->memoryUsage += sizeof(BitstreamDecVideo);
                if (stream == NULL)
                {
                    status = PV_FALSE;
                    break;
                }
                else
                {
                    int32 buffer_size;
                    if ((buffer_size = BitstreamOpen(stream, idx)) < 0)
                    {
                        mp4dec_log("InitVideoDecoder(): Can't allocate bitstream buffer.\n");
                        status = PV_FALSE;
                        break;
                    }
                    video->memoryUsage += buffer_size;
                    video->vol[idx]->bitstream = stream;
                    video->vol[idx]->volID = idx;
                    video->vol[idx]->timeInc_offset = 0;  /*  11/12/01 */
                    /* Default to short-header VLC tables; overridden as the
                     * VOL header (MPEG-4) is decoded. */
                    video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader;
                    video->vlcDecCoeffInter = &VlcDecTCOEFShortHeader;
                    if (mode == MPEG4_MODE)
                    {
                        /* Set up VOL header bitstream for frame-based decoding.  08/30/2000 */
                        BitstreamReset(stream, decCtrl->volbuf[idx], decCtrl->volbuf_size[idx]);

                        switch (DecodeVOLHeader(video, idx))
                        {
                            case PV_SUCCESS :
                                if (status == PV_TRUE)
                                    status = PV_TRUE;   /* we want to make sure that if first layer is bad, second layer is good return PV_FAIL */
                                else
                                    status = PV_FALSE;
                                break;
#ifdef PV_TOLERATE_VOL_ERRORS
                            case PV_BAD_VOLHEADER:
                                status = PV_TRUE;
                                break;
#endif
                            default :
                                status = PV_FALSE;
                                break;
                        }

                    }
                    else
                    {
                        video->shortVideoHeader = PV_H263;
                    }

                    if (video->shortVideoHeader)
                    {
                        if (mode != FLV_MODE)
                        {
                            mode = H263_MODE;
                        }
                        else
                        {
                            video->shortVideoHeader = PV_FLV1;
                        }
                        /* Set max width and height.  In H.263 mode, we use    */
                        /* volbuf_size[0] to pass in width and volbuf_size[1]  */
                        /* to pass in height.                    04/23/2001 */
                        video->prevVop->temporalRef = 0; /*  11/12/01 */
                        /* Compute some convenience variables:   04/23/2001 */
                        video->vol[idx]->quantType = 0;
                        video->vol[idx]->quantPrecision = 5;
                        video->vol[idx]->errorResDisable = 1;
                        video->vol[idx]->dataPartitioning = 0;
                        video->vol[idx]->useReverseVLC = 0;
                        video->intra_acdcPredDisable = 1;
                        video->vol[idx]->scalability = 0;

                        video->size = (int32)width * height;
                        video->displayWidth = video->width = width;
                        video->displayHeight = video->height = height;
#ifdef PV_ANNEX_IJKT_SUPPORT
                        video->modified_quant = 0;
                        video->advanced_INTRA = 0;
                        video->deblocking = 0;
                        video->slice_structure = 0;
#endif
                    }
                }
            }
        }
        if (status != PV_FALSE)
        {
            /* Frame buffers etc. are only allocated here for MPEG-4; for
             * H.263/FLV they are allocated later when dimensions are known. */
            if (mode == MPEG4_MODE /* || width !=0 && height !=0 */)
            {
                status = PVAllocVideoData(decCtrl, width, height, nLayers);
                video->initialized = PV_TRUE;
            }
        }
    }
    else
    {
        status = PV_FALSE;
    }

    if (status == PV_FALSE) PVCleanUpVideoDecoder(decCtrl);

    return status;
}

/* Allocate the frame buffers and per-macroblock bookkeeping arrays once the
 * VOP dimensions are known.  Returns PV_TRUE on success, PV_FALSE on any
 * allocation failure. */
Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLayers)
{
    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;
    Bool status = PV_TRUE;
    int nTotalMB;
    int nMBPerRow;
    int32 size;

    if (video->shortVideoHeader)
    {
        video->displayWidth = video->width = width;
        video->displayHeight = video->height = height;
video->nMBPerRow = video->nMBinGOB = video->width / MB_SIZE; video->nMBPerCol = video->nGOBinVop = video->height / MB_SIZE; video->nTotalMB = video->nMBPerRow * video->nMBPerCol; } size = (int32)sizeof(PIXEL) * video->width * video->height; #ifdef PV_MEMORY_POOL decCtrl->size = size; #else #ifdef DEC_INTERNAL_MEMORY_OPT video->currVop->yChan = IMEM_currVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/ if (video->currVop->yChan == NULL) status = PV_FALSE; video->currVop->uChan = video->currVop->yChan + size; video->currVop->vChan = video->currVop->uChan + (size >> 2); video->prevVop->yChan = IMEM_prevVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/ if (video->prevVop->yChan == NULL) status = PV_FALSE; video->prevVop->uChan = video->prevVop->yChan + size; video->prevVop->vChan = video->prevVop->uChan + (size >> 2); #else video->currVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/ if (video->currVop->yChan == NULL) status = PV_FALSE; video->currVop->uChan = video->currVop->yChan + size; video->currVop->vChan = video->currVop->uChan + (size >> 2); video->prevVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/ if (video->prevVop->yChan == NULL) status = PV_FALSE; video->prevVop->uChan = video->prevVop->yChan + size; video->prevVop->vChan = video->prevVop->uChan + (size >> 2); #endif video->memoryUsage += (size * 3); #endif // MEMORY_POOL /* Note that baseVop, enhcVop is only used to hold enhancement */ /* layer header information. 
05/04/2000 */ if (nLayers > 1) { video->prevEnhcVop = (Vop *) oscl_malloc(sizeof(Vop)); video->memoryUsage += (sizeof(Vop)); if (video->prevEnhcVop == NULL) { status = PV_FALSE; } else { oscl_memset(video->prevEnhcVop, 0, sizeof(Vop)); #ifndef PV_MEMORY_POOL video->prevEnhcVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/ if (video->prevEnhcVop->yChan == NULL) status = PV_FALSE; video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size; video->prevEnhcVop->vChan = video->prevEnhcVop->uChan + (size >> 2); video->memoryUsage += (3 * size / 2); #endif } } /* Allocating space for slices, AC prediction flag, and */ /* AC/DC prediction storage */ nTotalMB = video->nTotalMB; nMBPerRow = video->nMBPerRow; #ifdef DEC_INTERNAL_MEMORY_OPT video->sliceNo = (uint8 *)(IMEM_sliceNo); if (video->sliceNo == NULL) status = PV_FALSE; video->memoryUsage += nTotalMB; video->acPredFlag = (uint8 *)(IMEM_acPredFlag); if (video->acPredFlag == NULL) status = PV_FALSE; video->memoryUsage += (nTotalMB); video->predDC = (typeDCStore *)(IMEM_predDC); if (video->predDC == NULL) status = PV_FALSE; video->memoryUsage += (nTotalMB * sizeof(typeDCStore)); video->predDCAC_col = (typeDCACStore *)(IMEM_predDCAC_col); if (video->predDCAC_col == NULL) status = PV_FALSE; video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore)); video->predDCAC_row = video->predDCAC_col + 1; video->headerInfo.Mode = (uint8 *)(IMEM_headerInfo_Mode); if (video->headerInfo.Mode == NULL) status = PV_FALSE; video->memoryUsage += nTotalMB; video->headerInfo.CBP = (uint8 *)(IMEM_headerInfo_CBP); if (video->headerInfo.CBP == NULL) status = PV_FALSE; video->memoryUsage += nTotalMB; video->QPMB = (int *)(IMEM_headerInfo_QPMB); if (video->QPMB == NULL) status = PV_FALSE; video->memoryUsage += (nTotalMB * sizeof(int)); video->mblock = &IMEM_mblock; if (video->mblock == NULL) status = PV_FALSE; oscl_memset(video->mblock->block, 0, sizeof(int16)*6*NCOEFF_BLOCK); // Aug 23,2005 
video->memoryUsage += sizeof(MacroBlock); video->motX = (MOT *)(IMEM_motX); if (video->motX == NULL) status = PV_FALSE; video->motY = (MOT *)(IMEM_motY); if (video->motY == NULL) status = PV_FALSE; video->memoryUsage += (sizeof(MOT) * 8 * nTotalMB); #else video->sliceNo = (uint8 *) oscl_malloc(nTotalMB); if (video->sliceNo == NULL) status = PV_FALSE; video->memoryUsage += nTotalMB; video->acPredFlag = (uint8 *) oscl_malloc(nTotalMB * sizeof(uint8)); if (video->acPredFlag == NULL) status = PV_FALSE; video->memoryUsage += (nTotalMB); video->predDC = (typeDCStore *) oscl_malloc(nTotalMB * sizeof(typeDCStore)); if (video->predDC == NULL) status = PV_FALSE; video->memoryUsage += (nTotalMB * sizeof(typeDCStore)); video->predDCAC_col = (typeDCACStore *) oscl_malloc((nMBPerRow + 1) * sizeof(typeDCACStore)); if (video->predDCAC_col == NULL) status = PV_FALSE; video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore)); /* element zero will be used for storing vertical (col) AC coefficients */ /* the rest will be used for storing horizontal (row) AC coefficients */ video->predDCAC_row = video->predDCAC_col + 1; /* ACDC */ /* Allocating HeaderInfo structure & Quantizer array */ video->headerInfo.Mode = (uint8 *) oscl_malloc(nTotalMB); if (video->headerInfo.Mode == NULL) status = PV_FALSE; video->memoryUsage += nTotalMB; video->headerInfo.CBP = (uint8 *) oscl_malloc(nTotalMB); if (video->headerInfo.CBP == NULL) status = PV_FALSE; video->memoryUsage += nTotalMB; video->QPMB = (int16 *) oscl_malloc(nTotalMB * sizeof(int16)); if (video->QPMB == NULL) status = PV_FALSE; video->memoryUsage += (nTotalMB * sizeof(int)); /* Allocating macroblock space */ video->mblock = (MacroBlock *) oscl_malloc(sizeof(MacroBlock)); if (video->mblock == NULL) { status = PV_FALSE; } else { oscl_memset(video->mblock->block, 0, sizeof(int16)*6*NCOEFF_BLOCK); // Aug 23,2005 video->memoryUsage += sizeof(MacroBlock); } /* Allocating motion vector space */ video->motX = (MOT *) oscl_malloc(sizeof(MOT) * 
4 * nTotalMB); if (video->motX == NULL) status = PV_FALSE; video->motY = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB); if (video->motY == NULL) status = PV_FALSE; video->memoryUsage += (sizeof(MOT) * 8 * nTotalMB); #endif #ifdef PV_POSTPROC_ON /* Allocating space for post-processing Mode */ #ifdef DEC_INTERNAL_MEMORY_OPT video->pstprcTypCur = IMEM_pstprcTypCur; video->memoryUsage += (nTotalMB * 6); if (video->pstprcTypCur == NULL) { status = PV_FALSE; } else { oscl_memset(video->pstprcTypCur, 0, 4*nTotalMB + 2*nTotalMB); } video->pstprcTypPrv = IMEM_pstprcTypPrv; video->memoryUsage += (nTotalMB * 6); if (video->pstprcTypPrv == NULL) { status = PV_FALSE; } else { oscl_memset(video->pstprcTypPrv, 0, nTotalMB*6); } #else video->pstprcTypCur = (uint8 *) oscl_malloc(nTotalMB * 6); video->memoryUsage += (nTotalMB * 6); if (video->pstprcTypCur == NULL) { status = PV_FALSE; } else { oscl_memset(video->pstprcTypCur, 0, 4*nTotalMB + 2*nTotalMB); } video->pstprcTypPrv = (uint8 *) oscl_malloc(nTotalMB * 6); video->memoryUsage += (nTotalMB * 6); if (video->pstprcTypPrv == NULL) { status = PV_FALSE; } else { oscl_memset(video->pstprcTypPrv, 0, nTotalMB*6); } #endif #endif /* initialize the decoder library */ video->prevVop->predictionType = I_VOP; video->prevVop->timeStamp = 0; #ifndef PV_MEMORY_POOL oscl_memset(video->prevVop->yChan, 16, sizeof(uint8)*size); /* 10/31/01 */ oscl_memset(video->prevVop->uChan, 128, sizeof(uint8)*size / 2); oscl_memset(video->currVop->yChan, 0, sizeof(uint8)*size*3 / 2); if (nLayers > 1) { oscl_memset(video->prevEnhcVop->yChan, 0, sizeof(uint8)*size*3 / 2); video->prevEnhcVop->timeStamp = 0; } video->concealFrame = video->prevVop->yChan; /* 07/07/2001 */ decCtrl->outputFrame = video->prevVop->yChan; /* 06/19/2002 */ #endif /* always start from base layer */ video->currLayer = 0; return status; } /* ======================================================================== */ /* Function : PVResetVideoDecoder() */ /* Date : 01/14/2002 */ /* Purpose 
: Reset video timestamps */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */
/*  Modified :                                                              */
/* ======================================================================== */
Bool PVResetVideoDecoder(VideoDecControls *decCtrl)
{
    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;
    int idx;

    /* zero the cached per-layer VOP-header timestamps and the reference */
    /* frame timestamps so the next decode starts a fresh timeline       */
    for (idx = 0; idx < decCtrl->nLayers; idx++)
    {
        video->vopHeader[idx]->timeStamp = 0;
    }
    video->prevVop->timeStamp = 0;
    if (decCtrl->nLayers > 1) video->prevEnhcVop->timeStamp = 0;

    /* clear residual coefficient scratch so stale data cannot leak into */
    /* the first decoded macroblock                                      */
    oscl_memset(video->mblock->block, 0, sizeof(int16)*6*NCOEFF_BLOCK); //  Aug 23,2005

    return PV_TRUE;
}


/* ======================================================================== */
/*  Function : PVCleanUpVideoDecoder()                                      */
/*  Date     : 04/11/2000, 08/29/2000                                       */
/*  Purpose  : Cleanup of the MPEG-4 video decoder library.                 */
/*             Releases (or, for internal static memory, just unlinks)      */
/*             everything PVAllocVideoData/PVInitVideoDecoder acquired.     */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */
/*  Modified :                                                              */
/* ======================================================================== */
OSCL_EXPORT_REF Bool PVCleanUpVideoDecoder(VideoDecControls *decCtrl)
{
    int idx;
    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;
#ifdef DEC_INTERNAL_MEMORY_OPT
    /* buffers are statically provided (IMEM_*): only NULL the pointers; */
    /* the one heap object is the enhancement-layer Vop                  */
    if (video)
    {
#ifdef PV_POSTPROC_ON
        video->pstprcTypCur = NULL;
        video->pstprcTypPrv = NULL;
#endif
        video->acPredFlag = NULL;
        video->sliceNo = NULL;
        video->motX = NULL;
        video->motY = NULL;
        video->mblock = NULL;
        video->QPMB = NULL;
        video->predDC = NULL;
        video->predDCAC_row = NULL;
        video->predDCAC_col = NULL;
        video->headerInfo.Mode = NULL;
        video->headerInfo.CBP = NULL;
        if (video->numberOfLayers > 1)
        {
            if (video->prevEnhcVop)
            {
                video->prevEnhcVop->uChan = NULL;
                video->prevEnhcVop->vChan = NULL;
                if (video->prevEnhcVop->yChan) oscl_free(video->prevEnhcVop->yChan);
                oscl_free(video->prevEnhcVop);
            }
        }
        if (video->currVop)
        {
            video->currVop->uChan = NULL;
            video->currVop->vChan = NULL;
            if (video->currVop->yChan)
                video->currVop->yChan = NULL;
            video->currVop = NULL;
        }
        if (video->prevVop)
        {
            video->prevVop->uChan = NULL;
            video->prevVop->vChan = NULL;
            if (video->prevVop->yChan)
                video->prevVop->yChan = NULL;
            video->prevVop = NULL;
        }
        if (video->vol)
        {
            for (idx = 0; idx < video->numberOfLayers; idx++)
            {
                if (video->vol[idx])
                {
                    BitstreamClose(video->vol[idx]->bitstream);
                    video->vol[idx]->bitstream = NULL;
                    video->vol[idx] = NULL;
                }
                video->vopHeader[idx] = NULL;
            }
            video->vol = NULL;
            video->vopHeader = NULL;
        }
        video = NULL;
        decCtrl->videoDecoderData = NULL;
    }
#else
    /* heap-allocated build: free everything in reverse-ish order; every */
    /* pointer is checked so a partially-initialized decoder (alloc      */
    /* failure path) can be cleaned up safely                            */
    if (video)
    {
#ifdef PV_POSTPROC_ON
        if (video->pstprcTypCur) oscl_free(video->pstprcTypCur);
        if (video->pstprcTypPrv) oscl_free(video->pstprcTypPrv);
#endif
        if (video->predDC) oscl_free(video->predDC);
        video->predDCAC_row = NULL;  /* alias into predDCAC_col: never freed itself */
        if (video->predDCAC_col) oscl_free(video->predDCAC_col);
        if (video->motX) oscl_free(video->motX);
        if (video->motY) oscl_free(video->motY);
        if (video->mblock) oscl_free(video->mblock);
        if (video->QPMB) oscl_free(video->QPMB);
        if (video->headerInfo.Mode) oscl_free(video->headerInfo.Mode);
        if (video->headerInfo.CBP) oscl_free(video->headerInfo.CBP);
        if (video->sliceNo) oscl_free(video->sliceNo);
        if (video->acPredFlag) oscl_free(video->acPredFlag);

        if (video->numberOfLayers > 1)
        {
            if (video->prevEnhcVop)
            {
                video->prevEnhcVop->uChan = NULL;
                video->prevEnhcVop->vChan = NULL;
                if (video->prevEnhcVop->yChan) oscl_free(video->prevEnhcVop->yChan);
                oscl_free(video->prevEnhcVop);
            }
        }
        if (video->currVop)
        {
#ifndef PV_MEMORY_POOL
            video->currVop->uChan = NULL;
            video->currVop->vChan = NULL;
            if (video->currVop->yChan)
                oscl_free(video->currVop->yChan);
#endif
            oscl_free(video->currVop);
        }
        if (video->prevVop)
        {
#ifndef PV_MEMORY_POOL
            video->prevVop->uChan = NULL;
            video->prevVop->vChan = NULL;
            if (video->prevVop->yChan)
                oscl_free(video->prevVop->yChan);
#endif
            oscl_free(video->prevVop);
        }
        if (video->vol)
        {
            for (idx = 0; idx < video->numberOfLayers; idx++)
            {
                if (video->vol[idx])
                {
                    if (video->vol[idx]->bitstream)
                    {
                        BitstreamClose(video->vol[idx]->bitstream);
                        oscl_free(video->vol[idx]->bitstream);
                    }
                    oscl_free(video->vol[idx]);
                }
            }
            oscl_free(video->vol);
        }
        for (idx = 0; idx < video->numberOfLayers; idx++)
        {
            if (video->vopHeader[idx]) oscl_free(video->vopHeader[idx]);
        }
        if (video->vopHeader) oscl_free(video->vopHeader);

        oscl_free(video);
        decCtrl->videoDecoderData = NULL;
    }
#endif
    return PV_TRUE;
}
/* ======================================================================== */
/*  Function : PVGetVideoDimensions()                                       */
/*  Date     : 040505                                                       */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : the display_width and display_height of                      */
/*             the frame in the current layer.                              */
/*  Note     : This is not a macro or inline function because we do         */
/*              not want to expose our internal data structure.             */
/*  Modified :                                                              */
/* ======================================================================== */
OSCL_EXPORT_REF void PVGetVideoDimensions(VideoDecControls *decCtrl, int32 *display_width, int32 *display_height)
{
    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;
    *display_width = video->displayWidth;
    *display_height = video->displayHeight;
}

/* ======================================================================== */
/*  Function : PVGetVideoTimeStamp()                                        */
/*  Date     : 04/27/2000, 08/29/2000                                       */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : current time stamp in millisecond.                           */
/*  Note     :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
uint32 PVGetVideoTimeStamp(VideoDecControls *decCtrl)
{
    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;
    return video->currTimestamp;
}


/* ======================================================================== */
/*  Function : PVSetPostProcType()                                          */
/*  Date     : 07/07/2000                                                   */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : Set post-processing filter type.                             */
/*  Note     :                                                              */
/*  Modified : . 08/29/2000 changes the name for consistency.
*/ /* ======================================================================== */ OSCL_EXPORT_REF void PVSetPostProcType(VideoDecControls *decCtrl, int mode) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; video->postFilterType = mode; } /* ======================================================================== */ /* Function : PVGetDecBitrate() */ /* Date : 08/23/2000 */ /* Purpose : */ /* In/out : */ /* Return : This function returns the average bits per second. */ /* Note : */ /* Modified : */ /* ======================================================================== */ int PVGetDecBitrate(VideoDecControls *decCtrl) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; int idx; int32 sum = 0; for (idx = 0; idx < BITRATE_AVERAGE_WINDOW; idx++) { sum += video->nBitsPerVop[idx]; } sum = (sum * video->frameRate) / (10 * BITRATE_AVERAGE_WINDOW); return (int) sum; } /* ======================================================================== */ /* Function : PVGetDecFramerate() */ /* Date : 08/23/2000 */ /* Purpose : */ /* In/out : */ /* Return : This function returns the average frame per 10 second. 
*/ /* Note : The fps can be calculated by PVGetDecFramerate()/10 */ /* Modified : */ /* ======================================================================== */ int PVGetDecFramerate(VideoDecControls *decCtrl) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; return video->frameRate; } /* ======================================================================== */ /* Function : PVGetOutputFrame() */ /* Date : 05/07/2001 */ /* Purpose : */ /* In/out : */ /* Return : This function returns the pointer to the output frame */ /* Note : */ /* Modified : */ /* ======================================================================== */ uint8 *PVGetDecOutputFrame(VideoDecControls *decCtrl) { return decCtrl->outputFrame; } /* ======================================================================== */ /* Function : PVGetLayerID() */ /* Date : 07/09/2001 */ /* Purpose : */ /* In/out : */ /* Return : This function returns decoded frame layer id (BASE/ENHANCE) */ /* Note : */ /* Modified : */ /* ======================================================================== */ int PVGetLayerID(VideoDecControls *decCtrl) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; return video->currLayer; } /* ======================================================================== */ /* Function : PVGetDecMemoryUsage() */ /* Date : 08/23/2000 */ /* Purpose : */ /* In/out : */ /* Return : This function returns the amount of memory used. */ /* Note : */ /* Modified : */ /* ======================================================================== */ int32 PVGetDecMemoryUsage(VideoDecControls *decCtrl) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; return video->memoryUsage; } /* ======================================================================== */ /* Function : PVGetDecBitstreamMode() */ /* Date : 08/23/2000 */ /* Purpose : */ /* In/out : */ /* Return : This function returns the decoding mode of the baselayer */ /* bitstream. 
*/
/*  Note     :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
OSCL_EXPORT_REF MP4DecodingMode PVGetDecBitstreamMode(VideoDecControls *decCtrl)
{
    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;
    if (video->shortVideoHeader)
    {
        /* NOTE(review): bitwise '&' against PV_FLV1 -- assumes PV_FLV1    */
        /* carries a bit not set in PV_H263; confirm the enum values.      */
        if (video->shortVideoHeader & PV_FLV1)
        {
            return FLV_MODE;
        }
        else
        {
            return H263_MODE;
        }
    }
    else
    {
        return MPEG4_MODE;
    }
}


/* ======================================================================== */
/*  Function : PVExtractVolHeader()                                         */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : Extract vol header of the bitstream from buffer[].           */
/*             Copies everything up to (not including) the first GOV        */
/*             (0xB3) or VOP (0xB6) start code into vol_header and sets     */
/*             *vol_header_size accordingly.  For a short-header (H.263)    */
/*             stream a fixed 32 bytes are copied.                          */
/*  Note     : on entry *vol_header_size bounds the scan; vol_header is     */
/*             assumed large enough for the copied prefix -- caller's       */
/*             responsibility (TODO confirm).                               */
/*  Modified :                                                              */
/* ======================================================================== */
Bool PVExtractVolHeader(uint8 *video_buffer, uint8 *vol_header, int32 *vol_header_size)
{
    int idx = -1;
    uint8 start_code_prefix[] = { 0x00, 0x00, 0x01 };
    uint8 h263_prefix[] = { 0x00, 0x00, 0x80 };

    if (oscl_memcmp(h263_prefix, video_buffer, 3) == 0) /* we have short header stream */
    {
        oscl_memcpy(vol_header, video_buffer, 32);
        *vol_header_size = 32;
        return TRUE;
    }
    else
    {
        /* must begin with a start code for VO (< 0x20) or VOL (0xB0) */
        if (oscl_memcmp(start_code_prefix, video_buffer, 3) ||
                (video_buffer[3] != 0xb0 && video_buffer[3] >= 0x20)) return FALSE;

        /* scan forward start-code by start-code until GOV/VOP is found */
        do
        {
            idx++;
            while (oscl_memcmp(start_code_prefix, video_buffer + idx, 3))
            {
                idx++;
                if (idx + 3 >= *vol_header_size) goto quit;
            }
        }
        while (video_buffer[idx+3] != 0xb3 && video_buffer[idx+3] != 0xb6);

        oscl_memcpy(vol_header, video_buffer, idx);
        *vol_header_size = idx;
        return TRUE;
    }

quit:
    /* no GOV/VOP start code within the bound: hand back the whole prefix */
    oscl_memcpy(vol_header, video_buffer, *vol_header_size);
    return FALSE;
}


/* ======================================================================== */
/*  Function : PVLocateFrameHeader()                                        */
/*  Date     : 04/8/2005                                                    */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : Return the offset to the first SC in the buffer              */
/*             (offset of the 00 00 01 MPEG-4 start-code prefix;            */
/*             `size` if none is found).                                    */
/*  Note     :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
int32 PVLocateFrameHeader(uint8 *ptr, int32 size)
{
    int count = 0;          /* number of consecutive zero bytes seen */
    int32 i = size;

    if (size < 1)
    {
        return 0;
    }
    while (i--)
    {
        /* 0x01 after two (or more) zero bytes: rewind to the prefix start */
        if ((count > 1) && (*ptr == 0x01))
        {
            i += 2;
            break;
        }

        if (*ptr++)
            count = 0;
        else
            count++;
    }
    return (size - (i + 1));
}


/* ======================================================================== */
/*  Function : PVLocateH263FrameHeader()                                    */
/*  Date     : 04/8/2005                                                    */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : Return the offset to the first SC in the buffer              */
/*             (H.263 PSC: 16 zero bits followed by 1000 00xx).             */
/*  Note     :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
int32 PVLocateH263FrameHeader(uint8 *ptr, int32 size)
{
    int count = 0;          /* number of consecutive zero bytes seen */
    int32 i = size;

    if (size < 1)
    {
        return 0;
    }
    while (i--)
    {
        /* top six bits 100000 after two zero bytes marks the PSC */
        if ((count > 1) && ((*ptr & 0xFC) == 0x80))
        {
            i += 2;
            break;
        }

        if (*ptr++)
            count = 0;
        else
            count++;
    }
    return (size - (i + 1));
}


/* ======================================================================== */
/*  Function : PVDecodeVideoFrame()                                         */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Decode one video frame and return a YUV-12 image.
*/
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Note     :                                                              */
/*  Modified : 04/17/2001 removed PV_EOS, PV_END_OF_BUFFER                  */
/*           : 08/22/2002 break up into 2 functions PVDecodeVopHeader and   */
/*                          PVDecodeVopBody                                 */
/* ======================================================================== */
OSCL_EXPORT_REF Bool PVDecodeVideoFrame(VideoDecControls *decCtrl, uint8 *buffer[],
                                        uint32 timestamp[], int32 buffer_size[], uint use_ext_timestamp[], uint8 *currYUV)
{
    PV_STATUS status = PV_FAIL;
    VopHeaderInfo header_info;

    /* convenience wrapper: header pass then body pass */
    status = (PV_STATUS)PVDecodeVopHeader(decCtrl, buffer, timestamp, buffer_size, &header_info, use_ext_timestamp, currYUV);
    if (status != PV_TRUE)
        return PV_FALSE;

    if (PVDecodeVopBody(decCtrl, buffer_size) != PV_TRUE)
    {
        return PV_FALSE;
    }

    return PV_TRUE;
}

/* ======================================================================== */
/*  Function : PVDecodeVopHeader()                                          */
/*  Date     : 08/22/2002                                                   */
/*  Purpose  : Determine target layer and decode vop header, modified from  */
/*              original PVDecodeVideoFrame.  Fills header_info and leaves  */
/*              the decoder positioned for PVDecodeVopBody().               */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Note     : NOTE(review): timestamp[] is uint32 yet is assigned -1 and   */
/*             compared with `< 0` below -- on unsigned arithmetic those    */
/*             comparisons can never be true; confirm intended semantics    */
/*             (inherited behavior, left untouched here).                   */
/*  Modified :                                                              */
/* ======================================================================== */
Bool PVDecodeVopHeader(VideoDecControls *decCtrl, uint8 *buffer[],
                       uint32 timestamp[], int32 buffer_size[], VopHeaderInfo *header_info, uint use_ext_timestamp [], uint8 *currYUV)
{
    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;
    Vol *currVol;
    Vop *currVop = video->currVop;
    Vop **vopHeader = video->vopHeader;
    BitstreamDecVideo *stream;

    int target_layer;

#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY
    PV_STATUS status = PV_FAIL;
    int idx;
    int32 display_time;

    /* decide which frame to decode next */
    if (decCtrl->nLayers > 1)
    {
        display_time = target_layer = -1;
        for (idx = 0; idx < decCtrl->nLayers; idx++)
        {
            /* do we have data for this layer? */
            if (buffer_size[idx] <= 0)
            {
                timestamp[idx] = -1;
                continue;
            }

            /* did the application provide a timestamp for this vop? */
            if (timestamp[idx] < 0)
            {
                if (vopHeader[idx]->timeStamp < 0)
                {
                    /* decode the timestamp in the bitstream */
                    video->currLayer = idx;
                    stream = video->vol[idx]->bitstream;
                    BitstreamReset(stream, buffer[idx], buffer_size[idx]);

                    while ((status = DecodeVOPHeader(video, vopHeader[idx], FALSE)) != PV_SUCCESS)
                    {
                        /* Try to find a VOP header in the buffer.   08/30/2000. */
                        if (PVSearchNextM4VFrame(stream) != PV_SUCCESS)
                        {
                            /* if we don't have data for enhancement layer, */
                            /*    don't just stop.   09/07/2000.            */
                            buffer_size[idx] = 0;
                            break;
                        }
                    }
                    if (status == PV_SUCCESS)
                    {
                        vopHeader[idx]->timeStamp =
                            timestamp[idx] = CalcVopDisplayTime(video->vol[idx], vopHeader[idx], video->shortVideoHeader);
                        if (idx == 0) vopHeader[idx]->refSelectCode = 1;
                    }
                }
                else
                {
                    /* We've decoded this vop header in the previous run already. */
                    timestamp[idx] = vopHeader[idx]->timeStamp;
                }
            }

            /* Use timestamps to select the next VOP to be decoded: */
            /* pick the layer with the earliest display time        */
            if (timestamp[idx] >= 0 && (display_time < 0 || display_time > timestamp[idx]))
            {
                display_time = timestamp[idx];
                target_layer = idx;
            }
            else if (display_time == timestamp[idx])
            {
                /* we have to handle either SNR or spatial scalability here. */
            }
        }

        if (target_layer < 0) return PV_FALSE;

        /* set up for decoding the target layer */
        video->currLayer = target_layer;
        currVol = video->vol[target_layer];
        video->bitstream = stream = currVol->bitstream;

        /* We need to decode the vop header if external timestamp   */
        /*  is provided.    10/04/2000                              */
        if (vopHeader[target_layer]->timeStamp < 0)
        {
            stream = video->vol[target_layer]->bitstream;
            BitstreamReset(stream, buffer[target_layer], buffer_size[target_layer]);

            while (DecodeVOPHeader(video, vopHeader[target_layer], TRUE) != PV_SUCCESS)
            {
                /* Try to find a VOP header in the buffer.   08/30/2000. */
                if (PVSearchNextM4VFrame(stream) != PV_SUCCESS)
                {
                    /* if we don't have data for enhancement layer, */
                    /*    don't just stop.   09/07/2000.            */
                    buffer_size[target_layer] = 0;
                    break;
                }
            }

            video->vol[target_layer]->timeInc_offset = vopHeader[target_layer]->timeInc;
            video->vol[target_layer]->moduloTimeBase = timestamp[target_layer];
            vopHeader[target_layer]->timeStamp = timestamp[target_layer];
            if (target_layer == 0) vopHeader[target_layer]->refSelectCode = 1;
        }
    }
    else /* base layer only decoding */
    {
#endif
        video->currLayer = target_layer = 0;
        currVol = video->vol[0];
        video->bitstream = stream = currVol->bitstream;
        if (buffer_size[0] <= 0) return PV_FALSE;
        BitstreamReset(stream, buffer[0], buffer_size[0]);

        if (video->shortVideoHeader)
        {
            while (DecodeShortHeader(video, vopHeader[0]) != PV_SUCCESS)
            {
                if (PVSearchNextH263Frame(stream) != PV_SUCCESS)
                {
                    /* There is no vop header in the buffer,    */
                    /*   clean bitstream buffer.     2/5/2001   */
                    buffer_size[0] = 0;

                    if (video->initialized == PV_FALSE)
                    {
                        video->displayWidth = video->width = 0;
                        video->displayHeight = video->height = 0;
                    }

                    return PV_FALSE;
                }
            }

            /* H.263 carries dimensions in the picture header, so frame */
            /* buffers can only be allocated after the first header     */
            if (video->initialized == PV_FALSE)
            {
                if (PVAllocVideoData(decCtrl, video->width, video->height, 1) == PV_FALSE)
                {
                    video->displayWidth = video->width = 0;
                    video->displayHeight = video->height = 0;
                    return PV_FALSE;
                }
                video->initialized = PV_TRUE;
            }

            if (use_ext_timestamp[0])
            {
                /* MTB for H263 is absolute TR */
                /* following line is equivalent to  round((timestamp[0]*30)/1001);   11/13/2001 */
                video->vol[0]->moduloTimeBase = 30 * ((timestamp[0] + 17) / 1001) + (30 * ((timestamp[0] + 17) % 1001) / 1001);
                vopHeader[0]->timeStamp = timestamp[0];
            }
            else
                vopHeader[0]->timeStamp = CalcVopDisplayTime(currVol, vopHeader[0], video->shortVideoHeader);
        }
        else
        {
            while (DecodeVOPHeader(video, vopHeader[0], FALSE) != PV_SUCCESS)
            {
                /* Try to find a VOP header in the buffer.   08/30/2000. */
                if (PVSearchNextM4VFrame(stream) != PV_SUCCESS)
                {
                    /* There is no vop header in the buffer,    */
                    /*   clean bitstream buffer.   2/5/2001     */
                    buffer_size[0] = 0;
                    return PV_FALSE;
                }
            }

            if (use_ext_timestamp[0])
            {
                video->vol[0]->timeInc_offset = vopHeader[0]->timeInc;
                video->vol[0]->moduloTimeBase = timestamp[0]; /*  11/12/2001 */
                vopHeader[0]->timeStamp = timestamp[0];
            }
            else
            {
                vopHeader[0]->timeStamp = CalcVopDisplayTime(currVol, vopHeader[0], video->shortVideoHeader);
            }
        }

        /* set up some base-layer only parameters */
        vopHeader[0]->refSelectCode = 1;
#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY
    }
#endif

    timestamp[target_layer] = video->currTimestamp = vopHeader[target_layer]->timeStamp;
#ifdef PV_MEMORY_POOL
    /* application-supplied output buffer: carve Y/U/V planes out of it */
    vopHeader[target_layer]->yChan = (PIXEL *)currYUV;
    vopHeader[target_layer]->uChan = (PIXEL *)currYUV + decCtrl->size;
    vopHeader[target_layer]->vChan = (PIXEL *)(vopHeader[target_layer]->uChan) + (decCtrl->size >> 2);
#else
    vopHeader[target_layer]->yChan = currVop->yChan;
    vopHeader[target_layer]->uChan = currVop->uChan;
    vopHeader[target_layer]->vChan = currVop->vChan;
#endif
    oscl_memcpy(currVop, vopHeader[target_layer], sizeof(Vop));

#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY
    /* mark this cached header as consumed */
    vopHeader[target_layer]->timeStamp = -1;
#endif
    /* put header info into the structure */
    header_info->currLayer = target_layer;
    header_info->timestamp = video->currTimestamp;
    header_info->frameType = (MP4FrameType)currVop->predictionType;
    header_info->refSelCode = vopHeader[target_layer]->refSelectCode;
    header_info->quantizer = currVop->quantizer;
    /***************************************/

    return PV_TRUE;
}


/* ======================================================================== */
/*  Function : PVDecodeVopBody()                                            */
/*  Date     : 08/22/2002                                                   */
/*  Purpose  : Decode vop body after the header is decoded, modified from   */
/*              original PVDecodeVideoFrame.
*/ /* In/out : */ /* Return : */ /* Note : */ /* Modified : */ /* ======================================================================== */ Bool PVDecodeVopBody(VideoDecControls *decCtrl, int32 buffer_size[]) { PV_STATUS status = PV_FAIL; VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData; int target_layer = video->currLayer; Vol *currVol = video->vol[target_layer]; Vop *currVop = video->currVop; Vop *prevVop = video->prevVop; Vop *tempVopPtr; int bytes_consumed = 0; /* Record how many bits we used in the buffer. 04/24/2001 */ int idx; if (currVop->vopCoded == 0) /* 07/03/2001 */ { PV_BitstreamByteAlign(currVol->bitstream); /* We should always clear up bitstream buffer. 10/10/2000 */ bytes_consumed = (getPointer(currVol->bitstream) + 7) >> 3; if (bytes_consumed > currVol->bitstream->data_end_pos) { bytes_consumed = currVol->bitstream->data_end_pos; } if (bytes_consumed < buffer_size[target_layer]) { /* If we only consume part of the bits in the buffer, take those */ /* out. 04/24/2001 */ /* oscl_memcpy(buffer[target_layer], buffer[target_layer]+bytes_consumed, (buffer_size[target_layer]-=bytes_consumed)); */ buffer_size[target_layer] -= bytes_consumed; } else { buffer_size[target_layer] = 0; } #ifdef PV_MEMORY_POOL if (target_layer) { if (video->prevEnhcVop->timeStamp > video->prevVop->timeStamp) { video->prevVop = video->prevEnhcVop; } } oscl_memcpy(currVop->yChan, video->prevVop->yChan, (decCtrl->size*3) / 2); video->prevVop = prevVop; video->concealFrame = currVop->yChan; /* 07/07/2001 */ video->vop_coding_type = currVop->predictionType; /* 07/09/01 */ decCtrl->outputFrame = currVop->yChan; /* Swap VOP pointers. No enhc. frame oscl_memcpy() anymore! 
04/24/2001 */ if (target_layer) { tempVopPtr = video->prevEnhcVop; video->prevEnhcVop = video->currVop; video->currVop = tempVopPtr; } else { tempVopPtr = video->prevVop; video->prevVop = video->currVop; video->currVop = tempVopPtr; } #else if (target_layer) /* this is necessary to avoid flashback problems 06/21/2002*/ { video->prevEnhcVop->timeStamp = currVop->timeStamp; } else { video->prevVop->timeStamp = currVop->timeStamp; } #endif video->vop_coding_type = currVop->predictionType; /* 07/09/01 */ /* the following is necessary to avoid displaying an notCoded I-VOP at the beginning of a session or after random positioning 07/03/02*/ if (currVop->predictionType == I_VOP) { video->vop_coding_type = P_VOP; } return PV_TRUE; } /* ======================================================= */ /* Decode vop body (if there is no error in the header!) */ /* ======================================================= */ /* first, we need to select a reference frame */ if (decCtrl->nLayers > 1) { if (currVop->predictionType == I_VOP) { /* do nothing here */ } else if (currVop->predictionType == P_VOP) { switch (currVop->refSelectCode) { case 0 : /* most recently decoded enhancement vop */ /* Setup video->prevVop before we call PV_DecodeVop(). 04/24/2001 */ if (video->prevEnhcVop->timeStamp >= video->prevVop->timeStamp) video->prevVop = video->prevEnhcVop; break; case 1 : /* most recently displayed base-layer vop */ if (target_layer) { if (video->prevEnhcVop->timeStamp > video->prevVop->timeStamp) video->prevVop = video->prevEnhcVop; } break; case 2 : /* next base-layer vop in display order */ break; case 3 : /* temporally coincident base-layer vop (no MV's) */ break; } } else /* we have a B-Vop */ { mp4dec_log("DecodeVideoFrame(): B-VOP not supported.\n"); } } /* This is for the calculation of the frame rate and bitrate. */ idx = ++video->frame_idx % BITRATE_AVERAGE_WINDOW; /* Calculate bitrate for this layer. 
08/23/2000 */ status = PV_DecodeVop(video); video->nBitsPerVop[idx] = getPointer(currVol->bitstream); video->prevTimestamp[idx] = currVop->timeStamp; /* restore video->prevVop after PV_DecodeVop(). 04/24/2001 */ // if (currVop->refSelectCode == 0) video->prevVop = prevVop; video->prevVop = prevVop; /* Estimate the frame rate. 08/23/2000 */ video->duration = video->prevTimestamp[idx]; video->duration -= video->prevTimestamp[(++idx)%BITRATE_AVERAGE_WINDOW]; if (video->duration > 0) { /* Only update framerate when the timestamp is right */ video->frameRate = (int)(FRAMERATE_SCALE) / video->duration; } /* We should always clear up bitstream buffer. 10/10/2000 */ bytes_consumed = (getPointer(currVol->bitstream) + 7) >> 3; /* 11/4/03 */ if (bytes_consumed > currVol->bitstream->data_end_pos) { bytes_consumed = currVol->bitstream->data_end_pos; } if (bytes_consumed < buffer_size[target_layer]) { /* If we only consume part of the bits in the buffer, take those */ /* out. 04/24/2001 */ /* oscl_memcpy(buffer[target_layer], buffer[target_layer]+bytes_consumed, (buffer_size[target_layer]-=bytes_consumed)); */ buffer_size[target_layer] -= bytes_consumed; } else { buffer_size[target_layer] = 0; } switch (status) { case PV_FAIL : return PV_FALSE; /* this will take care of concealment if we lose whole frame */ case PV_END_OF_VOP : /* we may want to differenciate PV_END_OF_VOP and PV_SUCCESS */ /* in the future. 05/10/2000 */ case PV_SUCCESS : /* Nohting is wrong :). */ video->concealFrame = video->currVop->yChan; /* 07/07/2001 */ video->vop_coding_type = video->currVop->predictionType; /* 07/09/01 */ decCtrl->outputFrame = video->currVop->yChan; /* Swap VOP pointers. No enhc. frame oscl_memcpy() anymore! 
04/24/2001 */ if (target_layer) { tempVopPtr = video->prevEnhcVop; video->prevEnhcVop = video->currVop; video->currVop = tempVopPtr; } else { tempVopPtr = video->prevVop; video->prevVop = video->currVop; video->currVop = tempVopPtr; } break; default : /* This will never happen */ break; } return PV_TRUE; } #ifdef PV_MEMORY_POOL OSCL_EXPORT_REF void PVSetReferenceYUV(VideoDecControls *decCtrl, uint8 *YUV) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; video->prevVop->yChan = (PIXEL *)YUV; video->prevVop->uChan = (PIXEL *)YUV + decCtrl->size; video->prevVop->vChan = (PIXEL *)video->prevVop->uChan + (decCtrl->size >> 2); oscl_memset(video->prevVop->yChan, 16, sizeof(uint8)*decCtrl->size); /* 10/31/01 */ oscl_memset(video->prevVop->uChan, 128, sizeof(uint8)*decCtrl->size / 2); video->concealFrame = video->prevVop->yChan; /* 07/07/2001 */ decCtrl->outputFrame = video->prevVop->yChan; /* 06/19/2002 */ } #endif /* ======================================================================== */ /* Function : VideoDecoderErrorDetected() */ /* Date : 06/20/2000 */ /* Purpose : */ /* In/out : */ /* Return : This function will be called everytime an error int the */ /* bitstream is detected. */ /* Note : */ /* Modified : */ /* ======================================================================== */ uint VideoDecoderErrorDetected(VideoDecData * video) { OSCL_UNUSED_ARG(video); /* This is only used for trapping bitstream error for debuging */ return 0; } #ifdef ENABLE_LOG #include #include /* ======================================================================== */ /* Function : m4vdec_dprintf() */ /* Date : 08/15/2000 */ /* Purpose : This is a function that logs messages in the mpeg4 video */ /* decoder. We can call the standard PacketVideo PVMessage */ /* from inside this function if necessary. */ /* In/out : */ /* Return : */ /* Note : To turn on the logging, LOG_MP4DEC_MESSAGE must be defined */ /* when compiling this file (only this file). 
*/ /* Modified : */ /* ======================================================================== */ void m4vdec_dprintf(char *format, ...) { FILE *log_fp; va_list args; va_start(args, format); /* open the log file */ log_fp = fopen("\\mp4dec_log.txt", "a+"); if (log_fp == NULL) return; /* output the message */ vfprintf(log_fp, format, args); fclose(log_fp); va_end(args); } #endif /* ======================================================================== */ /* Function : IsIntraFrame() */ /* Date : 05/29/2000 */ /* Purpose : */ /* In/out : */ /* Return : The most recently decoded frame is an Intra frame. */ /* Note : */ /* Modified : */ /* ======================================================================== */ Bool IsIntraFrame(VideoDecControls *decCtrl) { VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData; return (video->vop_coding_type == I_VOP); } /* ======================================================================== */ /* Function : PVDecPostProcess() */ /* Date : 01/09/2002 */ /* Purpose : PostProcess one video frame and return a YUV-12 image. */ /* In/out : */ /* Return : */ /* Note : */ /* Modified : */ /* ======================================================================== */ void PVDecPostProcess(VideoDecControls *decCtrl, uint8 *outputYUV) { uint8 *outputBuffer; #ifdef PV_POSTPROC_ON VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData; int32 tmpvar; if (outputYUV) { outputBuffer = outputYUV; } else { if (video->postFilterType) { outputBuffer = video->currVop->yChan; } else { outputBuffer = decCtrl->outputFrame; } } if (video->postFilterType) { /* Post-processing, */ PostFilter(video, video->postFilterType, outputBuffer); } else { if (outputYUV) { /* Copy decoded frame to the output buffer. 
*/ tmpvar = (int32)video->width * video->height; oscl_memcpy(outputBuffer, decCtrl->outputFrame, tmpvar*3 / 2); /* 3/3/01 */ } } #else OSCL_UNUSED_ARG(outputYUV); outputBuffer = decCtrl->outputFrame; #endif decCtrl->outputFrame = outputBuffer; return; } /* ======================================================================== */ /* Function : PVDecSetReference(VideoDecControls *decCtrl, uint8 *refYUV, */ /* int32 timestamp) */ /* Date : 07/22/2003 */ /* Purpose : Get YUV reference frame from external source. */ /* In/out : YUV 4-2-0 frame containing new reference frame in the same */ /* : dimension as original, i.e., doesn't have to be multiple of 16 !!!. */ /* Return : */ /* Note : */ /* Modified : */ /* ======================================================================== */ Bool PVDecSetReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp) { VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData; Vop *prevVop = video->prevVop; int width = video->width; uint8 *dstPtr, *orgPtr, *dstPtr2, *orgPtr2; int32 size = (int32)width * video->height; /* set new parameters */ prevVop->timeStamp = timestamp; prevVop->predictionType = I_VOP; dstPtr = prevVop->yChan; orgPtr = refYUV; oscl_memcpy(dstPtr, orgPtr, size); dstPtr = prevVop->uChan; dstPtr2 = prevVop->vChan; orgPtr = refYUV + size; orgPtr2 = orgPtr + (size >> 2); oscl_memcpy(dstPtr, orgPtr, (size >> 2)); oscl_memcpy(dstPtr2, orgPtr2, (size >> 2)); video->concealFrame = video->prevVop->yChan; video->vop_coding_type = I_VOP; decCtrl->outputFrame = video->prevVop->yChan; return PV_TRUE; } /* ======================================================================== */ /* Function : PVDecSetEnhReference(VideoDecControls *decCtrl, uint8 *refYUV, */ /* int32 timestamp) */ /* Date : 07/23/2003 */ /* Purpose : Get YUV enhance reference frame from external source. 
*/ /* In/out : YUV 4-2-0 frame containing new reference frame in the same */ /* : dimension as original, i.e., doesn't have to be multiple of 16 !!!. */ /* Return : */ /* Note : */ /* Modified : */ /* ======================================================================== */ Bool PVDecSetEnhReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp) { VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData; Vop *prevEnhcVop = video->prevEnhcVop; uint8 *dstPtr, *orgPtr, *dstPtr2, *orgPtr2; int32 size = (int32) video->width * video->height; if (video->numberOfLayers <= 1) return PV_FALSE; /* set new parameters */ prevEnhcVop->timeStamp = timestamp; prevEnhcVop->predictionType = I_VOP; dstPtr = prevEnhcVop->yChan; orgPtr = refYUV; oscl_memcpy(dstPtr, orgPtr, size); dstPtr = prevEnhcVop->uChan; dstPtr2 = prevEnhcVop->vChan; orgPtr = refYUV + size; orgPtr2 = orgPtr + (size >> 2); oscl_memcpy(dstPtr, orgPtr, (size >> 2)); oscl_memcpy(dstPtr2, orgPtr2, (size >> 2)); video->concealFrame = video->prevEnhcVop->yChan; video->vop_coding_type = I_VOP; decCtrl->outputFrame = video->prevEnhcVop->yChan; return PV_TRUE; } /* ======================================================================== */ /* Function : PVGetVolInfo() */ /* Date : 08/06/2003 */ /* Purpose : Get the vol info(only base-layer). 
*/ /* In/out : */ /* Return : */ /* Note : */ /* Modified : 06/24/2004 */ /* ======================================================================== */ Bool PVGetVolInfo(VideoDecControls *decCtrl, VolInfo *pVolInfo) { Vol *currVol; if (pVolInfo == NULL || decCtrl == NULL || decCtrl->videoDecoderData == NULL || ((VideoDecData *)decCtrl->videoDecoderData)->vol[0] == NULL) return PV_FALSE; currVol = ((VideoDecData *)(decCtrl->videoDecoderData))->vol[0]; // get the VOL info pVolInfo->shortVideoHeader = (int32)((VideoDecData *)(decCtrl->videoDecoderData))->shortVideoHeader; pVolInfo->dataPartitioning = (int32)currVol->dataPartitioning; pVolInfo->errorResDisable = (int32)currVol->errorResDisable; pVolInfo->useReverseVLC = (int32)currVol->useReverseVLC; pVolInfo->scalability = (int32)currVol->scalability; pVolInfo->nbitsTimeIncRes = (int32)currVol->nbitsTimeIncRes; pVolInfo->profile_level_id = (int32)currVol->profile_level_id; return PV_TRUE; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/pvm4vdecoder.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ ////////////////////////////////////////////////////////////////////////////////// // // // File: pvm4vdecoder.cpp // // // ////////////////////////////////////////////////////////////////////////////////// #include "oscl_mem.h" #include "mp4dec_api.h" #include "pvm4vdecoder.h" #define OSCL_DISABLE_WARNING_FORCING_INT_TO_BOOL #include "osclconfig_compiler_warnings.h" ///////////////////////////////////////////////////////////////////////////// PVM4VDecoder::PVM4VDecoder() : iVideoCtrls(NULL) { } PVM4VDecoder* PVM4VDecoder::New(void) { PVM4VDecoder* self = new PVM4VDecoder; if (self) { if (!self->Construct()) { OSCL_DELETE(self); self = NULL; } } return self; } bool PVM4VDecoder::Construct() { iVideoCtrls = (VideoDecControls *) new VideoDecControls; if (iVideoCtrls) { oscl_memset(iVideoCtrls, 0, sizeof(VideoDecControls)); return true; } else { return false; } } ///////////////////////////////////////////////////////////////////////////// PVM4VDecoder::~PVM4VDecoder() { if (iVideoCtrls) { OSCL_DELETE((VideoDecControls *)iVideoCtrls); iVideoCtrls = NULL; } } ///////////////////////////////////////////////////////////////////////////// bool PVM4VDecoder::InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers, int32* iWidth, int32* iHeight, int *mode) { if (PVInitVideoDecoder((VideoDecControls *)iVideoCtrls, (uint8 **) volbuf, (int32*)volbuf_size, (int32)nLayers, *iWidth, *iHeight, (MP4DecodingMode) *mode)) { GetVideoDimensions(iWidth, iHeight); *mode = (int)PVGetDecBitstreamMode((VideoDecControls *)iVideoCtrls); return true; } else { return false; } } ///////////////////////////////////////////////////////////////////////////// bool PVM4VDecoder::GetVolInfo(VolInfo* pVolInfo) { if (!iVideoCtrls || !pVolInfo) return false; if (PVGetVolInfo((VideoDecControls *)iVideoCtrls, pVolInfo)) { return true; } else { return false; } } 
///////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::CleanUpVideoDecoder(void) { PVCleanUpVideoDecoder((VideoDecControls *)iVideoCtrls); } ///////////////////////////////////////////////////////////////////////////// bool PVM4VDecoder::DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_timestamp, uint8 *currYUV) { return PVDecodeVideoFrame((VideoDecControls *)iVideoCtrls, (uint8 **) bitstream, (uint32*)timestamp, (int32*)buffer_size, (uint *) use_ext_timestamp, (uint8 *) currYUV) ? true : false; } ////////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::SetReferenceYUV(uint8 *YUV) { PVSetReferenceYUV((VideoDecControls *)iVideoCtrls, YUV); } ///////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::GetVideoDimensions(int32 *display_width, int32 *display_height) { PVGetVideoDimensions((VideoDecControls *)iVideoCtrls, display_width, display_height); } ///////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::SetPostProcType(int32 mode) { PVSetPostProcType((VideoDecControls *)iVideoCtrls, mode); } ///////////////////////////////////////////////////////////////////////////// uint32 PVM4VDecoder::GetVideoTimestamp(void) { return PVGetVideoTimeStamp((VideoDecControls *)iVideoCtrls); } ///////////////////////////////////////////////////////////////////////////// bool PVM4VDecoder::IsIFrame(void) { return IsIntraFrame((VideoDecControls *)iVideoCtrls) ? 
true : false; } ///////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::DecPostProcess(uint8 *YUV) { PVDecPostProcess((VideoDecControls *)iVideoCtrls, (uint8 *) YUV); } ///////////////////////////////////////////////////////////////////////////// uint8* PVM4VDecoder::GetDecOutputFrame(void) { PVDecPostProcess((VideoDecControls *)iVideoCtrls, NULL); return (uint8 *) PVGetDecOutputFrame((VideoDecControls *)iVideoCtrls); } ///////////////////////////////////////////////////////////////////////////// bool PVM4VDecoder::ResetVideoDecoder(void) { return PVResetVideoDecoder((VideoDecControls *)iVideoCtrls) ? true : false; } ///////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::DecSetReference(uint8 *refYUV, uint32 timestamp) { PVDecSetReference((VideoDecControls *)iVideoCtrls, (uint8*)refYUV, timestamp); return ; } ///////////////////////////////////////////////////////////////////////////// void PVM4VDecoder::DecSetEnhReference(uint8 *refYUV, uint32 timestamp) { PVDecSetEnhReference((VideoDecControls *)iVideoCtrls, (uint8*)refYUV, timestamp); return ; } ///////////////////////////////////////////////////////////////////////////// uint32 PVM4VDecoder::GetDecBitrate(void) { return ((uint32)PVGetDecBitrate((VideoDecControls *)iVideoCtrls)); } ///////////////////////////////////////////////////////////////////////////// uint32 PVM4VDecoder::GetProfileAndLevel(void) { VolInfo iVolInfo; if (GetVolInfo(&iVolInfo)) { return iVolInfo.profile_level_id; } else { return 0; } } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/pvm4vdecoder_factory.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /** * @file pvm4vdecoder_factory.cpp * @brief Singleton factory for PVM4VDecoder */ #include "oscl_base.h" #include "pvm4vdecoder.h" #include "pvm4vdecoder_factory.h" #include "oscl_error_codes.h" #include "oscl_exception.h" // Use default DLL entry point #include "oscl_dll.h" OSCL_DLL_ENTRY_POINT_DEFAULT() //////////////////////////////////////////////////////////////////////////// OSCL_EXPORT_REF PVVideoDecoderInterface* PVM4VDecoderFactory::CreatePVM4VDecoder() { PVVideoDecoderInterface* videodec = NULL; videodec = PVM4VDecoder::New(); if (videodec == NULL) { OSCL_LEAVE(OsclErrNoMemory); } return videodec; } //////////////////////////////////////////////////////////////////////////// OSCL_EXPORT_REF bool PVM4VDecoderFactory::DeletePVM4VDecoder(PVVideoDecoderInterface* aVideoDec) { if (aVideoDec) { OSCL_DELETE(aVideoDec); return true; } return false; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/scaling.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #endif extern const int32 scale[63]; #define PV_GET_ROW(a,b) ((a)/(b)) /*---------------------------------------------------------------------------- ; SIMPLE TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; ENUMERATED TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; STRUCTURES TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #ifdef __cplusplus } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/scaling_tab.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
#include "mp4dec_api.h"
#include "mp4def.h"
#include "scaling.h"

/*----------------------------------------------------------------------------
; MACROS
; Define module specific macros here
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; DEFINES
; Include all pre-processor statements here. Include conditional
; compile variables also.
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL STORE/BUFFER/POINTER DEFINITIONS
; Variable declaration - defined here and used outside this module
----------------------------------------------------------------------------*/
/* this scaling can be used for dividing values up to 3292    07/10/01 */
/* NOTE(review): entries appear to follow scale[i] ~= round((1<<18)/i) + 1
   for i >= 1 (e.g. scale[2]=131073=2^17+1, scale[4]=65537=2^16+1), i.e. a
   reciprocal table for division by multiplication — TODO confirm against
   the call sites before relying on this. Values must not be altered. */
const int32 scale[63] = {0, 262145, 131073, 87382, 65537, 52430,
                         43692, 37450, 32769, 29128, 26215, 23832, 21846, 20166, 18726, 17477,
                         16385, 15421, 14565, 13798, 13108, 12484, 11917, 11399, 10924, 10487,
                         10083, 9710, 9363, 9040, 8739, 8457, 8193, 7945, 7711, 7491, 7283,
                         7086, 6900, 6723, 6555, 6395, 6243, 6097, 5959, 5826, 5700, 5579,
                         5462, 5351, 5244, 5141, 5042, 4947, 4856, 4767, 4682, 4600, 4521,
                         4444, 4370, 4298, 4229
                        };

/*----------------------------------------------------------------------------
; EXTERNAL FUNCTION REFERENCES
; Declare functions defined elsewhere and referenced in this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; Define all local variables
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/vlc_dec_tab.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* * ------------------------------------------------------------------- * * MPEG-4 Simple Profile Video Decoder * * ------------------------------------------------------------------- * * * This software module was originally developed by * * Paulo Nunes (IST / ACTS-MoMuSyS) * * and edited by * * Robert Danielsen (Telenor / ACTS-MoMuSyS) * * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard. * This software module is an implementation of a part of one or more MPEG-4 * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC * 14496-2) standard. 
* * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free * license to this software module or modifications thereof for use in hardware * or software products claiming conformance to the MPEG-4 Video (ISO/IEC * 14496-2) standard. * * Those intending to use this software module in hardware or software products * are advised that its use may infringe existing patents. The original * developer of this software module and his/her company, the subsequent * editors and their companies, and ISO/IEC have no liability for use of this * software module or modifications thereof in an implementation. Copyright is * not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming * products. * * ACTS-MoMuSys partners retain full right to use the code for his/her own * purpose, assign or donate the code to a third party and to inhibit third * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard * conforming products. This copyright notice must be included in all copies or * derivative works. * * Copyright (c) 1996 * ***************************************************************************** ***********************************************************HeaderBegin******* * * File: vlc_dec_tab.h * * Author: Paulo Nunes (IST) - Paulo.Nunes@it.ist.utl.pt * Created: 1-Mar-96 * * Description: This file contains the VLC tables for module which deals * with VLC decoding. * * Notes: This file was created based on tmndecode * Written by Karl Olav Lillevold , * 1995 Telenor R&D. * Donated to the Momusys-project as background code by * Telenor. * * based on mpeg2decode, (C) 1994, MPEG Software Simulation Group * and mpeg2play, (C) 1994 Stefan Eckart * * * * Modified: 9-May-96 Paulo Nunes: Reformatted. New headers. * 14-May-96 Paulo Nunes: Changed TMNMVtabs according to VM2.1. 
* 04.11.96 Robert Danielsen: Added three new tables for coding * of Intra luminance coefficients (VM 4.0) * 01.05.97 Luis Ducla-Soares: added VM7.0 Reversible VLC tables (RVLC). * 13.05.97 Minhua Zhou: added VlC tables for CBPYtab2 CBPYtab3, * revised CBPYtab * ***********************************************************HeaderEnd********* This module is a header file for "vlc_decode.c". The table data actually resides in "vlc_tab.c". ------------------------------------------------------------------------------ */ /*---------------------------------------------------------------------------- ; CONTINUE ONLY IF NOT ALREADY DEFINED ----------------------------------------------------------------------------*/ #ifndef vlc_dec_tab_H #define vlc_dec_tab_H /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ #include "mp4def.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. 
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL VARIABLES REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/
#ifdef __cplusplus
extern "C"
{
#endif

    /* motion-vector VLC tables */
    extern const VLCshorttab PV_TMNMVtab0[];
    extern const VLCshorttab PV_TMNMVtab1[];
    extern const VLCshorttab PV_TMNMVtab2[];

    /* MCBPC (macroblock type + chroma coded-block-pattern) tables */
    extern const VLCshorttab PV_MCBPCtab[];
#ifdef PV_ANNEX_IJKT_SUPPORT
    extern const VLCshorttab PV_MCBPCtab1[];
#endif
    extern const VLCshorttab PV_MCBPCtabintra[];

    /* Table for separate mode MCBPC, for coding DQUANT-flag and CBPC */
    extern const VLCshorttab MCBPCtab_sep[32];

    /* CBPY (luma coded-block-pattern) tables */
    extern const VLCshorttab PV_CBPYtab[48];
    extern const VLCshorttab CBPYtab2[16];
    extern const VLCshorttab CBPYtab3[64];

    /* DCT coefficient VLC tables */
    extern const VLCtab2 PV_DCT3Dtab0[];
    extern const VLCtab2 PV_DCT3Dtab1[];
    extern const VLCtab2 PV_DCT3Dtab2[];

    /* New tables for Intra luminance blocks */
    extern const VLCtab2 PV_DCT3Dtab3[];
    extern const VLCtab2 PV_DCT3Dtab4[];
    extern const VLCtab2 PV_DCT3Dtab5[];
#ifdef PV_ANNEX_IJKT_SUPPORT
    /* Annex I tables */
    extern const VLCtab2 PV_DCT3Dtab6[];
    extern const VLCtab2 PV_DCT3Dtab7[];
    extern const VLCtab2 PV_DCT3Dtab8[];
#endif
    /* RVLC (reversible VLC) tables */
    extern const int ptrRvlcTab[];
    extern const VLCtab2 RvlcDCTtabIntra[];
    extern const VLCtab2 RvlcDCTtabInter[];

/*----------------------------------------------------------------------------
; SIMPLE TYPEDEF'S
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; ENUMERATED TYPEDEF'S
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; STRUCTURES TYPEDEF'S
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #endif #ifdef __cplusplus } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/vlc_decode.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* * ------------------------------------------------------------------- * * MPEG-4 Simple Profile Video Decoder * * ------------------------------------------------------------------- * * * This software module was originally developed by * * Paulo Nunes (IST / ACTS-MoMuSyS) * Robert Danielsen (Telenor / ACTS-MoMuSyS) * * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard. * This software module is an implementation of a part of one or more MPEG-4 * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC * 14496-2) standard. 
* * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free * license to this software module or modifications thereof for use in hardware * or software products claiming conformance to the MPEG-4 Video (ISO/IEC * 14496-2) standard. * * Those intending to use this software module in hardware or software products * are advised that its use may infringe existing patents. The original * developer of this software module and his/her company, the subsequent * editors and their companies, and ISO/IEC have no liability for use of this * software module or modifications thereof in an implementation. Copyright is * not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming * products. * * ACTS-MoMuSys partners retain full right to use the code for his/her own * purpose, assign or donate the code to a third party and to inhibit third * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard * conforming products. This copyright notice must be included in all copies or * derivative works. * * Copyright (c) 1996 * *****************************************************************************/ /***********************************************************HeaderBegin******* * * File: vlc_dec.c * * Author: Paulo Nunes (IST) - Paulo.Nunes@lx.it.pt * Created: 1-Mar-96 * * Description: This file contains the VLC functions needed to decode a * bitstream. * * Notes: * The functions contained in this file were adapted from * tmndecode * Written by Karl Olav Lillevold , * 1995 Telenor R&D. * Donated to the Momusys-project as background code by * Telenor. * * based on mpeg2decode, (C) 1994, MPEG Software Simulation Group * and mpeg2play, (C) 1994 Stefan Eckart * * * * Modified: 9-May-96 Paulo Nunes: Reformatted. New headers. * 17-Jan-97 Jan De Lameillieure (HHI) : corrected in * 01.05.97 Luis Ducla-Soares: added RvlcDecTCOEF() to allow decoding * of Reversible VLCs. * 09.03.98 Paulo Nunes: Cleaning. 
 ***********************************************************HeaderEnd*********/
#include "mp4dec_lib.h"
#include "vlc_dec_tab.h"
#include "vlc_decode.h"
#include "bitstream.h"
#include "max_level.h"

/* ======================================================================
    Function : DecodeUserData()
    Date     : 04/10/2000
    History  :
    Modified : 04/16/2001 : removed status checking of PV_BitstreamFlushBits

    This is simply a realization of the user_data() function
    in the ISO/IEC 14496-2 manual.
   ====================================================================== */
/* Skip a user_data() syntax element: discard bytes until the next
   start-code prefix (24-bit value 0x000001) is visible in the stream,
   or until the buffer ends (PV_END_OF_VOP). */
PV_STATUS DecodeUserData(BitstreamDecVideo *stream)
{
    PV_STATUS status;
    uint32 code;

    BitstreamReadBits32HC(stream);          /* consume 32 bits (presumably the
                                               user_data start code — confirm
                                               against callers) */
    BitstreamShowBits32(stream, 24, &code); /* peek, don't consume */
    while (code != 1)                       /* 1 == start-code prefix 0x000001 */
    {
        /* Discard user data for now.   04/05/2000 */
        BitstreamReadBits16(stream, 8);     /* drop one byte, re-peek */
        BitstreamShowBits32(stream, 24, &code);
        status = BitstreamCheckEndBuffer(stream);
        if (status == PV_END_OF_VOP) return status;   /*  03/19/2002 */
    }
    return PV_SUCCESS;
}

/***********************************************************CommentBegin******
 *
 *       3/10/00  : initial modification to the
 *                  new PV-Decoder Lib format.
 *       3/29/00  : added return code check to some functions and
 *                  optimize the code.
 ***********************************************************CommentEnd********/
/* Decode the forward motion vector(s) for the current macroblock.
   mode selects one MV per MB (stored into all four 8x8 positions) or, for
   MODE_INTER4V(_Q), four MVs, one per 8x8 block.  Predictors come from
   mv_prediction(); decoded differentials are added on top and the results
   are written into video->motX / video->motY at the MB's position.
   Returns the first non-PV_SUCCESS status from the bitstream readers. */
PV_STATUS PV_GetMBvectors(VideoDecData *video, uint mode)
{
    PV_STATUS status;
    BitstreamDecVideo *stream = video->bitstream;
    int f_code_f = video->currVop->fcodeForward;   /* MV range f-code */
    int vlc_code_mag;

    MOT *mot_x = video->motX;
    MOT *mot_y = video->motY;
    int k, offset;
    int x_pos = video->mbnum_col;
    int y_pos = video->mbnum_row;
    int doubleWidth = video->nMBPerRow << 1;       /* MV array is 2x MB width */
    int pos = (x_pos + y_pos * doubleWidth) << 1;  /* top-left MV slot of this MB */
    MOT mvx = 0, mvy = 0;

    if (f_code_f == 1)   /* no residual bits: VLC magnitude only, wrap to [-32,31] */
    {
#ifdef PV_ANNEX_IJKT_SUPPORT
        if (mode == MODE_INTER4V || mode == MODE_INTER4V_Q)
#else
        if (mode == MODE_INTER4V)
#endif
        {
            for (k = 0; k < 4; k++)
            {
                offset = (k & 1) + (k >> 1) * doubleWidth;
                mv_prediction(video, k, &mvx, &mvy);
                /* decode component x */
                status = PV_VlcDecMV(stream, &vlc_code_mag);
                if (status != PV_SUCCESS)
                {
                    return status;
                }
                mvx += (MOT)vlc_code_mag;
                mvx = (MOT)(((mvx + 32) & 0x3F) - 32);   /* wrap into legal range */

                status = PV_VlcDecMV(stream, &vlc_code_mag);
                if (status != PV_SUCCESS)
                {
                    return status;
                }
                mvy += (MOT)vlc_code_mag;
                mvy = (MOT)(((mvy + 32) & 0x3F) - 32);

                mot_x[pos+offset] = (MOT) mvx;
                mot_y[pos+offset] = (MOT) mvy;
            }
        }
        else
        {
            mv_prediction(video, 0, &mvx, &mvy);
            /* For PVOPs, field appears only in MODE_INTER & MODE_INTER_Q */
            status = PV_VlcDecMV(stream, &vlc_code_mag);
            if (status != PV_SUCCESS)
            {
                return status;
            }
            mvx += (MOT)vlc_code_mag;
            mvx = (MOT)(((mvx + 32) & 0x3F) - 32);

            status = PV_VlcDecMV(stream, &vlc_code_mag);
            if (status != PV_SUCCESS)
            {
                return status;
            }
            mvy += (MOT)vlc_code_mag;
            mvy = (MOT)(((mvy + 32) & 0x3F) - 32);

            /* replicate the single MV into all four 8x8 slots */
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
            pos += doubleWidth;
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
        }
    }
    else   /* f_code_f > 1: VLC magnitude plus FLC residual, via PV_DecodeMBVec */
    {
#ifdef PV_ANNEX_IJKT_SUPPORT
        if (mode == MODE_INTER4V || mode == MODE_INTER4V_Q)
#else
        if (mode == MODE_INTER4V)
#endif
        {
            for (k = 0; k < 4; k++)
            {
                offset = (k & 1) + (k >> 1) * doubleWidth;
                mv_prediction(video, k, &mvx, &mvy);
                status = PV_DecodeMBVec(stream, &mvx, &mvy, f_code_f);
                /* note: MVs are stored even on failure, then status returned */
                mot_x[pos+offset] = (MOT) mvx;
                mot_y[pos+offset] = (MOT) mvy;
                if (status != PV_SUCCESS)
                {
                    return status;
                }
            }
        }
        else
        {
            mv_prediction(video, 0, &mvx, &mvy);
            /* For PVOPs, field appears only in MODE_INTER & MODE_INTER_Q */
            status = PV_DecodeMBVec(stream, &mvx, &mvy, f_code_f);
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
            pos += doubleWidth;
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
            if (status != PV_SUCCESS)
            {
                return status;
            }
        }
    }

    return PV_SUCCESS;
}

/***********************************************************CommentBegin******
 *        3/10/00  : initial modification to the
 *                   new PV-Decoder Lib format.
 *        3/29/00  : added return code check to some functions
 *        5/10/00  : check whether the decoded vector is legal.
 *        4/17/01  : use MOT type
 ***********************************************************CommentEnd********/
/* Decode one motion-vector differential (x then y): VLC magnitude followed
   by (f_code_f - 1) residual bits when the magnitude is nonzero, then
   de-scale both components into *mv_x / *mv_y via PV_DeScaleMVD. */
PV_STATUS PV_DecodeMBVec(BitstreamDecVideo *stream, MOT *mv_x, MOT *mv_y, int f_code_f)
{
    PV_STATUS status;
    int vlc_code_magx, vlc_code_magy;
    int residualx = 0, residualy = 0;

    /* decode component x */
    status = PV_VlcDecMV(stream, &vlc_code_magx);
    if (status != PV_SUCCESS)
    {
        return status;
    }

    if (vlc_code_magx)
    {
        residualx = (int) BitstreamReadBits16_INLINE(stream, (int)(f_code_f - 1));
    }

    /* decode component y */
    status = PV_VlcDecMV(stream, &vlc_code_magy);
    if (status != PV_SUCCESS)
    {
        return status;
    }

    if (vlc_code_magy)
    {
        residualy = (int) BitstreamReadBits16_INLINE(stream, (int)(f_code_f - 1));
    }

    if (PV_DeScaleMVD(f_code_f, residualx, vlc_code_magx, mv_x) != PV_SUCCESS)
    {
        return PV_FAIL;
    }
    if (PV_DeScaleMVD(f_code_f, residualy, vlc_code_magy, mv_y) != PV_SUCCESS)
    {
        return PV_FAIL;
    }

    return PV_SUCCESS;
}

/***********************************************************CommentBegin******
 *    3/31/2000 : initial modification to the new PV-Decoder Lib format.
 *    5/10/2000 : check to see if the decoded vector falls within
 *                    the legal fcode range.
* ***********************************************************CommentEnd********/

/*
 * Reconstruct a motion-vector component from its VLC magnitude and FLC
 * residual, add it to the predictor in *vector, and wrap the result into
 * the legal f_code range.  Always returns PV_SUCCESS.
 */
PV_STATUS PV_DeScaleMVD(
    int  f_code,       /* <-- MV range in 1/2 units: 1=32,2=64,...,7=2048     */
    int  residual,     /* <-- part of the MV Diff. FLC coded                  */
    int  vlc_code_mag, /* <-- part of the MV Diff. VLC coded                  */
    MOT *vector        /* --> Obtained MV component in 1/2 units              */
)
{
    int half_range = (1 << (f_code + 4));
    int mask = (half_range << 1) - 1;
    int diff_vector;

    if (vlc_code_mag == 0)
    {
        diff_vector = vlc_code_mag;
    }
    else
    {
        /* magnitude carries the high bits, residual the low (f_code-1) bits */
        diff_vector = ((PV_ABS(vlc_code_mag) - 1) << (f_code - 1)) + residual + 1;
        if (vlc_code_mag < 0)
        {
            diff_vector = -diff_vector;
        }
    }

    *vector += (MOT)(diff_vector);
    /* wrap into [-half_range, half_range) */
    *vector = (MOT)((*vector + half_range) & mask) - half_range;
    return PV_SUCCESS;
}


/*
 * Compute the median MV predictor for 8x8 block `block` (0..3) of the
 * current macroblock from the left, above and above-right neighbours,
 * honouring slice boundaries (a neighbour in a different slice is treated
 * as outside the VOP).  Results are written to *mvx / *mvy.
 */
void mv_prediction(
    VideoDecData *video,
    int block,
    MOT *mvx,
    MOT *mvy
)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    MOT *motxdata = video->motX;
    MOT *motydata = video->motY;
    int mbnum_col = video->mbnum_col;
    int mbnum_row = video->mbnum_row;
    uint8 *slice_nb = video->sliceNo;
    int nMBPerRow = video->nMBPerRow;
    int nMVPerRow = nMBPerRow << 1;     /* MV grid is 2x MB grid width */
    int mbnum = video->mbnum;
    int p1x = 0, p2x = 0, p3x = 0;      /* candidate predictors, default 0 */
    int p1y = 0, p2y = 0, p3y = 0;
    int rule1 = 0, rule2 = 0, rule3 = 0; /* 1 when that candidate is valid */
    int indx;

    /* index of the MV slot immediately left of this block */
    indx = ((mbnum_col << 1) + (block & 1)) + ((mbnum_row << 1) + (block >> 1)) * nMVPerRow - 1;

    /* left block */
    if (block & 1)          /* block 1, 3: left neighbour is inside this MB */
    {
        p1x = motxdata[indx];
        p1y = motydata[indx];
        rule1 = 1;
    }
    else                    /* block 0, 2: left neighbour is in the previous MB */
    {
        if (mbnum_col > 0 && slice_nb[mbnum] == slice_nb[mbnum-1])
        {
            p1x = motxdata[indx];
            p1y = motydata[indx];
            rule1 = 1;
        }
    }

    indx = indx + 1 - nMVPerRow;    /* move to the row above this block */

    /* upper block */
    if (block >> 1)         /* block 2, 3: both upper candidates are inside this MB */
    {
        indx -= (block & 1);
        p2x = motxdata[indx];
        p2y = motydata[indx];
        p3x = motxdata[indx + 1];
        p3y = motydata[indx + 1];
        rule2 = rule3 = 1;
    }
    else
    {   /* block 0,1: upper candidates come from the MB row above */
        if (mbnum_row)
        {
            if (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])
            {
                p2x = motxdata[indx];
                p2y = motydata[indx];
                rule2 = 1;
            }
            if (mbnum_col < nMBPerRow - 1 && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow+1])
            {
                indx = indx + 2 - (block & 1);  /* above-right MV slot */
                p3x = motxdata[indx];
                p3y = motydata[indx];
                rule3 = 1;
            }
        }
    }

    if (rule1 + rule2 + rule3 > 1)
    {
        /* at least two valid candidates: median (invalid ones are zero) */
        *mvx = (MOT)PV_MEDIAN(p1x, p2x, p3x);
        *mvy = (MOT)PV_MEDIAN(p1y, p2y, p3y);
    }
    else if (rule1 + rule2 + rule3 == 1)
    {
        /* two of three are zero */
        *mvx = (MOT)(p1x + p2x + p3x);
        *mvy = (MOT)(p1y + p2y + p3y);
    }
    else
    {
        /* all MBs are outside the VOP */
        *mvx = *mvy = 0;
    }
    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}


/***********************************************************CommentBegin******
 *
 * 3/30/2000 : initial modification to the new PV-Decoder Lib format.
 * 4/16/2001 : removed checking of status for PV_BitstreamFlushBits
 ***********************************************************CommentEnd********/

/*
 * Decode one motion-vector VLC magnitude into *mv using the three TMN MV
 * tables, selected by the leading bits of a 13-bit look-ahead.
 * Returns PV_SUCCESS, or PV_FAIL (with *mv = -1) on an invalid code.
 */
PV_STATUS PV_VlcDecMV(BitstreamDecVideo *stream, int *mv)
{
    PV_STATUS status = PV_SUCCESS;
    uint code;

    BitstreamShow13Bits(stream, &code);
    if (code >> 12)             /* leading '1': Vector difference = 0 */
    {
        *mv = 0;
        PV_BitstreamFlushBits(stream, 1);
        return PV_SUCCESS;
    }
    if (code >= 512)            /* short codes: table 0 */
    {
        code = (code >> 8) - 2;
        PV_BitstreamFlushBits(stream, PV_TMNMVtab0[code].len + 1);
        *mv = PV_TMNMVtab0[code].val;
        return status;
    }
    if (code >= 128)            /* medium codes: table 1 */
    {
        code = (code >> 2) - 32;
        PV_BitstreamFlushBits(stream, PV_TMNMVtab1[code].len + 1);
        *mv = PV_TMNMVtab1[code].val;
        return status;
    }
    if (code < 4)               /* no valid codeword this short */
    {
        *mv = -1;
        return PV_FAIL;
    }
    code -= 4;                  /* long codes: table 2 */
    PV_BitstreamFlushBits(stream, PV_TMNMVtab2[code].len + 1);
    *mv = PV_TMNMVtab2[code].val;

    return status;
}


/***********************************************************CommentBegin******
 * 3/30/2000 : initial modification to the new PV-Decoder Lib
 *             format and the change of error-handling method.
* 4/16/01 : removed status checking of PV_BitstreamFlushBits ***********************************************************CommentEnd********/ int PV_VlcDecMCBPC_com_intra(BitstreamDecVideo *stream) { uint code; BitstreamShowBits16(stream, 9, &code); if (code < 8) { return VLC_CODE_ERROR; } code >>= 3; if (code >= 32) { PV_BitstreamFlushBits(stream, 1); return 3; } PV_BitstreamFlushBits(stream, PV_MCBPCtabintra[code].len); return PV_MCBPCtabintra[code].val; } /***********************************************************CommentBegin****** * * 3/30/2000 : initial modification to the new PV-Decoder Lib * format and the change of error-handling method. * 4/16/2001 : removed checking of return status of PV_BitstreamFlushBits ***********************************************************CommentEnd********/ int PV_VlcDecMCBPC_com_inter(BitstreamDecVideo *stream) { uint code; BitstreamShowBits16(stream, 9, &code); if (code == 0) { return VLC_CODE_ERROR; } else if (code >= 256) { PV_BitstreamFlushBits(stream, 1); return 0; } PV_BitstreamFlushBits(stream, PV_MCBPCtab[code].len); return PV_MCBPCtab[code].val; } #ifdef PV_ANNEX_IJKT_SUPPORT int PV_VlcDecMCBPC_com_inter_H263(BitstreamDecVideo *stream) { uint code; BitstreamShow13Bits(stream, &code); if (code == 0) { return VLC_CODE_ERROR; } else if (code >= 4096) { PV_BitstreamFlushBits(stream, 1); return 0; } if (code >= 16) { PV_BitstreamFlushBits(stream, PV_MCBPCtab[code >> 4].len); return PV_MCBPCtab[code >> 4].val; } else { PV_BitstreamFlushBits(stream, PV_MCBPCtab1[code - 8].len); return PV_MCBPCtab1[code - 8].val; } } #endif /***********************************************************CommentBegin****** * 3/30/2000 : initial modification to the new PV-Decoder Lib * format and the change of error-handling method. 
* 4/16/2001 : removed status checking for PV_BitstreamFlushBits
 ***********************************************************CommentEnd********/

/*
 * Decode the CBPY field (luma coded-block pattern, 4 bits).
 *   intra : non-zero for intra MBs; for inter MBs the pattern is
 *           complemented per the standard.
 * Returns the 4-bit CBPY value, or -1 on an invalid codeword.
 */
int PV_VlcDecCBPY(BitstreamDecVideo *stream, int intra)
{
    int CBPY = 0;
    uint code;

    BitstreamShowBits16(stream, 6, &code);

    if (code < 2)                   /* no CBPY codeword is this long */
    {
        return -1;
    }
    else if (code >= 48)            /* 2-bit '11' codeword */
    {
        PV_BitstreamFlushBits(stream, 2);
        CBPY = 15;
    }
    else
    {
        PV_BitstreamFlushBits(stream, PV_CBPYtab[code].len);
        CBPY = PV_CBPYtab[code].val;
    }

    if (intra == 0) CBPY = 15 - CBPY;   /* inter MBs code the complement */
    CBPY = CBPY & 15;

    return CBPY;
}


/***********************************************************CommentBegin******
 * 3/31/2000 : initial modification to the new PV-Decoder Lib format.
 *
 * 8/23/2000 : optimize the function by removing unnecessary
 *             BitstreamShowBits() function calls.
 *
 * 9/6/2000  : change the API to check for end-of-buffer for proper
 *             termination of decoding process.
 ***********************************************************CommentEnd********/

/*
 * Decode dct_dc_size (the number of bits of the following DC
 * differential) for intra DC prediction.
 *   compnum : block index; < 4 selects the luminance table, otherwise
 *             the chrominance table.
 *   DC_size : out; receives the decoded size (0..12).
 * The unary-style code is unravelled by shifting the look-ahead one bit
 * at a time; each accepted prefix flushes exactly its own length.
 * Returns PV_SUCCESS, or PV_FAIL if no codeword matches.
 */
PV_STATUS PV_VlcDecIntraDCPredSize(BitstreamDecVideo *stream, int compnum, uint *DC_size)
{
    PV_STATUS status = PV_FAIL;     /*  07/09/01 */
    uint code;

    *DC_size = 0;
    if (compnum < 4)    /* luminance block */
    {
        BitstreamShowBits16(stream, 11, &code);

        if (code == 1)
        {
            *DC_size = 12;
            PV_BitstreamFlushBits(stream, 11);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 11;
            PV_BitstreamFlushBits(stream, 10);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 10;
            PV_BitstreamFlushBits(stream, 9);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 9;
            PV_BitstreamFlushBits(stream, 8);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 8;
            PV_BitstreamFlushBits(stream, 7);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 7;
            PV_BitstreamFlushBits(stream, 6);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 6;
            PV_BitstreamFlushBits(stream, 5);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 5;
            PV_BitstreamFlushBits(stream, 4);
            return PV_SUCCESS;
        }

        /* remaining sizes share a 3- or 2-bit prefix */
        code >>= 1;
        if (code == 1)
        {
            *DC_size = 4;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }
        else if (code == 2)
        {
            *DC_size = 3;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }
        else if (code == 3)
        {
            *DC_size = 0;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 2)
        {
            *DC_size = 2;
            PV_BitstreamFlushBits(stream, 2);
            return PV_SUCCESS;
        }
        else if (code == 3)
        {
            *DC_size = 1;
            PV_BitstreamFlushBits(stream, 2);
            return PV_SUCCESS;
        }
    }
    else    /* chrominance block */
    {
        BitstreamShow13Bits(stream, &code);
        code >>= 1;
        if (code == 1)
        {
            *DC_size = 12;
            PV_BitstreamFlushBits(stream, 12);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 11;
            PV_BitstreamFlushBits(stream, 11);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 10;
            PV_BitstreamFlushBits(stream, 10);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 9;
            PV_BitstreamFlushBits(stream, 9);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 8;
            PV_BitstreamFlushBits(stream, 8);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 7;
            PV_BitstreamFlushBits(stream, 7);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 6;
            PV_BitstreamFlushBits(stream, 6);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 5;
            PV_BitstreamFlushBits(stream, 5);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 4;
            PV_BitstreamFlushBits(stream, 4);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 3;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }

        /* 2-bit codes: '11' -> 0, '10' -> 1, '01' -> 2 */
        code >>= 1;
        {
            *DC_size = (int)(3 - code);
            PV_BitstreamFlushBits(stream, 2);
            return PV_SUCCESS;
        }
    }

    return status;
}


/***********************************************************CommentBegin******
 *
 *
 * 3/30/2000 : initial modification to the new PV-Decoder Lib
 *             format and the change of error-handling method.
* ***********************************************************CommentEnd********/

/*
 * Decode one intra DCT coefficient triple (LAST, RUN, LEVEL) plus sign
 * into *pTcoef, using the MPEG-4 3-mode escape scheme:
 *   mode 1 : LEVEL is an offset from the table level (intra_max_level)
 *   mode 2 : RUN   is an offset from the table run  (intra_max_run0/1)
 *   mode 3 : fixed-length coded LAST/RUN/LEVEL
 * Returns PV_SUCCESS, or PV_FAIL on an invalid codeword or out-of-range
 * run/level.
 */
PV_STATUS VlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    /*  10/17/2000, perform a little bit better on ARM by putting the
     * whole table lookup inline in VlcDecTCOEFIntra. */
    if (code >= 1024)
    {
        tab = &PV_DCT3Dtab3[(code >> 6) - 16];
    }
    else
    {
        if (code >= 256)
        {
            tab = &PV_DCT3Dtab4[(code >> 3) - 32];
        }
        else
        {
            if (code >= 16)
            {
                tab = &PV_DCT3Dtab5[(code>>1) - 8];
            }
            else
            {
                return PV_FAIL;
            }
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);    /* +1 consumes the sign bit */
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint) tab->run;
    pTcoef->level = (int) tab->level;
    pTcoef->last = (uint) tab->last;

    /* the following is modified for 3-mode escape -- boon */
    if (tab->level != 0xFF)     /* 0xFF level marks the escape codeword */
    {
        return PV_SUCCESS;
    }

    if (!pTcoef->sign)
    {
        /* first escape mode. level is offset */
        BitstreamShow13Bits(stream, &code);

        if (code >= 1024)
        {
            tab = &PV_DCT3Dtab3[(code >> 6) - 16];
        }
        else
        {
            if (code >= 256)
            {
                tab = &PV_DCT3Dtab4[(code >> 3) - 32];
            }
            else
            {
                if (code >= 16)
                {
                    tab = &PV_DCT3Dtab5[(code>>1) - 8];
                }
                else
                {
                    return PV_FAIL;
                }
            }
        }

        PV_BitstreamFlushBits(stream, tab->len + 1);

        /* sign bit */
        pTcoef->sign = (code >> (12 - tab->len)) & 1;
        pTcoef->run = (uint)tab->run;
        pTcoef->level = (int)tab->level;
        pTcoef->last = (uint)tab->last;

        /* need to add back the max level */
        if ((pTcoef->last == 0 && pTcoef->run > 14) || (pTcoef->last == 1 && pTcoef->run > 20))
        {
            return PV_FAIL;
        }
        pTcoef->level = pTcoef->level + intra_max_level[pTcoef->last][pTcoef->run];
    }
    else
    {
        uint run_offset;
        run_offset = BitstreamRead1Bits_INLINE(stream);

        if (!run_offset)
        {
            /* second escape mode. run is offset */
            BitstreamShow13Bits(stream, &code);

            if (code >= 1024)
            {
                tab = &PV_DCT3Dtab3[(code >> 6) - 16];
            }
            else
            {
                if (code >= 256)
                {
                    tab = &PV_DCT3Dtab4[(code >> 3) - 32];
                }
                else
                {
                    if (code >= 16)
                    {
                        tab = &PV_DCT3Dtab5[(code>>1) - 8];
                    }
                    else
                    {
                        return PV_FAIL;
                    }
                }
            }

            PV_BitstreamFlushBits(stream, tab->len + 1);

            /* sign bit */
            pTcoef->sign = (code >> (12 - tab->len)) & 1;
            pTcoef->run = (uint)tab->run;
            pTcoef->level = (int)tab->level;
            pTcoef->last = (uint)tab->last;

            /* need to add back the max run */
            if (pTcoef->last)
            {
                if (pTcoef->level > 8)
                {
                    return PV_FAIL;
                }
                pTcoef->run = pTcoef->run + intra_max_run1[pTcoef->level] + 1;
            }
            else
            {
                if (pTcoef->level > 27)
                {
                    return PV_FAIL;
                }
                pTcoef->run = pTcoef->run + intra_max_run0[pTcoef->level] + 1;
            }
        }
        else
        {
            /* third escape mode: FLC-coded LAST(1) RUN(6) marker LEVEL(12) marker */
            code = BitstreamReadBits16_INLINE(stream, 8);
            pTcoef->last = code >> 7;
            pTcoef->run = (code >> 1) & 0x3F;
            pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 13) >> 1);

            if (pTcoef->level >= 2048)
            {
                pTcoef->sign = 1;
                pTcoef->level = 4096 - pTcoef->level;
            }
            else
            {
                pTcoef->sign = 0;
            }
        } /* flc */
    }

    return PV_SUCCESS;
}   /* VlcDecTCOEFIntra */


/*
 * Decode one inter DCT coefficient triple (LAST, RUN, LEVEL) plus sign
 * into *pTcoef.  Same 3-mode escape structure as VlcDecTCOEFIntra but
 * with the inter tables (PV_DCT3Dtab0/1/2, inter_max_*) and the escape
 * codeword marked by run == 0xBF.
 * Returns PV_SUCCESS, or PV_FAIL on an invalid codeword or out-of-range
 * run/level.
 */
PV_STATUS VlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    if (code >= 1024)
    {
        tab = &PV_DCT3Dtab0[(code >> 6) - 16];
    }
    else
    {
        if (code >= 256)
        {
            tab = &PV_DCT3Dtab1[(code >> 3) - 32];
        }
        else
        {
            if (code >= 16)
            {
                tab = &PV_DCT3Dtab2[(code>>1) - 8];
            }
            else
            {
                return PV_FAIL;
            }
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint)tab->run;
    pTcoef->level = (int)tab->level;
    pTcoef->last = (uint)tab->last;

    /* the following is modified for 3-mode escape -- boon */
    if (tab->run != 0xBF)       /* 0xBF run marks the escape codeword */
    {
        return PV_SUCCESS;
    }

    if (!pTcoef->sign)
    {
        /* first escape mode. level is offset */
        BitstreamShow13Bits(stream, &code);

        if (code >= 1024)
        {
            tab = &PV_DCT3Dtab0[(code >> 6) - 16];
        }
        else
        {
            if (code >= 256)
            {
                tab = &PV_DCT3Dtab1[(code >> 3) - 32];
            }
            else
            {
                if (code >= 16)
                {
                    tab = &PV_DCT3Dtab2[(code>>1) - 8];
                }
                else
                {
                    return PV_FAIL;
                }
            }
        }

        PV_BitstreamFlushBits(stream, tab->len + 1);
        pTcoef->sign = (code >> (12 - tab->len)) & 1;
        pTcoef->run = (uint)tab->run;
        pTcoef->level = (int)tab->level;
        pTcoef->last = (uint)tab->last;

        /* need to add back the max level */
        if ((pTcoef->last == 0 && pTcoef->run > 26) || (pTcoef->last == 1 && pTcoef->run > 40))
        {
            return PV_FAIL;
        }
        pTcoef->level = pTcoef->level + inter_max_level[pTcoef->last][pTcoef->run];
    }
    else
    {
        uint run_offset;
        run_offset = BitstreamRead1Bits_INLINE(stream);

        if (!run_offset)
        {
            /* second escape mode. run is offset */
            BitstreamShow13Bits(stream, &code);

            if (code >= 1024)
            {
                tab = &PV_DCT3Dtab0[(code >> 6) - 16];
            }
            else
            {
                if (code >= 256)
                {
                    tab = &PV_DCT3Dtab1[(code >> 3) - 32];
                }
                else
                {
                    if (code >= 16)
                    {
                        tab = &PV_DCT3Dtab2[(code>>1) - 8];
                    }
                    else
                    {
                        return PV_FAIL;
                    }
                }
            }

            PV_BitstreamFlushBits(stream, tab->len + 1);
            pTcoef->sign = (code >> (12 - tab->len)) & 1;
            pTcoef->run = (uint)tab->run;
            pTcoef->level = (int)tab->level;
            pTcoef->last = (uint)tab->last;

            /* need to add back the max run */
            if (pTcoef->last)
            {
                if (pTcoef->level > 3)
                {
                    return PV_FAIL;
                }
                pTcoef->run = pTcoef->run + inter_max_run1[pTcoef->level] + 1;
            }
            else
            {
                if (pTcoef->level > 12)
                {
                    return PV_FAIL;
                }
                pTcoef->run = pTcoef->run + inter_max_run0[pTcoef->level] + 1;
            }
        }
        else
        {
            /* third escape mode: FLC-coded LAST(1) RUN(6) marker LEVEL(12) marker */
            code = BitstreamReadBits16_INLINE(stream, 8);
            pTcoef->last = code >> 7;
            pTcoef->run = (code >> 1) & 0x3F;
            pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 13) >> 1);

            if (pTcoef->level >= 2048)
            {
                pTcoef->sign = 1;
                pTcoef->level = 4096 - pTcoef->level;
            }
            else
            {
                pTcoef->sign = 0;
            }
        } /* flc */
    }

    return PV_SUCCESS;
}   /* VlcDecTCOEFInter */

/*=======================================================
    Function:   VlcDecTCOEFShortHeader()
    Date    :   04/27/99
    Purpose :   New function used in decoding of video planes
                with short header
    Modified:   05/23/2000
                for new decoder structure.
=========================================================*/

/*
 * Decode one DCT coefficient triple for short-header (H.263 baseline)
 * streams.  Only escape mode 4 (H.263-style FLC: LAST, 6-bit RUN, 8-bit
 * LEVEL) exists here.  Returns PV_SUCCESS, or PV_FAIL on an invalid
 * codeword or a forbidden LEVEL of 0/128.
 */
PV_STATUS VlcDecTCOEFShortHeader(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    if (code >= 1024) tab = &PV_DCT3Dtab0[(code >> 6) - 16];
    else
    {
        if (code >= 256) tab = &PV_DCT3Dtab1[(code >> 3) - 32];
        else
        {
            if (code >= 16) tab = &PV_DCT3Dtab2[(code>>1) - 8];
            else return PV_FAIL;
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);    /* +1 consumes the sign bit */
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint)tab->run;
    pTcoef->level = (int)tab->level;
    pTcoef->last = (uint)tab->last;

    /* the following is modified for 3-mode escape -- boon */
    if (((tab->run << 4) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE) /* ESCAPE */
    {
        return PV_SUCCESS;
    }

    /* escape mode 4 - H.263 type */
    pTcoef->last = pTcoef->sign;                                /* Last  */
    pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);        /* Run   */
    pTcoef->level = (int) BitstreamReadBits16_INLINE(stream, 8); /* Level */

    if (pTcoef->level == 0 || pTcoef->level == 128)     /* forbidden values */
    {
        return PV_FAIL;
    }
    if (pTcoef->level > 128)        /* two's-complement 8-bit level */
    {
        pTcoef->sign = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }

    return PV_SUCCESS;
}   /* VlcDecTCOEFShortHeader */

#ifdef PV_ANNEX_IJKT_SUPPORT
/*
 * Short-header coefficient decode with H.263 Annex I (advanced intra
 * coding) tables PV_DCT3Dtab6/7/8.
 * NOTE(review): the escape test shifts run by 6 where the non-Annex-I
 * variant shifts by 4 -- presumably matching the tab6/7/8 layout;
 * confirm against the table definitions before touching this.
 */
PV_STATUS VlcDecTCOEFShortHeader_AnnexI(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    if (code >= 1024) tab = &PV_DCT3Dtab6[(code >> 6) - 16];
    else
    {
        if (code >= 256) tab = &PV_DCT3Dtab7[(code >> 3) - 32];
        else
        {
            if (code >= 16) tab = &PV_DCT3Dtab8[(code>>1) - 8];
            else return PV_FAIL;
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint)tab->run;
    pTcoef->level = (int)tab->level;
    pTcoef->last = (uint)tab->last;

    /* the following is modified for 3-mode escape -- boon */
    if (((tab->run << 6) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE) /* ESCAPE */
    {
        return PV_SUCCESS;
    }

    /* escape mode 4 - H.263 type */
    pTcoef->last = pTcoef->sign;                        /* Last  */
    pTcoef->run = BitstreamReadBits16(stream, 6);       /* Run   */
    pTcoef->level = (int) BitstreamReadBits16(stream, 8); /* Level */

    if (pTcoef->level == 0 || pTcoef->level == 128)
    {
        return PV_FAIL;
    }
    if (pTcoef->level > 128)
    {
        pTcoef->sign = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else pTcoef->sign = 0;

    return PV_SUCCESS;
}   /* VlcDecTCOEFShortHeader_AnnexI */

/*
 * Short-header coefficient decode with H.263 Annex T (modified
 * quantization): a LEVEL of 128 signals an 11-bit extended level whose
 * bit halves are swapped before interpretation.
 */
PV_STATUS VlcDecTCOEFShortHeader_AnnexT(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    if (code >= 1024) tab = &PV_DCT3Dtab0[(code >> 6) - 16];
    else
    {
        if (code >= 256) tab = &PV_DCT3Dtab1[(code >> 3) - 32];
        else
        {
            if (code >= 16) tab = &PV_DCT3Dtab2[(code>>1) - 8];
            else return PV_FAIL;
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint)tab->run;
    pTcoef->level = (int)tab->level;
    pTcoef->last = (uint)tab->last;

    /* the following is modified for 3-mode escape -- */
    if (((tab->run << 4) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE) /* ESCAPE */
    {
        return PV_SUCCESS;
    }

    /* escape mode 4 - H.263 type */
    pTcoef->last = pTcoef->sign;                        /* Last  */
    pTcoef->run = BitstreamReadBits16(stream, 6);       /* Run   */
    pTcoef->level = (int) BitstreamReadBits16(stream, 8); /* Level */

    if (pTcoef->level == 0)     /* only 0 is forbidden; 128 extends below */
    {
        return PV_FAIL;
    }
    if (pTcoef->level >= 128)
    {
        pTcoef->sign = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }
    if (pTcoef->level == 128)   /* extended 11-bit level (Annex T) */
    {
        code = BitstreamReadBits16(stream, 11); /* ANNEX_T */
        code = (code >> 6 & 0x1F) | (code << 5 & 0x7ff);    /* swap bit halves */
        if (code > 1024)
        {
            pTcoef->sign = 1;
            pTcoef->level = (2048 - code);
        }
        else
        {
            pTcoef->sign = 0;
            pTcoef->level = code;
        }
    }

    return PV_SUCCESS;
}   /* VlcDecTCOEFShortHeader_AnnexT */

/*
 * Short-header coefficient decode with both Annex I tables (tab6/7/8)
 * and the Annex T extended-level escape handling combined.
 */
PV_STATUS VlcDecTCOEFShortHeader_AnnexIT(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    if (code >= 1024) tab = &PV_DCT3Dtab6[(code >> 6) - 16];
    else
    {
        if (code >= 256) tab = &PV_DCT3Dtab7[(code >> 3) - 32];
        else
        {
            if (code >= 16) tab = &PV_DCT3Dtab8[(code>>1) - 8];
            else return PV_FAIL;
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint)tab->run;
    pTcoef->level = (int)tab->level;
    pTcoef->last = (uint)tab->last;

    /* the following is modified for 3-mode escape -- */
    if (((tab->run << 6) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE) /* ESCAPE */
    {
        return PV_SUCCESS;
    }

    /* escape mode 4 - H.263 type */
    pTcoef->last = pTcoef->sign;                        /* Last  */
    pTcoef->run = BitstreamReadBits16(stream, 6);       /* Run   */
    pTcoef->level = (int) BitstreamReadBits16(stream, 8); /* Level */

    if (pTcoef->level == 0)
    {
        return PV_FAIL;
    }
    if (pTcoef->level >= 128)
    {
        pTcoef->sign = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }
    if (pTcoef->level == 128)   /* extended 11-bit level (Annex T) */
    {
        code = BitstreamReadBits16(stream, 11); /* ANNEX_T */
        code = (code >> 6 & 0x1F) | (code << 5 & 0x7ff);
        if (code > 1024)
        {
            pTcoef->sign = 1;
            pTcoef->level = (2048 - code);
        }
        else
        {
            pTcoef->sign = 0;
            pTcoef->level = code;
        }
    }

    return PV_SUCCESS;
}   /* VlcDecTCOEFShortHeader_AnnexIT */
#endif

/***********************************************************CommentBegin******
 * 3/30/2000 : initial modification to the new PV-Decoder Lib
 *             format and the change of error-handling method.
 *             The coefficient is now returned thru a pre-
 *             initialized parameters for speedup.
* ***********************************************************CommentEnd********/

/*
 * Decode one inter DCT coefficient triple from a reversible VLC (RVLC)
 * stream.  The codeword structure is parsed bit by bit: the counts of
 * embedded zeros/ones (num[0], num[1]) select the row in
 * RvlcDCTtabInter; codes shorter than 4 leading zeros (code < 2048 in
 * the 15-bit look-ahead) are the FLC escape.
 * Returns PV_SUCCESS, or PV_FAIL on an invalid RVLC prefix.
 */
PV_STATUS RvlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code, mask;
    const VLCtab2 *tab2;
    int count, len, num[2] = {0, 0} /*  01/30/01 */;

    mask = 0x4000;      /* mask  100000000000000 */
    BitstreamShow15Bits(stream, &code);     /*  03/07/01 */

    len = 1;

    /*  09/20/99 Escape mode */
    /* Bitstream Exchange */
    if (code < 2048)
    {
        PV_BitstreamFlushBits(stream, 5);
        pTcoef->last = BitstreamRead1Bits_INLINE(stream);
        pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);
        /*  09/20/99 New marker bit */
        PV_BitstreamFlushBits(stream, 1);
        /*  09/20/99 The length for LEVEL used to be 7 in the old version */
        pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 12) >> 1);
        /*  09/20/99 Another new marker bit */
        /* the final 5-bit read also consumes the marker; lowest bit is SIGN */
        pTcoef->sign = BitstreamReadBits16_INLINE(stream, 5) & 0x1;  /* fix  3/13/01 */
        return PV_SUCCESS;
    }

    if (code & mask)    /* prefix '1...': count one terminating '0' */
    {
        count = 1;
        while (mask && count > 0)   /* fix  3/28/01 */
        {
            mask = mask >> 1;
            if (code & mask)
                count--;
            else
                num[0]++;   /* number of zeros in the middle */
            len++;
        }
    }
    else                /* prefix '0...': count two terminating '1's */
    {
        count = 2;
        while (mask && count > 0)   /* fix  3/28/01 */
        {
            mask = mask >> 1;
            if (!(code & mask))
                count--;
            else
                num[count-1]++; /* number of ones in the middle */
            len++;
        }
    }

    code = code & 0x7fff;
    code = code >> (15 - (len + 1));

    /*  1/30/01, add fast decoding algorithm here */
    /* code is in two forms : 0xxxx0xxx00 or 0xxx0xxx01
                                num[1]  and num[0] x
                        or   : 1xxxxx10 or 1xxxxx11
                                num[0]  x          */
    /* len+1 is the length of the above */

    if (num[1] > 10 || num[0] > 11) /* invalid RVLC code */
        return PV_FAIL;

    if (code&(1 << len))
        tab2 = RvlcDCTtabInter + 146 + (num[0] << 1) + (code & 1);
    else
        tab2 = RvlcDCTtabInter + ptrRvlcTab[num[1]] + (num[0] << 1) + (code & 1);

    PV_BitstreamFlushBits(stream, (int) tab2->len);
    pTcoef->run = (uint)tab2->run;
    pTcoef->level = (int)tab2->level;
    pTcoef->last = (uint)tab2->last;
    pTcoef->sign = BitstreamRead1Bits_INLINE(stream);   /* trailing sign bit */

    return PV_SUCCESS;
}   /* RvlcDecTCOEFInter */

/*
 * Intra counterpart of RvlcDecTCOEFInter: identical RVLC prefix parsing,
 * but the (num, code) pair indexes RvlcDCTtabIntra instead.
 * Returns PV_SUCCESS, or PV_FAIL on an invalid RVLC prefix.
 */
PV_STATUS RvlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code, mask;
    const VLCtab2 *tab2;
    int count, len, num[2] = {0, 0} /*  01/30/01 */;

    mask = 0x4000;      /* mask  100000000000000 */
    BitstreamShow15Bits(stream, &code);

    len = 1;

    /*  09/20/99 Escape mode */
    /* Bitstream Exchange */
    if (code < 2048)
    {
        PV_BitstreamFlushBits(stream, 5);
        pTcoef->last = BitstreamRead1Bits_INLINE(stream);
        pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);
        /*  09/20/99 New marker bit */
        PV_BitstreamFlushBits(stream, 1);
        /*  09/20/99 The length for LEVEL used to be 7 in the old version */
        pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 12) >> 1);
        /*  09/20/99 Another new marker bit */
        pTcoef->sign = BitstreamReadBits16_INLINE(stream, 5) & 0x1;  /* fix 03/13/01 */
        return PV_SUCCESS;
    }

    if (code & mask)
    {
        count = 1;
        while (mask && count > 0)   /* fix 03/28/01 */
        {
            mask = mask >> 1;
            if (code & mask)
                count--;
            else
                num[0]++;   /* number of zeros in the middle */
            len++;
        }
    }
    else
    {
        count = 2;
        while (mask && count > 0)   /* fix 03/28/01 */
        {
            mask = mask >> 1;
            if (!(code & mask))
                count--;
            else
                num[count-1]++; /* number of ones in the middle */
            len++;
        }
    }

    code = code & 0x7fff;
    code = code >> (15 - (len + 1));

    /*  1/30/01, add fast decoding algorithm here */
    /* code is in two forms : 0xxxx0xxx00 or 0xxx0xxx01
                                num[1]  and num[0] x
                        or   : 1xxxxx10 or 1xxxxx11
                                num[0]  x          */
    /* len+1 is the length of the above */

    if (num[1] > 10 || num[0] > 11) /* invalid RVLC code */
        return PV_FAIL;

    if (code & (1 << len))
        tab2 = RvlcDCTtabIntra + 146 + (num[0] << 1) + (code & 1);
    else
        tab2 = RvlcDCTtabIntra + ptrRvlcTab[num[1]] + (num[0] << 1) + (code & 1);

    PV_BitstreamFlushBits(stream, (int) tab2->len);
    pTcoef->run = (uint)tab2->run;
    pTcoef->level = (int)tab2->level;
    pTcoef->last = (uint)tab2->last;
    pTcoef->sign = BitstreamRead1Bits_INLINE(stream);

    return PV_SUCCESS;
}   /* RvlcDecTCOEFIntra */



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/vlc_decode.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/* -------------------------------------------------------------------
                    MPEG-4 Simple Profile Video Decoder
   -------------------------------------------------------------------
 *
 * This software module was originally developed by
 *
 *   Paulo Nunes (IST / ACTS-MoMuSyS)
 *
 * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.
 * This software module is an implementation of a part of one or more MPEG-4
 * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC
 * 14496-2) standard.
 *
 * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free
 * license to this software module or modifications thereof for use in hardware
 * or software products claiming conformance to the MPEG-4 Video (ISO/IEC
 * 14496-2) standard.
 *
 * Those intending to use this software module in hardware or software products
 * are advised that its use may infringe existing patents.
The original * developer of this software module and his/her company, the subsequent * editors and their companies, and ISO/IEC have no liability for use of this * software module or modifications thereof in an implementation. Copyright is * not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming * products. * * ACTS-MoMuSys partners retain full right to use the code for his/her own * purpose, assign or donate the code to a third party and to inhibit third * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard * conforming products. This copyright notice must be included in all copies or * derivative works. * * Copyright (c) 1996 * *****************************************************************************/ /***********************************************************HeaderBegin******* * * File: vlc_dec.h * * Author: Paulo Nunes (IST) - Paulo.Nunes@lx.it.pt * Created: * * Description: This is the header file for the "vlcdec" module. * * Notes: * * Modified: 9-May-96 Paulo Nunes: Reformatted. New headers. * * ================= PacketVideo Modification ================================ * * 3/30/00 : initial modification to the * new PV-Decoder Lib format. 
* ***********************************************************CommentEnd********/

#ifndef _VLCDECODE_H_
#define _VLCDECODE_H_

#include "mp4lib_int.h"

/* error codes returned by the int-valued VLC decoders below */
#define VLC_ERROR_DETECTED(x) ((x) < 0)
#define VLC_IO_ERROR    -1
#define VLC_CODE_ERROR  -2
#define VLC_MB_STUFFING -4
#define VLC_NO_LAST_BIT -5

/* packed (run<<4)|level|(last<<12) value marking the TCOEF escape entry */
#define VLC_ESCAPE_CODE  7167

#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */

    PV_STATUS DecodeUserData(BitstreamDecVideo *stream);
    PV_STATUS PV_GetMBvectors(VideoDecData *, uint mode);
    PV_STATUS PV_DecodeMBVec(BitstreamDecVideo *stream, MOT *mv_x, MOT *mv_y, int f_code_f);
    PV_STATUS PV_DeScaleMVD(int f_code, int residual, int vlc_code_mag, MOT *vector);
    PV_STATUS PV_VlcDecMV(BitstreamDecVideo *stream, int *mv);
    int PV_VlcDecMCBPC_com_intra(BitstreamDecVideo *stream);
    int PV_VlcDecMCBPC_com_inter(BitstreamDecVideo *stream);
#ifdef PV_ANNEX_IJKT_SUPPORT
    int PV_VlcDecMCBPC_com_inter_H263(BitstreamDecVideo *stream);
    PV_STATUS VlcDecTCOEFShortHeader_AnnexI(BitstreamDecVideo *stream, Tcoef *pTcoef);
    PV_STATUS VlcDecTCOEFShortHeader_AnnexT(BitstreamDecVideo *stream, Tcoef *pTcoef); /* ANNEX_T */
    PV_STATUS VlcDecTCOEFShortHeader_AnnexIT(BitstreamDecVideo *stream, Tcoef *pTcoef);
#endif
    int PV_VlcDecCBPY(BitstreamDecVideo *stream, int intra);
    PV_STATUS VlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef);
    PV_STATUS VlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef);
    PV_STATUS VlcDecTCOEFShortHeader(BitstreamDecVideo *stream, Tcoef *pTcoef);
    PV_STATUS RvlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef);
    PV_STATUS RvlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef);
    PV_STATUS PV_VlcDecIntraDCPredSize(BitstreamDecVideo *stream, int compnum, uint *DC_size);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/vlc_dequant.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009
PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_lib.h" #include "vlc_decode.h" #include "zigzag.h" typedef PV_STATUS(*VlcDecFuncP)(BitstreamDecVideo *stream, Tcoef *pTcoef); static const uint8 AC_rowcol[64] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, }; static const uint8 mask[8] = /* for fast bitmap */ {128, 64, 32, 16, 8, 4, 2, 1}; /***********************************************************CommentBegin****** * * -- VlcDequantMpegBlock -- Decodes the DCT coefficients of one 8x8 block and perform dequantization using Mpeg mode. 
 *    Date: 08/08/2000
 *    Modified: 3/21/01  Added pre IDCT clipping, new ACDC prediction
 *              structure, ACDC prediction clipping, 16-bit int case,
 *              removed multiple zigzaging
 ******************************************************************************/

#ifdef PV_SUPPORT_MAIN_PROFILE

/* Decode the TCOEF run/level codes of one INTRA 8x8 block and apply MPEG-style
 * (first inverse quantization method) dequantization with the weighting matrix
 * currVol->iqmat.
 *
 *   vid       : VideoDecData* passed as void* (cast back on entry)
 *   comp      : block index 0..5 within the macroblock (0..3 luma, 4..5 chroma)
 *   switched  : alters the starting coefficient index (i = 1 - switched)
 *   bitmapcol / bitmaprow : FAST_IDCT non-zero-coefficient bitmaps (outputs)
 *
 * Returns the coefficient-count indicator i (NCOEFF_BLOCK when the whole block
 * must be transformed), or a negative VLC_* error code on bitstream error.
 * Side effects: writes dequantized coefficients into video->mblock->block[comp],
 * stores predictor state into predDC / predDCAC_row / predDCAC_col. */
int VlcDequantMpegIntraBlock(void *vid, int comp, int switched,
                             uint8 *bitmapcol, uint8 *bitmaprow)
{
    VideoDecData *video = (VideoDecData*) vid;
    Vol *currVol = video->vol[video->currLayer];
    BitstreamDecVideo *stream = video->bitstream;
    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset to all-zero !!! */
    int mbnum = video->mbnum;
    uint CBP = video->headerInfo.CBP[mbnum];
    int QP = video->QPMB[mbnum];
    typeDCStore *DC = video->predDC + mbnum;
    int x_pos = video->mbnum_col;
    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;
    typeDCACStore *DCAC_col = video->predDCAC_col;
    uint ACpred_flag = (uint) video->acPredFlag[mbnum];

    /*** VLC decoding state *****/
    int i, j, k;
    Tcoef run_level;
    int last, return_status;
    VlcDecFuncP vlcDecCoeff;
    int direction;
    const int *inv_zigzag;

    /*** Quantizer state ****/
    int dc_scaler;
    int sum;                         /* XOR parity of dequantized values (mismatch control) */
    int *qmat;
    int32 temp;

    /* Map block index comp to the row/column predictor slots. */
    const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};
    const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};

    int16 *dcac_row, *dcac_col;

    dcac_row = (*DCAC_row)[B_Xtab[comp]];
    dcac_col = (*DCAC_col)[B_Ytab[comp]];

    i = 1 - switched;

#ifdef FAST_IDCT
    /* Clear the 8-byte column bitmap and the row bitmap. */
    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
#endif

    /* Select which Huffman table to be used. */
    vlcDecCoeff = video->vlcDecCoeffIntra;
    dc_scaler = (comp < 4) ? video->mblock->DCScalarLum : video->mblock->DCScalarChr;

    /* Enter the zero-run decoding loop. */
    sum = 0;
    qmat = currVol->iqmat;

    /* Perform only VLC decoding first; DC/AC reconstruction cannot be done
     * before VLC decoding ( 10/17/2000). */
    doDCACPrediction(video, comp, datablock, &direction);

    if (!ACpred_flag) direction = 0;

    /* Choose inverse scan: classical, or alternate scan per prediction direction. */
    inv_zigzag = zigzag_inv + (ACpred_flag << 6) + (direction << 6);

    if (CBP & (1 << (5 - comp)))
    {
        do
        {
            return_status = (*vlcDecCoeff)(stream, &run_level);
            if (return_status != PV_SUCCESS)
            {
                last = 1; /* 11/1/2000: let it slip undetected, as in original version */
                i = VLC_ERROR;
                ACpred_flag = 0; /* number of coefficients should not get reset  03/07/2002 */
                break;
            }

            i += run_level.run;
            last = run_level.last;
            if (i >= 64)
            {
                /* i = NCOEFF_BLOCK; */ /* 11/1/00 */
                ACpred_flag = 0; /* number of coefficients should not get reset  03/07/2002 */
                i = VLC_NO_LAST_BIT;
                last = 1;
                break;
            }

            k = inv_zigzag[i];
            /* Level is added to / subtracted from the (possibly predicted) value. */
            if (run_level.sign == 1)
            {
                datablock[k] -= run_level.level;
            }
            else
            {
                datablock[k] += run_level.level;
            }

            if (AC_rowcol[k])
            {
                /* MPEG dequant: (2*coef * qmat * QP)/16, done here as /8 with
                 * rounding toward zero, then saturate to [-2048, 2047]. */
                temp = (int32)datablock[k] * qmat[k] * QP;
                temp = (temp + (0x7 & (temp >> 31))) >> 3;
                if (temp > 2047) temp = 2047;
                else if (temp < -2048) temp = -2048;
                datablock[k] = (int) temp;
#ifdef FAST_IDCT
                bitmapcol[k&0x7] |= mask[k>>3];
#endif
                sum ^= temp;
            }

            i++;
        }
        while (!last);
    }
    else
    {
        i = 1; /* 04/26/01 needed for switched case */
    }

    ///// NEED TO DEQUANT THOSE PREDICTED AC COEFF
    /* Dequantize the rest of the AC predicted coefficients that haven't been
     * dequantized in the loop above (first row / first column). */
    if (ACpred_flag)
    {
        i = NCOEFF_BLOCK; /* otherwise, FAST IDCT won't work correctly, 10/18/2000 */

        if (!direction) /* check vertical prediction */
        {
            /* Save first-row quantized values for the next block's prediction. */
            dcac_row[0] = datablock[1];
            dcac_row[1] = datablock[2];
            dcac_row[2] = datablock[3];
            dcac_row[3] = datablock[4];
            dcac_row[4] = datablock[5];
            dcac_row[5] = datablock[6];
            dcac_row[6] = datablock[7];

            for (j = 0, k = 8; k < 64; k += 8, j++)
            {
                /* Intentional assignment-in-condition: store then test. */
                if (dcac_col[j] = datablock[k])
                {
                    /* ACDC clipping  03/26/01 */
                    if (datablock[k] > 2047) dcac_col[j] = 2047;
                    else if (datablock[k] < -2048) dcac_col[j] = -2048;

                    temp = (int32)dcac_col[j] * qmat[k] * QP;
                    temp = (temp + (0x7 & (temp >> 31))) >> 3; /*  03/26/01 */
                    if (temp > 2047) temp = 2047;
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int)temp;
                    sum ^= temp; /*  7/5/01 */
#ifdef FAST_IDCT
                    bitmapcol[0] |= mask[k>>3];
#endif
                }
            }

            for (k = 1; k < 8; k++)
            {
                if (datablock[k])
                {
                    temp = (int32)datablock[k] * qmat[k] * QP;
                    temp = (temp + (0x7 & (temp >> 31))) >> 3; /*  03/26/01 */
                    if (temp > 2047) temp = 2047;
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int)temp;
                    sum ^= temp; /*  7/5/01 */
#ifdef FAST_IDCT
                    bitmapcol[k] |= 128;
#endif
                }
            }
        }
        else /* horizontal prediction */
        {
            /* Save first-column quantized values for the next block's prediction. */
            dcac_col[0] = datablock[8];
            dcac_col[1] = datablock[16];
            dcac_col[2] = datablock[24];
            dcac_col[3] = datablock[32];
            dcac_col[4] = datablock[40];
            dcac_col[5] = datablock[48];
            dcac_col[6] = datablock[56];

            for (j = 0, k = 1; k < 8; k++, j++)
            {
                /* Intentional assignment-in-condition: store then test. */
                if (dcac_row[j] = datablock[k])
                {
                    /* ACDC clipping  03/26/01 */
                    if (datablock[k] > 2047) dcac_row[j] = 2047;
                    else if (datablock[k] < -2048) dcac_row[j] = -2048;

                    temp = (int32)dcac_row[j] * qmat[k] * QP;
                    temp = (temp + (0x7 & (temp >> 31))) >> 3; /*  03/26/01 */
                    if (temp > 2047) temp = 2047;
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int)temp;
                    sum ^= temp;
#ifdef FAST_IDCT
                    bitmapcol[k] |= 128;
#endif
                }
            }

            for (k = 8; k < 64; k += 8)
            {
                if (datablock[k])
                {
                    temp = (int32)datablock[k] * qmat[k] * QP;
                    temp = (temp + (0x7 & (temp >> 31))) >> 3; /*  03/26/01 */
                    if (temp > 2047) temp = 2047;
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int)temp;
                    sum ^= temp;
#ifdef FAST_IDCT
                    bitmapcol[0] |= mask[k>>3];
#endif
                }
            }
        }
    }
    else
    {
        /* No AC prediction: store the qcoeff-values needed later for
         * prediction (no clipping required). */
        dcac_row[0] = datablock[1];
        dcac_row[1] = datablock[2];
        dcac_row[2] = datablock[3];
        dcac_row[3] = datablock[4];
        dcac_row[4] = datablock[5];
        dcac_row[5] = datablock[6];
        dcac_row[6] = datablock[7];
        dcac_col[0] = datablock[8];
        dcac_col[1] = datablock[16];
        dcac_col[2] = datablock[24];
        dcac_col[3] = datablock[32];
        dcac_col[4] = datablock[40];
        dcac_col[5] = datablock[48];
        dcac_col[6] = datablock[56];

        for (k = 1; k < 8; k++)
        {
            if (datablock[k])
            {
                temp = (int32)datablock[k] * qmat[k] * QP;
                temp = (temp + (0x7 & (temp >> 31))) >> 3; /*  03/26/01 */
                if (temp > 2047) temp = 2047;
                else if (temp < -2048) temp = -2048;
                datablock[k] = (int)temp;
                sum ^= temp; /*  7/5/01 */
#ifdef FAST_IDCT
                bitmapcol[k] |= 128;
#endif
            }
        }

        for (k = 8; k < 64; k += 8)
        {
            if (datablock[k])
            {
                temp = (int32)datablock[k] * qmat[k] * QP;
                temp = (temp + (0x7 & (temp >> 31))) >> 3; /*  03/26/01 */
                if (temp > 2047) temp = 2047;
                else if (temp < -2048) temp = -2048;
                datablock[k] = (int)temp;
                sum ^= temp;
#ifdef FAST_IDCT
                bitmapcol[0] |= mask[k>>3];
#endif
            }
        }
    }

    /* Scale and saturate the DC coefficient. */
    if (datablock[0])
    {
        temp = (int32)datablock[0] * dc_scaler;
        if (temp > 2047) temp = 2047; /*  03/14/01 */
        else if (temp < -2048) temp = -2048;
        datablock[0] = (int)temp;
        sum ^= temp;
#ifdef FAST_IDCT
        bitmapcol[0] |= 128;
#endif
    }

    /* Mismatch control: if the XOR-parity of all dequantized values is even,
     * toggle the LSB of coefficient 63 (appears to follow the MPEG-4
     * oddification rule — confirm against ISO/IEC 14496-2). */
    if ((sum & 1) == 0)
    {
        datablock[63] = datablock[63] ^ 0x1;
#ifdef FAST_IDCT
        /*  7/5/01, need to update bitmap */
        if (datablock[63]) bitmapcol[7] |= 1;
#endif
        i = (-64 & i) | NCOEFF_BLOCK; /* if i > -1 then i is set to NCOEFF_BLOCK */
    }

#ifdef FAST_IDCT
    /* Fold column bitmaps 1..3 into the row bitmap for the fast IDCT. */
    if (i > 10)
    {
        for (k = 1; k < 4; k++)
        {
            if (bitmapcol[k] != 0)
            {
                (*bitmaprow) |= mask[k];
            }
        }
    }
#endif

    /* Store the qcoeff-value needed later for prediction. */
    (*DC)[comp] = datablock[0];

    return i;
}

/***********************************************************CommentBegin******
 *
 * -- VlcDequantMpegInterBlock -- Decodes the DCT coefficients of one 8x8
 *    block and perform dequantization using Mpeg mode for INTER block.
 *    Date: 08/08/2000
 *    Modified: 3/21/01  clean up, added clipping, 16-bit int case,
 *              new ACDC prediction
 ******************************************************************************/

/* Decode the TCOEF codes of one INTER 8x8 block and apply MPEG-style
 * dequantization with the non-intra matrix currVol->niqmat:
 * |coef'| = ((2*level + 1) * niqmat * QP) / 16, saturated to [-2048, 2047].
 *
 * Returns NCOEFF_BLOCK when mismatch control toggled coefficient 63, the
 * coefficient-count indicator i otherwise, or a negative VLC_* error code. */
int VlcDequantMpegInterBlock(void *vid, int comp,
                             uint8 *bitmapcol, uint8 *bitmaprow)
{
    VideoDecData *video = (VideoDecData*) vid;
    BitstreamDecVideo *stream = video->bitstream;
    Vol *currVol = video->vol[video->currLayer];
    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset to all-zero !!! */
    int mbnum = video->mbnum;
    int QP = video->QPMB[mbnum];

    /*** VLC decoding state *****/
    int i, k;
    Tcoef run_level;
    int last, return_status;
    VlcDecFuncP vlcDecCoeff;

    /*** Quantizer state ****/
    int sum;          /* XOR parity of dequantized values (mismatch control) */
    int *qmat;
    int32 temp;

    i = 0;

#ifdef FAST_IDCT
    /* Clear the 8-byte column bitmap and the row bitmap. */
    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
#endif

    /* Select which Huffman table to be used. */
    vlcDecCoeff = video->vlcDecCoeffInter;

    /* Enter the zero-run decoding loop. */
    sum = 0;
    qmat = currVol->niqmat;
    do
    {
        return_status = (*vlcDecCoeff)(stream, &run_level);
        if (return_status != PV_SUCCESS)
        {
            last = 1; /* 11/1/2000: let it slip undetected, as in original version */
            i = VLC_ERROR;
            sum = 1; /* number of coefficients should not get reset  03/07/2002 */
            break;
        }

        i += run_level.run;
        last = run_level.last;
        if (i >= 64)
        {
            /* i = NCOEFF_BLOCK; */ /* 11/1/00 */
            //return VLC_NO_LAST_BIT;
            i = VLC_NO_LAST_BIT;
            last = 1;
            sum = 1; /* number of coefficients should not get reset  03/07/2002 */
            break;
        }

        k = zigzag_inv[i];
        /* Non-intra dequant: ((2*level + 1) * qmat * QP) >> 4, rounded toward
         * zero for negative values, then saturated. */
        if (run_level.sign == 1)
        {
            temp = (-(int32)(2 * run_level.level + 1) * qmat[k] * QP + 15) >> 4; /*  03/23/01 */
            if (temp < -2048) temp = - 2048;
        }
        else
        {
            temp = ((int32)(2 * run_level.level + 1) * qmat[k] * QP) >> 4; /*  03/23/01 */
            if (temp > 2047) temp = 2047;
        }
        datablock[k] = (int)temp;
#ifdef FAST_IDCT
        bitmapcol[k&0x7] |= mask[k>>3];
#endif
        sum ^= temp;
        i++;
    }
    while (!last);

    /* Mismatch control: if the XOR-parity is even, toggle LSB of coeff 63. */
    if ((sum & 1) == 0)
    {
        datablock[63] = datablock[63] ^ 0x1;
#ifdef FAST_IDCT
        /*  7/5/01, need to update bitmap */
        if (datablock[63]) bitmapcol[7] |= 1;
#endif
        i = NCOEFF_BLOCK;
    }

#ifdef FAST_IDCT
    /* Fold column bitmaps 1..3 into the row bitmap for the fast IDCT. */
    if (i > 10)
    {
        for (k = 1; k < 4; k++) /*  07/19/01 */
        {
            if (bitmapcol[k] != 0)
            {
                (*bitmaprow) |= mask[k];
            }
        }
    }
#endif

    return i;
}
#endif /* PV_SUPPORT_MAIN_PROFILE */

/***********************************************************CommentBegin******
 *
 * -- VlcDequantIntraH263Block -- Decodes the DCT coefficients of one 8x8
 *    block and perform dequantization in H.263 mode for INTRA block.
 *    Date: 08/08/2000
 *    Modified: 3/21/01  clean up, added clipping, 16-bit int case,
 *              removed multiple zigzaging
 ******************************************************************************/

/* Decode the TCOEF codes of one INTRA 8x8 block and apply H.263-style
 * dequantization: |coef'| = QP*(2*|coef| + 1) - (QP even ? 1 : 0), with the
 * sign carried by sgn_coeff, saturated to [-2048, 2047].
 * Same parameter/return conventions as VlcDequantMpegIntraBlock above. */
int VlcDequantH263IntraBlock(VideoDecData *video, int comp, int switched,
                             uint8 *bitmapcol, uint8 *bitmaprow)
{
    BitstreamDecVideo *stream = video->bitstream;
    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset to all-zero !!! */
    int32 temp;
    int mbnum = video->mbnum;
    uint CBP = video->headerInfo.CBP[mbnum];
    int QP = video->QPMB[mbnum];
    typeDCStore *DC = video->predDC + mbnum;
    int x_pos = video->mbnum_col;
    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;
    typeDCACStore *DCAC_col = video->predDCAC_col;
    uint ACpred_flag = (uint) video->acPredFlag[mbnum];

    /*** VLC decoding state *****/
    int i, j, k;
    Tcoef run_level;
    int last, return_status;
    VlcDecFuncP vlcDecCoeff;
    int direction;
    const int *inv_zigzag;

    /*** Quantizer state ****/
    int dc_scaler;
    int sgn_coeff;                 /* sign (+1/-1) of the coefficient being dequantized */

    /* Map block index comp to the row/column predictor slots. */
    const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};
    const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};

    int16 *dcac_row, *dcac_col;

    dcac_row = (*DCAC_row)[B_Xtab[comp]];
    dcac_col = (*DCAC_col)[B_Ytab[comp]];

#ifdef FAST_IDCT
    /* Clear the 8-byte column bitmap and the row bitmap. */
    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
#endif

    /* Select which Huffman table to be used. */
    vlcDecCoeff = video->vlcDecCoeffIntra;
    dc_scaler = (comp < 4) ?
                video->mblock->DCScalarLum : video->mblock->DCScalarChr;

    /* Perform only VLC decoding first. */
    doDCACPrediction(video, comp, datablock, &direction);

    if (!ACpred_flag) direction = 0;

    /* Choose inverse scan: classical, or alternate scan per prediction
     * direction.  04/17/01 */
    inv_zigzag = zigzag_inv + (ACpred_flag << 6) + (direction << 6);

    i = 1;

    if (CBP & (1 << (5 - comp)))
    {
        i = 1 - switched;
        do
        {
            return_status = (*vlcDecCoeff)(stream, &run_level);
            if (return_status != PV_SUCCESS)
            {
                last = 1; /* 11/1/2000: let it slip undetected, as in original version */
                i = VLC_ERROR;
                ACpred_flag = 0; /* number of coefficients should not get reset  03/07/2002 */
                break;
            }

            i += run_level.run;
            last = run_level.last;
            if (i >= 64)
            {
                ACpred_flag = 0; /* number of coefficients should not get reset  03/07/2002 */
                i = VLC_NO_LAST_BIT;
                last = 1;
                break;
            }

            k = inv_zigzag[i];
            /* Level is added to / subtracted from the (possibly predicted) value. */
            if (run_level.sign == 1)
            {
                datablock[k] -= run_level.level;
                sgn_coeff = -1;
            }
            else
            {
                datablock[k] += run_level.level;
                sgn_coeff = 1;
            }

            if (AC_rowcol[k]) /* 10/25/2000 */
            {
                /* H.263 dequant with odd/even-QP correction, then saturate. */
                temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                if (temp > 2047) temp = 2047; /*  03/14/01 */
                else if (temp < -2048) temp = -2048;
                datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                bitmapcol[k&0x7] |= mask[k>>3];
#endif
            }

            i++;
        }
        while (!last);
    }

    ///// NEED TO DEQUANT THOSE PREDICTED AC COEFF
    /* Dequantize the rest of the AC predicted coefficients that haven't been
     * dequantized in the loop above (first row / first column). */
    if (ACpred_flag)
    {
        i = NCOEFF_BLOCK; /* otherwise, FAST IDCT won't work correctly, 10/18/2000 */

        if (!direction) /* check vertical prediction */
        {
            /* Save first-row quantized values for the next block's prediction. */
            dcac_row[0] = datablock[1];
            dcac_row[1] = datablock[2];
            dcac_row[2] = datablock[3];
            dcac_row[3] = datablock[4];
            dcac_row[4] = datablock[5];
            dcac_row[5] = datablock[6];
            dcac_row[6] = datablock[7];

            for (j = 0, k = 8; k < 64; k += 8, j++)
            {
                dcac_col[j] = datablock[k];
                if (dcac_col[j])
                {
                    /* ACDC clipping  03/26/01 */
                    if (datablock[k] > 0)
                    {
                        if (datablock[k] > 2047) dcac_col[j] = 2047;
                        sgn_coeff = 1;
                    }
                    else
                    {
                        if (datablock[k] < -2048) dcac_col[j] = -2048;
                        sgn_coeff = -1;
                    }

                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                    if (temp > 2047) temp = 2047; /*  03/14/01 */
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                    bitmapcol[0] |= mask[k>>3];
#endif
                }
            }

            for (k = 1; k < 8; k++)
            {
                if (datablock[k])
                {
                    sgn_coeff = (datablock[k] > 0) ? 1 : -1;

                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                    if (temp > 2047) temp = 2047; /*  03/14/01 */
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                    bitmapcol[k] |= 128;
#endif
                }
            }
        }
        else /* horizontal prediction */
        {
            /* Save first-column quantized values for the next block's prediction. */
            dcac_col[0] = datablock[8];
            dcac_col[1] = datablock[16];
            dcac_col[2] = datablock[24];
            dcac_col[3] = datablock[32];
            dcac_col[4] = datablock[40];
            dcac_col[5] = datablock[48];
            dcac_col[6] = datablock[56];

            for (j = 0, k = 1; k < 8; k++, j++)
            {
                dcac_row[j] = datablock[k];
                if (dcac_row[j])
                {
                    /* ACDC clipping  03/26/01 */
                    if (datablock[k] > 0)
                    {
                        if (datablock[k] > 2047) dcac_row[j] = 2047;
                        sgn_coeff = 1;
                    }
                    else
                    {
                        if (datablock[k] < -2048) dcac_row[j] = -2048;
                        sgn_coeff = -1;
                    }

                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                    if (temp > 2047) temp = 2047; /*  03/14/01 */
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int) temp;
#ifdef FAST_IDCT
                    bitmapcol[k] |= 128;
#endif
                }
            }

            for (k = 8; k < 64; k += 8)
            {
                if (datablock[k])
                {
                    sgn_coeff = (datablock[k] > 0) ? 1 : -1;

                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                    if (temp > 2047) temp = 2047; /*  03/14/01 */
                    else if (temp < -2048) temp = -2048;
                    datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                    bitmapcol[0] |= mask[k>>3];
#endif
                }
            }
        }
    }
    else
    {
        /* No AC prediction: store the qcoeff-values needed later for prediction. */
        dcac_row[0] = datablock[1];
        dcac_row[1] = datablock[2];
        dcac_row[2] = datablock[3];
        dcac_row[3] = datablock[4];
        dcac_row[4] = datablock[5];
        dcac_row[5] = datablock[6];
        dcac_row[6] = datablock[7];
        dcac_col[0] = datablock[8];
        dcac_col[1] = datablock[16];
        dcac_col[2] = datablock[24];
        dcac_col[3] = datablock[32];
        dcac_col[4] = datablock[40];
        dcac_col[5] = datablock[48];
        dcac_col[6] = datablock[56];

        for (k = 1; k < 8; k++)
        {
            if (datablock[k])
            {
                sgn_coeff = (datablock[k] > 0) ? 1 : -1;

                temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                if (temp > 2047) temp = 2047; /*  03/14/01 */
                else if (temp < -2048) temp = -2048;
                datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                bitmapcol[k] |= 128;
#endif
            }
        }

        for (k = 8; k < 64; k += 8)
        {
            if (datablock[k])
            {
                sgn_coeff = (datablock[k] > 0) ? 1 : -1;

                temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;
                if (temp > 2047) temp = 2047; /*  03/14/01 */
                else if (temp < -2048) temp = -2048;
                datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                bitmapcol[0] |= mask[k>>3];
#endif
            }
        }
    }

    /* Scale and saturate the DC coefficient. */
    if (datablock[0])
    {
#ifdef FAST_IDCT
        bitmapcol[0] |= 128;
#endif
        temp = (int32)datablock[0] * dc_scaler;
        if (temp > 2047) temp = 2047; /*  03/14/01 */
        else if (temp < -2048) temp = -2048;
        datablock[0] = (int16)temp;
    }

#ifdef FAST_IDCT
    /* Fold column bitmaps 1..3 into the row bitmap for the fast IDCT. */
    if (i > 10)
    {
        for (k = 1; k < 4; k++) /* if i > 10 then k = 0 does not matter */
        {
            if (bitmapcol[k] != 0)
            {
                (*bitmaprow) |= mask[k]; /* (1<<(7-i)); */
            }
        }
    }
#endif

    /* Store the qcoeff-value needed later for prediction. */
    (*DC)[comp] = datablock[0];

    return i;
}

/* Short-header (H.263 baseline) variant of the INTRA block decoder.  With
 * PV_ANNEX_IJKT_SUPPORT it additionally handles the advanced-INTRA
 * (Annex I) path and the separate chroma QP (Annex T).
 * Same return convention as the other VlcDequant* routines. */
int VlcDequantH263IntraBlock_SH(VideoDecData *video, int comp,
                                uint8 *bitmapcol, uint8 *bitmaprow)
{
    BitstreamDecVideo *stream = video->bitstream;
    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset to all-zero !!! */
    int32 temp;
    int mbnum = video->mbnum;
    uint CBP = video->headerInfo.CBP[mbnum];
    int16 QP = video->QPMB[mbnum];
    typeDCStore *DC = video->predDC + mbnum;
    int x_pos = video->mbnum_col;
    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;
    typeDCACStore *DCAC_col = video->predDCAC_col;
    uint ACpred_flag = (uint) video->acPredFlag[mbnum];

    /*** VLC decoding state *****/
    int i, k;
    Tcoef run_level;
    int last, return_status;
    VlcDecFuncP vlcDecCoeff;
#ifdef PV_ANNEX_IJKT_SUPPORT
    int direction;
    const int *inv_zigzag;
#endif

    /*** Quantizer state ****/
    /* Map block index comp to the row/column predictor slots. */
    const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};
    const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};

    int16 *dcac_row, *dcac_col;

    dcac_row = (*DCAC_row)[B_Xtab[comp]];
    dcac_col = (*DCAC_col)[B_Ytab[comp]];

    i = 1;

#ifdef FAST_IDCT
    /* Clear the 8-byte column bitmap and the row bitmap. */
    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
#endif

    /* Select which Huffman table to be used. */
    vlcDecCoeff = video->vlcDecCoeffIntra;

#ifdef PV_ANNEX_IJKT_SUPPORT
    if (comp > 3) /* ANNEX_T: chroma blocks use the separate chroma QP */
    {
        QP = video->QP_CHR;
    }
    if
    (!video->advanced_INTRA)
    {
#endif
        /* Block not coded (CBP bit clear): only the DC value is present. */
        if ((CBP & (1 << (5 - comp))) == 0)
        {
#ifdef FAST_IDCT
            bitmapcol[0] = 128;
            bitmapcol[1] = bitmapcol[2] = bitmapcol[3] = bitmapcol[4] = bitmapcol[5] = bitmapcol[6] = bitmapcol[7] = 0;
#endif
            datablock[0] <<= 3; /* DC scaler is 8: no need to clip */
            return 1; //ncoeffs;
        }
        else
        {
            /* Enter the zero-run decoding loop. */
            do
            {
                return_status = (*vlcDecCoeff)(stream, &run_level);
                if (return_status != PV_SUCCESS)
                {
                    last = 1; /* 11/1/2000: let it slip undetected, as in original version */
                    i = VLC_ERROR;
                    break;
                }

                i += run_level.run;
                last = run_level.last;
                if (i >= 64)
                {
                    /* i = NCOEFF_BLOCK; */ /* 11/1/00 */
                    i = VLC_NO_LAST_BIT;
                    last = 1;
                    break;
                }

                k = zigzag_inv[i];
                /* H.263 dequant with odd/even-QP correction, then saturate. */
                if (run_level.sign == 0)
                {
                    temp = (int32)QP * (2 * run_level.level + 1) - 1 + (QP & 1);
                    if (temp > 2047) temp = 2047;
                }
                else
                {
                    temp = -(int32)QP * (2 * run_level.level + 1) + 1 - (QP & 1);
                    if (temp < -2048) temp = -2048;
                }
                datablock[k] = (int16) temp;
#ifdef FAST_IDCT
                bitmapcol[k&0x7] |= mask[k>>3];
#endif
                i++;
            }
            while (!last);
        }

        /* No ACDC prediction when ACDC is disabled: just scale the DC. */
        if (datablock[0])
        {
#ifdef FAST_IDCT
            bitmapcol[0] |= 128;
#endif
            datablock[0] <<= 3; /* no need to clip  09/18/2001 */
        }
#ifdef PV_ANNEX_IJKT_SUPPORT
    }
    else /* advanced_INTRA mode (Annex I) */
    {
        i = 1;

        doDCACPrediction_I(video, comp, datablock);

        /* Perform only VLC decoding. */
        if (!ACpred_flag)
        {
            direction = 0;
        }
        else
        {
            direction = video->mblock->direction;
        }

        /* Choose inverse scan per prediction direction.  04/17/01 */
        inv_zigzag = zigzag_inv + (ACpred_flag << 6) + (direction << 6);

        if (CBP & (1 << (5 - comp)))
        {
            i = 0;
            do
            {
                return_status = (*vlcDecCoeff)(stream, &run_level);
                if (return_status != PV_SUCCESS)
                {
                    last = 1; /* 11/1/2000: let it slip undetected, as in original version */
                    i = VLC_ERROR;
                    ACpred_flag = 0; /* number of coefficients should not get reset  03/07/2002 */
                    break;
                }

                i += run_level.run;
                last = run_level.last;
                if (i >= 64)
                {
                    /* i = NCOEFF_BLOCK; */ /* 11/1/00 */
                    ACpred_flag = 0; /* number of coefficients should not get reset  03/07/2002 */
                    i = VLC_NO_LAST_BIT;
                    last = 1;
                    break;
                }

                k = inv_zigzag[i];
                /* Advanced-INTRA dequant: accumulate 2*QP*level on top of the
                 * predicted value, saturating to [-2048, 2047]. */
                if (run_level.sign == 0)
                {
                    datablock[k] += (int16)QP * 2 * run_level.level;
                    if (datablock[k] > 2047) datablock[k] = 2047;
                }
                else
                {
                    datablock[k] -= (int16)QP * 2 * run_level.level;
                    if (datablock[k] < -2048) datablock[k] = -2048;
                }
#ifdef FAST_IDCT
                bitmapcol[k&0x7] |= mask[k>>3];
#endif
                i++;
            }
            while (!last);
        }

        ///// NEED TO DEQUANT THOSE PREDICTED AC COEFF
        /* Mark the predicted first-row/first-column coefficients in the bitmaps. */
        if (ACpred_flag)
        {
            i = NCOEFF_BLOCK;
            for (k = 1; k < 8; k++)
            {
                if (datablock[k])
                {
                    bitmapcol[k] |= 128;
                }
                if (datablock[k<<3])
                {
                    bitmapcol[0] |= mask[k];
                }
            }
        }

        /* Store the qcoeff-values needed later for prediction. */
        dcac_row[0] = datablock[1];
        dcac_row[1] = datablock[2];
        dcac_row[2] = datablock[3];
        dcac_row[3] = datablock[4];
        dcac_row[4] = datablock[5];
        dcac_row[5] = datablock[6];
        dcac_row[6] = datablock[7];
        dcac_col[0] = datablock[8];
        dcac_col[1] = datablock[16];
        dcac_col[2] = datablock[24];
        dcac_col[3] = datablock[32];
        dcac_col[4] = datablock[40];
        dcac_col[5] = datablock[48];
        dcac_col[6] = datablock[56];

        if (datablock[0])
        {
#ifdef FAST_IDCT
            bitmapcol[0] |= 128;
#endif
            /* Force the DC value odd; negative DC is clamped to zero. */
            datablock[0] |= 1;
            if (datablock[0] < 0)
            {
                datablock[0] = 0;
            }
        }
    }
#endif /* PV_ANNEX_IJKT_SUPPORT */

#ifdef FAST_IDCT
    /* Fold column bitmaps 1..3 into the row bitmap for the fast IDCT. */
    if (i > 10)
    {
        for (k = 1; k < 4; k++) /* if i > 10 then k = 0 does not matter */
        {
            if (bitmapcol[k] != 0)
            {
                (*bitmaprow) |= mask[k]; /* (1<<(7-i)); */
            }
        }
    }
#endif

    /* Store the qcoeff-value needed later for prediction. */
    (*DC)[comp] = datablock[0];

    return i;
}

/***********************************************************CommentBegin******
 *
 * -- VlcDequantInterH263Block -- Decodes the DCT coefficients of one 8x8
 *    block and perform dequantization in H.263 mode for INTER block.
 *    Date: 08/08/2000
 *    Modified: 3/21/01  clean up, added clipping, 16-bit int case
 ******************************************************************************/

/* Decode the TCOEF codes of one INTER 8x8 block and apply H.263-style
 * dequantization: |coef'| = QP*(2*level + 1) - (QP even ? 1 : 0), saturated
 * to [-2048, 2047].
 * Returns the coefficient-count indicator i, or -1 on bitstream error. */
int VlcDequantH263InterBlock(VideoDecData *video, int comp,
                             uint8 *bitmapcol, uint8 *bitmaprow)
{
    BitstreamDecVideo *stream = video->bitstream;
    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset to all-zero !!! */
    int32 temp;
    int mbnum = video->mbnum;
    int QP = video->QPMB[mbnum];

    /*** VLC decoding state *****/
    int i, k;
    Tcoef run_level;
    int last, return_status;
    VlcDecFuncP vlcDecCoeff;

    i = 0;

#ifdef FAST_IDCT
    /* Clear the 8-byte column bitmap and the row bitmap. */
    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
#endif

    /* Select which Huffman table to be used. */
    vlcDecCoeff = video->vlcDecCoeffInter;

    /* Enter the zero-run decoding loop. */
    do
    {
        return_status = (*vlcDecCoeff)(stream, &run_level);
        if (return_status != PV_SUCCESS)
        {
            last = 1; /* 11/1/2000: let it slip undetected, as in original version */
            i = -1;
            break;
        }

        i += run_level.run;
        last = run_level.last;
        if (i >= 64)
        {
            i = -1;
            last = 1;
            break;
        }

        /* H.263 dequant with odd/even-QP correction, then saturate. */
        if (run_level.sign == 0)
        {
            temp = (int32)QP * (2 * run_level.level + 1) - 1 + (QP & 1);
            if (temp > 2047) temp = 2047;
        }
        else
        {
            temp = -(int32)QP * (2 * run_level.level + 1) + 1 - (QP & 1);
            if (temp < -2048) temp = -2048;
        }

        k = zigzag_inv[i];

        datablock[k] = (int16)temp;
#ifdef FAST_IDCT
        bitmapcol[k&0x7] |= mask[k>>3];
#endif
        i++;
    }
    while (!last);

#ifdef FAST_IDCT
    /* Fold column bitmaps 1..3 into the row bitmap for the fast IDCT. */
    if (i > 10) /*  07/19/01 */
    {
        for (k = 1; k < 4; k++) /* if (i > 10) k = 0 does not matter */
        {
            if (bitmapcol[k] != 0)
            {
                (*bitmaprow) |= mask[k]; /* (1<<(7-i)); */
            }
        }
    }
#endif

    return i;
}



================================================
FILE: RtspCamera/jni/m4v_h263/dec/src/vlc_tab.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except
in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_api.h" #include "mp4def.h" #include "mp4lib_int.h" #include "vlc_dec_tab.h" #include "max_level.h" const int intra_max_level[2][NCOEFF_BLOCK] = { {27, 10, 5, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, {8, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; const int inter_max_level[2][NCOEFF_BLOCK] = { {12, 6, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; const int intra_max_run0[28] = { 999, 14, 9, 7, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; const int intra_max_run1[9] = { 999, 20, 6, 1, 0, 0, 0, 0, 0 }; const int inter_max_run0[13] = { 999, 26, 10, 6, 2, 1, 1, 0, 0, 0, 0, 0, 0 }; const int inter_max_run1[4] = { 999, 40, 1, 0 }; const VLCshorttab PV_TMNMVtab0[] = { {3, 4}, { -3, 4}, {2, 3}, {2, 3}, { -2, 3}, { -2, 3}, {1, 2}, {1, 2}, {1, 2}, {1, 2}, { -1, 2}, { -1, 2}, { -1, 2}, { -1, 2} }; const 
VLCshorttab PV_TMNMVtab1[] = { {12, 10}, { -12, 10}, {11, 10}, { -11, 10}, {10, 9}, {10, 9}, { -10, 9}, { -10, 9}, {9, 9}, {9, 9}, { -9, 9}, { -9, 9}, {8, 9}, {8, 9}, { -8, 9}, { -8, 9}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6} }; const VLCshorttab PV_TMNMVtab2[] = { {32, 12}, { -32, 12}, {31, 12}, { -31, 12}, {30, 11}, {30, 11}, { -30, 11}, { -30, 11}, {29, 11}, {29, 11}, { -29, 11}, { -29, 11}, {28, 11}, {28, 11}, { -28, 11}, { -28, 11}, {27, 11}, {27, 11}, { -27, 11}, { -27, 11}, {26, 11}, {26, 11}, { -26, 11}, { -26, 11}, {25, 11}, {25, 11}, { -25, 11}, { -25, 11}, {24, 10}, {24, 10}, {24, 10}, {24, 10}, { -24, 10}, { -24, 10}, { -24, 10}, { -24, 10}, {23, 10}, {23, 10}, {23, 10}, {23, 10}, { -23, 10}, { -23, 10}, { -23, 10}, { -23, 10}, {22, 10}, {22, 10}, {22, 10}, {22, 10}, { -22, 10}, { -22, 10}, { -22, 10}, { -22, 10}, {21, 10}, {21, 10}, {21, 10}, {21, 10}, { -21, 10}, { -21, 10}, { -21, 10}, { -21, 10}, {20, 10}, {20, 10}, {20, 10}, {20, 10}, { -20, 10}, { -20, 10}, { -20, 10}, { -20, 10}, {19, 10}, {19, 10}, {19, 10}, {19, 10}, { -19, 10}, { -19, 10}, { -19, 10}, { -19, 10}, {18, 10}, {18, 10}, {18, 10}, {18, 10}, { -18, 10}, { -18, 10}, { -18, 10}, { -18, 10}, {17, 10}, {17, 10}, {17, 10}, {17, 10}, { -17, 10}, { -17, 10}, { -17, 10}, { -17, 10}, {16, 10}, {16, 10}, {16, 10}, {16, 10}, { 
-16, 10}, { -16, 10}, { -16, 10}, { -16, 10}, {15, 10}, {15, 10}, {15, 10}, {15, 10}, { -15, 10}, { -15, 10}, { -15, 10}, { -15, 10}, {14, 10}, {14, 10}, {14, 10}, {14, 10}, { -14, 10}, { -14, 10}, { -14, 10}, { -14, 10}, {13, 10}, {13, 10}, {13, 10}, {13, 10}, { -13, 10}, { -13, 10}, { -13, 10}, { -13, 10} }; const VLCshorttab PV_MCBPCtab[] = { {VLC_ERROR, 0}, {255, 9}, {52, 9}, {36, 9}, {20, 9}, {49, 9}, {35, 8}, {35, 8}, {19, 8}, {19, 8}, {50, 8}, {50, 8}, {51, 7}, {51, 7}, {51, 7}, {51, 7}, {34, 7}, {34, 7}, {34, 7}, {34, 7}, {18, 7}, {18, 7}, {18, 7}, {18, 7}, {33, 7}, {33, 7}, {33, 7}, {33, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {1, 
3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3} }; #ifdef PV_ANNEX_IJKT_SUPPORT const VLCshorttab PV_MCBPCtab1[] = { {5, 11}, {5, 11}, {5, 11}, {5, 11}, {21, 13}, {21, 13}, {37, 13}, {53, 13}, }; #endif const VLCshorttab PV_MCBPCtabintra[] = { {VLC_ERROR, 0}, {20, 6}, {36, 6}, {52, 6}, {4, 4}, {4, 4}, {4, 4}, {4, 4}, {19, 3}, {19, 3}, {19, 3}, {19, 3}, {19, 3}, {19, 3}, {19, 3}, {19, 3}, {35, 3}, {35, 3}, {35, 3}, {35, 3}, {35, 3}, {35, 3}, {35, 3}, {35, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3} }; const VLCshorttab PV_CBPYtab[48] = { {VLC_ERROR, 0}, {VLC_ERROR, 0}, {6, 6}, {9, 6}, {8, 5}, {8, 5}, {4, 5}, {4, 5}, {2, 5}, {2, 5}, {1, 5}, {1, 5}, {0, 4}, {0, 4}, {0, 4}, {0, 4}, {12, 4}, {12, 4}, {12, 4}, {12, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {14, 4}, {14, 4}, {14, 4}, {14, 4}, {5, 4}, {5, 4}, {5, 4}, {5, 4}, {13, 4}, {13, 4}, {13, 4}, {13, 4}, {3, 4}, {3, 4}, {3, 4}, {3, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {7, 4}, {7, 4}, {7, 4}, {7, 4} }; const VLCtab2 PV_DCT3Dtab0[] = { {0x8, 1, 1, 7}, {0x7, 1, 1, 7}, {0x6, 1, 1, 7}, {0x5, 1, 1, 7}, {0xc, 1, 0, 7}, {0xb, 1, 0, 7}, {0xa, 1, 0, 7}, {0x0, 4, 0, 7}, {0x4, 1, 1, 6}, {0x4, 1, 1, 6}, {0x3, 1, 1, 6}, {0x3, 1, 1, 6}, {0x2, 1, 1, 6}, {0x2, 1, 1, 6}, {0x1, 1, 1, 6}, {0x1, 1, 1, 6}, {0x9, 1, 0, 6}, {0x9, 1, 0, 6}, {0x8, 1, 0, 6}, {0x8, 1, 0, 6}, {0x7, 1, 0, 6}, {0x7, 1, 0, 6}, {0x6, 1, 0, 6}, {0x6, 1, 0, 6}, {0x1, 2, 0, 6}, {0x1, 2, 0, 6}, {0x0, 3, 0, 6}, {0x0, 3, 0, 6}, {0x5, 1, 0, 5}, {0x5, 1, 0, 5}, {0x5, 1, 0, 5}, {0x5, 1, 0, 5}, 
{0x4, 1, 0, 5}, {0x4, 1, 0, 5}, {0x4, 1, 0, 5}, {0x4, 1, 0, 5}, {0x3, 1, 0, 5}, {0x3, 1, 0, 5}, {0x3, 1, 0, 5}, {0x3, 1, 0, 5}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4} }; const VLCtab2 PV_DCT3Dtab1[] = { {0x0, 9, 0, 10}, {0x0, 8, 0, 10}, {0x18, 1, 1, 9}, {0x18, 1, 1, 9}, {0x17, 1, 1, 9}, {0x17, 1, 1, 9}, {0x16, 1, 1, 9}, {0x16, 1, 1, 9}, {0x15, 1, 1, 9}, {0x15, 1, 1, 9}, {0x14, 1, 1, 9}, {0x14, 1, 1, 9}, {0x13, 1, 1, 9}, {0x13, 1, 1, 9}, {0x12, 1, 1, 9}, {0x12, 1, 1, 9}, {0x11, 1, 1, 9}, {0x11, 1, 1, 9}, {0x0, 2, 1, 9}, {0x0, 2, 1, 9}, {0x16, 1, 0, 9}, {0x16, 1, 0, 9}, {0x15, 1, 0, 9}, {0x15, 1, 0, 9}, {0x14, 1, 0, 9}, {0x14, 1, 0, 9}, {0x13, 1, 0, 9}, {0x13, 1, 0, 9}, {0x12, 1, 0, 9}, {0x12, 1, 0, 9}, {0x11, 1, 0, 9}, {0x11, 1, 0, 9}, {0x10, 1, 0, 9}, {0x10, 1, 0, 9}, {0xf, 1, 0, 9}, {0xf, 1, 0, 9}, {0x4, 2, 0, 9}, {0x4, 2, 0, 9}, {0x3, 2, 0, 9}, {0x3, 2, 0, 9}, {0x0, 7, 0, 
9}, {0x0, 7, 0, 9}, {0x0, 6, 0, 9}, {0x0, 6, 0, 9}, {0x10, 1, 1, 8}, {0x10, 1, 1, 8}, {0x10, 1, 1, 8}, {0x10, 1, 1, 8}, {0xf, 1, 1, 8}, {0xf, 1, 1, 8}, {0xf, 1, 1, 8}, {0xf, 1, 1, 8}, {0xe, 1, 1, 8}, {0xe, 1, 1, 8}, {0xe, 1, 1, 8}, {0xe, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0xe, 1, 0, 8}, {0xe, 1, 0, 8}, {0xe, 1, 0, 8}, {0xe, 1, 0, 8}, {0xd, 1, 0, 8}, {0xd, 1, 0, 8}, {0xd, 1, 0, 8}, {0xd, 1, 0, 8}, {0x2, 2, 0, 8}, {0x2, 2, 0, 8}, {0x2, 2, 0, 8}, {0x2, 2, 0, 8}, {0x1, 3, 0, 8}, {0x1, 3, 0, 8}, {0x1, 3, 0, 8}, {0x1, 3, 0, 8}, {0x0, 5, 0, 8}, {0x0, 5, 0, 8}, {0x0, 5, 0, 8}, {0x0, 5, 0, 8} }; const VLCtab2 PV_DCT3Dtab2[] = { {0x1, 2, 1, 11}, {0x1, 2, 1, 11}, {0x0, 3, 1, 11}, {0x0, 3, 1, 11}, {0x0, 0xb, 0, 11}, {0x0, 0xb, 0, 11}, {0x0, 0xa, 0, 11}, {0x0, 0xa, 0, 11}, {0x1c, 1, 1, 10}, {0x1c, 1, 1, 10}, {0x1c, 1, 1, 10}, {0x1c, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1a, 1, 1, 10}, {0x1a, 1, 1, 10}, {0x1a, 1, 1, 10}, {0x1a, 1, 1, 10}, {0x19, 1, 1, 10}, {0x19, 1, 1, 10}, {0x19, 1, 1, 10}, {0x19, 1, 1, 10}, {0x9, 2, 0, 10}, {0x9, 2, 0, 10}, {0x9, 2, 0, 10}, {0x9, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x7, 2, 0, 10}, {0x7, 2, 0, 10}, {0x7, 2, 0, 10}, {0x7, 2, 0, 10}, {0x6, 2, 0, 10}, {0x6, 2, 0, 10}, {0x6, 2, 0, 10}, {0x6, 2, 0, 10}, {0x5, 2, 0, 10}, {0x5, 2, 0, 10}, {0x5, 2, 0, 10}, {0x5, 2, 0, 10}, {0x3, 3, 0, 10}, {0x3, 3, 0, 10}, {0x3, 3, 0, 10}, {0x3, 3, 0, 10}, {0x2, 3, 0, 10}, {0x2, 3, 0, 10}, {0x2, 3, 0, 10}, {0x2, 3, 0, 10}, {0x1, 4, 0, 10}, {0x1, 4, 0, 10}, {0x1, 4, 0, 10}, {0x1, 4, 0, 10}, {0x0, 0xc, 0, 11}, {0x0, 0xc, 0, 11}, {0x1, 5, 0, 11}, {0x1, 5, 0, 11}, {0x17, 1, 0, 11}, {0x17, 1, 
0, 11}, {0x18, 1, 0, 11}, {0x18, 1, 0, 11}, {0x1d, 1, 1, 11}, {0x1d, 1, 1, 11}, {0x1e, 1, 1, 11}, {0x1e, 1, 1, 11}, {0x1f, 1, 1, 11}, {0x1f, 1, 1, 11}, {0x20, 1, 1, 11}, {0x20, 1, 1, 11}, {0x1, 6, 0, 12}, {0x2, 4, 0, 12}, {0x4, 3, 0, 12}, {0x5, 3, 0, 12}, {0x6, 3, 0, 12}, {0xa, 2, 0, 12}, {0x19, 1, 0, 12}, {0x1a, 1, 0, 12}, {0x21, 1, 1, 12}, {0x22, 1, 1, 12}, {0x23, 1, 1, 12}, {0x24, 1, 1, 12}, {0x25, 1, 1, 12}, {0x26, 1, 1, 12}, {0x27, 1, 1, 12}, {0x28, 1, 1, 12}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7} }; /* New tables for Intra luminance blocks */ const VLCtab2 PV_DCT3Dtab3[] = { {0x4, 1, 1, 7}, {0x3, 1, 1, 7}, {0x6, 1, 0, 7}, {0x5, 1, 1, 7}, {0x7, 1, 0, 7}, {0x2, 2, 0, 7}, {0x1, 3, 0, 7}, {0x0, 9, 0, 7}, {0x0, 2, 1, 6}, {0x0, 2, 1, 6}, {0x5, 1, 0, 6}, {0x5, 1, 0, 6}, {0x2, 1, 1, 6}, {0x2, 1, 1, 6}, {0x1, 1, 1, 6}, {0x1, 1, 1, 6}, {0x4, 1, 0, 6}, {0x4, 1, 0, 6}, {0x3, 1, 0, 6}, {0x3, 1, 0, 6}, {0x0, 8, 0, 6}, {0x0, 8, 0, 6}, {0x0, 7, 0, 6}, {0x0, 7, 0, 6}, {0x1, 2, 0, 6}, {0x1, 2, 0, 6}, {0x0, 6, 0, 6}, {0x0, 6, 0, 6}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 
0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4} }; const VLCtab2 PV_DCT3Dtab4[] = { {0x0, 0x12, 0, 10}, {0x0, 0x11, 0, 10}, {0xe, 1, 1, 9}, {0xe, 1, 1, 9}, {0xd, 1, 1, 9}, {0xd, 1, 1, 9}, {0xc, 1, 1, 9}, {0xc, 1, 1, 9}, {0xb, 1, 1, 9}, {0xb, 1, 1, 9}, {0xa, 1, 1, 9}, {0xa, 1, 1, 9}, {0x1, 2, 1, 9}, {0x1, 2, 1, 9}, {0x0, 4, 1, 9}, {0x0, 4, 1, 9}, {0xc, 1, 0, 9}, {0xc, 1, 0, 9}, {0xb, 1, 0, 9}, {0xb, 1, 0, 9}, {0x7, 2, 0, 9}, {0x7, 2, 0, 9}, {0x6, 2, 0, 9}, {0x6, 2, 0, 9}, {0x5, 2, 0, 9}, {0x5, 2, 0, 9}, {0x3, 3, 0, 9}, {0x3, 3, 0, 9}, {0x2, 3, 0, 9}, {0x2, 3, 0, 9}, {0x1, 6, 0, 9}, {0x1, 6, 0, 9}, {0x1, 5, 0, 9}, {0x1, 5, 0, 9}, {0x0, 0x10, 0, 9}, {0x0, 0x10, 0, 9}, {0x4, 2, 0, 9}, {0x4, 2, 0, 9}, {0x0, 0xf, 0, 9}, {0x0, 0xf, 0, 9}, {0x0, 0xe, 0, 9}, {0x0, 0xe, 0, 9}, {0x0, 0xd, 0, 9}, {0x0, 0xd, 0, 9}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x6, 1, 1, 8}, {0x6, 1, 1, 8}, {0x6, 1, 1, 8}, {0x6, 1, 1, 8}, {0x0, 3, 1, 8}, {0x0, 3, 1, 8}, {0x0, 3, 1, 8}, {0x0, 3, 1, 8}, {0xa, 1, 0, 8}, {0xa, 1, 0, 
8}, {0xa, 1, 0, 8}, {0xa, 1, 0, 8}, {0x9, 1, 0, 8}, {0x9, 1, 0, 8}, {0x9, 1, 0, 8}, {0x9, 1, 0, 8}, {0x8, 1, 0, 8}, {0x8, 1, 0, 8}, {0x8, 1, 0, 8}, {0x8, 1, 0, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x0, 0xc, 0, 8}, {0x0, 0xc, 0, 8}, {0x0, 0xc, 0, 8}, {0x0, 0xc, 0, 8}, {0x0, 0xb, 0, 8}, {0x0, 0xb, 0, 8}, {0x0, 0xb, 0, 8}, {0x0, 0xb, 0, 8}, {0x0, 0xa, 0, 8}, {0x0, 0xa, 0, 8}, {0x0, 0xa, 0, 8}, {0x0, 0xa, 0, 8} }; const VLCtab2 PV_DCT3Dtab5[] = { {0x0, 7, 1, 11}, {0x0, 7, 1, 11}, {0x0, 6, 1, 11}, {0x0, 6, 1, 11}, {0x0, 0x16, 0, 11}, {0x0, 0x16, 0, 11}, {0x0, 0x15, 0, 11}, {0x0, 0x15, 0, 11}, {0x2, 2, 1, 10}, {0x2, 2, 1, 10}, {0x2, 2, 1, 10}, {0x2, 2, 1, 10}, {0x1, 3, 1, 10}, {0x1, 3, 1, 10}, {0x1, 3, 1, 10}, {0x1, 3, 1, 10}, {0x0, 5, 1, 10}, {0x0, 5, 1, 10}, {0x0, 5, 1, 10}, {0x0, 5, 1, 10}, {0xd, 1, 0, 10}, {0xd, 1, 0, 10}, {0xd, 1, 0, 10}, {0xd, 1, 0, 10}, {0x5, 3, 0, 10}, {0x5, 3, 0, 10}, {0x5, 3, 0, 10}, {0x5, 3, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x4, 3, 0, 10}, {0x4, 3, 0, 10}, {0x4, 3, 0, 10}, {0x4, 3, 0, 10}, {0x3, 4, 0, 10}, {0x3, 4, 0, 10}, {0x3, 4, 0, 10}, {0x3, 4, 0, 10}, {0x2, 4, 0, 10}, {0x2, 4, 0, 10}, {0x2, 4, 0, 10}, {0x2, 4, 0, 10}, {0x1, 7, 0, 10}, {0x1, 7, 0, 10}, {0x1, 7, 0, 10}, {0x1, 7, 0, 10}, {0x0, 0x14, 0, 10}, {0x0, 0x14, 0, 10}, {0x0, 0x14, 0, 10}, {0x0, 0x14, 0, 10}, {0x0, 0x13, 0, 10}, {0x0, 0x13, 0, 10}, {0x0, 0x13, 0, 10}, {0x0, 0x13, 0, 10}, {0x0, 0x17, 0, 11}, {0x0, 0x17, 0, 11}, {0x0, 0x18, 0, 11}, {0x0, 0x18, 0, 11}, {0x1, 8, 0, 11}, {0x1, 8, 0, 11}, {0x9, 2, 0, 11}, {0x9, 2, 0, 11}, {0x3, 2, 1, 11}, {0x3, 2, 1, 11}, {0x4, 2, 1, 11}, {0x4, 2, 1, 11}, {0xf, 1, 1, 11}, {0xf, 1, 1, 11}, {0x10, 1, 1, 11}, {0x10, 1, 1, 11}, {0, 0x19, 0, 12}, {0, 0x1a, 0, 12}, {0, 0x1b, 0, 12}, {1, 9, 0, 12}, {0x6, 3, 0, 12}, {0x1, 0xa, 0, 12}, {0x2, 5, 0, 
12}, {0x7, 3, 0, 12}, {0xe, 1, 0, 12}, {0x0, 8, 1, 12}, {0x5, 2, 1, 12}, {0x6, 2, 1, 12}, {0x11, 1, 1, 12}, {0x12, 1, 1, 12}, {0x13, 1, 1, 12}, {0x14, 1, 1, 12}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7} }; #ifdef PV_ANNEX_IJKT_SUPPORT const VLCtab2 PV_DCT3Dtab6[] = { {0x0, 3, 1, 7}, {0x4, 1, 1, 7}, {0x6, 1, 1, 7}, {0x5, 1, 1, 7}, {0x1, 3, 0, 7}, {0x2, 2, 0, 7}, {0x0, 9, 0, 7}, {0x5, 1, 0, 7}, {0x0, 2, 1, 6}, {0x0, 2, 1, 6}, {0x3, 1, 1, 6}, {0x3, 1, 1, 6}, {0x2, 1, 1, 6}, {0x2, 1, 1, 6}, {0x1, 1, 1, 6}, {0x1, 1, 1, 6}, {0x0, 6, 0, 6}, {0x0, 6, 0, 6}, {0x0, 7, 0, 6}, {0x0, 7, 0, 6}, {0x0, 8, 0, 6}, {0x0, 8, 0, 6}, {0x4, 1, 0, 6}, {0x4, 1, 0, 6}, {0x1, 2, 0, 6}, {0x1, 2, 0, 6}, {0x3, 1, 0, 6}, {0x3, 1, 0, 6}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 
2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4} }; const VLCtab2 PV_DCT3Dtab7[] = { {0xb, 1, 0, 10}, {0xa, 1, 0, 10}, {0x0, 5, 1, 9}, {0x0, 5, 1, 9}, {0x0, 6, 1, 9}, {0x0, 6, 1, 9}, {0x1, 2, 1, 9}, {0x1, 2, 1, 9}, {0x2, 2, 1, 9}, {0x2, 2, 1, 9}, {0xf, 1, 1, 9}, {0xf, 1, 1, 9}, {0x10, 1, 1, 9}, {0x10, 1, 1, 9}, {0x12, 1, 1, 9}, {0x12, 1, 1, 9}, {0x11, 1, 1, 9}, {0x11, 1, 1, 9}, {0xe, 1, 1, 9}, {0xe, 1, 1, 9}, {0x0, 13, 0, 9}, {0x0, 13, 0, 9}, {0x0, 14, 0, 9}, {0x0, 14, 0, 9}, {0x0, 15, 0, 9}, {0x0, 15, 0, 9}, {0x0, 16, 0, 9}, {0x0, 16, 0, 9}, {0x0, 17, 0, 9}, {0x0, 17, 0, 9}, {0x0, 18, 0, 9}, {0x0, 18, 0, 9}, {0x0, 11, 0, 9}, {0x0, 11, 0, 9}, {0x0, 12, 0, 9}, {0x0, 12, 0, 9}, {0x5, 2, 0, 9}, {0x5, 2, 0, 9}, {0x4, 2, 0, 9}, {0x4, 2, 0, 9}, {0x9, 1, 0, 9}, {0x9, 1, 0, 9}, {0x8, 1, 0, 9}, {0x8, 1, 0, 9}, {0x0, 4, 1, 8}, {0x0, 4, 1, 8}, {0x0, 4, 1, 8}, {0x0, 4, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x0, 10, 0, 8}, {0x0, 10, 0, 8}, {0x0, 10, 0, 8}, {0x0, 10, 0, 
8}, {0x6, 1, 0, 8}, {0x6, 1, 0, 8}, {0x6, 1, 0, 8}, {0x6, 1, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x7, 1, 0, 8}, {0x7, 1, 0, 8}, {0x7, 1, 0, 8}, {0x7, 1, 0, 8} }; const VLCtab2 PV_DCT3Dtab8[] = { {0x13, 0x1, 1, 11}, {0x13, 0x1, 1, 11}, {0x14, 0x1, 1, 11}, {0x14, 0x1, 1, 11}, {0x9, 0x2, 0, 11}, {0x9, 0x2, 0, 11}, {0x4, 0x3, 0, 11}, {0x4, 0x3, 0, 11}, {0x0, 0x7, 1, 10}, {0x0, 0x7, 1, 10}, {0x0, 0x7, 1, 10}, {0x0, 0x7, 1, 10}, {0x1, 0x3, 1, 10}, {0x1, 0x3, 1, 10}, {0x1, 0x3, 1, 10}, {0x1, 0x3, 1, 10}, {0x3, 0x2, 1, 10}, {0x3, 0x2, 1, 10}, {0x3, 0x2, 1, 10}, {0x3, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0xc, 0x1, 0, 10}, {0xc, 0x1, 0, 10}, {0xc, 0x1, 0, 10}, {0xc, 0x1, 0, 10}, {0x2, 0x4, 0, 10}, {0x2, 0x4, 0, 10}, {0x2, 0x4, 0, 10}, {0x2, 0x4, 0, 10}, {0x8, 0x2, 0, 10}, {0x8, 0x2, 0, 10}, {0x8, 0x2, 0, 10}, {0x8, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x6, 0x2, 0, 10}, {0x6, 0x2, 0, 10}, {0x6, 0x2, 0, 10}, {0x6, 0x2, 0, 10}, {0x3, 0x3, 0, 10}, {0x3, 0x3, 0, 10}, {0x3, 0x3, 0, 10}, {0x3, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x1, 0x5, 0, 10}, {0x1, 0x5, 0, 10}, {0x1, 0x5, 0, 10}, {0x1, 0x5, 0, 10}, {0xd, 0x1, 0, 11}, {0xd, 0x1, 0, 11}, {0x1, 0x6, 0, 11}, {0x1, 0x6, 0, 11}, {0x0, 0x14, 0, 11}, {0x0, 0x14, 0, 11}, {0x0, 0x13, 0, 11}, {0x0, 0x13, 0, 11}, {0x2, 0x3, 1, 11}, {0x2, 0x3, 1, 11}, {0x1, 0x4, 1, 11}, {0x1, 0x4, 1, 11}, {0x0, 0x9, 1, 11}, {0x0, 0x9, 1, 11}, {0x0, 0x8, 1, 11}, {0x0, 0x8, 1, 11}, {0x1, 0x7, 0, 12}, {0x3, 0x4, 0, 12}, {0x5, 0x3, 0, 12}, {0x0, 0x19, 0, 12}, {0x0, 0x18, 0, 12}, {0x0, 0x17, 0, 12}, {0x0, 0x16, 0, 12}, {0x0, 0x15, 0, 12}, {0x15, 0x1, 1, 12}, {0x16, 0x1, 1, 12}, {0x17, 0x1, 1, 12}, {0x7, 0x2, 1, 12}, {0x6, 0x2, 1, 12}, {0x5, 0x2, 1, 12}, {0x3, 0x3, 1, 12}, {0x0, 0xa, 1, 12}, {0x2f, 0x3f, 1, 
7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7} }; #endif /* RVLC tables */ const int ptrRvlcTab[11] = {0, 24, 46, 66, 84, 100, 114, 126, 134, 140, 144}; const VLCtab2 RvlcDCTtabIntra[170] = /* 00xxxx00 or 00xxxx01 */ { {27, 255, 0, 5}, /* 0000 is escape code */ {1, 1, 0, 4}, {2, 1, 0, 5}, {3, 1, 0, 5}, {4, 1, 0, 6}, {5, 1, 0, 6}, {6, 1, 0, 7}, {7, 1, 0, 7}, {8, 1, 0, 8}, {9, 1, 0, 8}, {10, 1, 0, 9}, {5, 2, 0, 9}, {11, 1, 0, 10}, {12, 1, 0, 10}, {13, 1, 0, 11}, {9, 2, 0, 11}, {10, 2, 0, 12}, {4, 4, 0, 12}, {14, 1, 0, 13}, {15, 1, 0, 13}, {16, 1, 0, 14}, {17, 1, 0, 14}, {0, 27, 0, 15}, {3, 9, 0, 15}, /* 010xxxx00 or 010xxxx01 */ {1, 2, 0, 5}, {0, 4, 0, 5}, {0, 5, 0, 6}, {0, 6, 0, 6}, {2, 2, 0, 7}, {1, 3, 0, 7}, {3, 2, 0, 8}, {4, 2, 0, 8}, {2, 3, 0, 9}, {3, 3, 0, 9}, {6, 2, 0, 10}, {7, 2, 0, 10}, {5, 3, 0, 11}, {6, 3, 0, 11}, {5, 4, 0, 12}, {6, 4, 0, 12}, {11, 2, 0, 13}, {8, 3, 0, 13}, {18, 1, 0, 14}, {8, 4, 0, 14}, {6, 5, 0, 15}, {7, 5, 0, 15}, /* 0110xxxx00 or 0110xxxx01 */ {3, 1, 1, 6}, {4, 1, 1, 6}, {0, 7, 0, 7}, {7, 1, 1, 7}, {1, 4, 0, 8}, {1, 5, 0, 8}, {1, 6, 0, 9}, {0, 10, 0, 9}, {8, 2, 0, 10}, {4, 3, 0, 10}, {7, 3, 0, 11}, {3, 4, 0, 11}, {3, 5, 0, 12}, {4, 5, 0, 12}, {9, 3, 0, 13}, {7, 4, 0, 13}, {5, 5, 0, 14}, {4, 6, 0, 14}, {9, 4, 0, 15}, {12, 2, 0, 15}, /* 01110xxxx00 or 01110xxxx01 */ {8, 1, 1, 7}, {9, 1, 1, 7}, {0, 8, 0, 8}, {0, 9, 0, 8}, {0, 11, 0, 9}, {1, 2, 1, 9}, {2, 4, 0, 10}, {1, 7, 
0, 10}, {2, 5, 0, 11}, {2, 6, 0, 11}, {1, 10, 0, 12}, {0, 18, 0, 12}, {3, 6, 0, 13}, {2, 7, 0, 13}, {5, 6, 0, 14}, {3, 7, 0, 14}, {19, 1, 0, 15}, {1, 5, 1, 15}, /* 011110xxxx00 or 011110xxxx01 */ {0, 2, 1, 8}, {12, 1, 1, 8}, {15, 1, 1, 9}, {16, 1, 1, 9}, {0, 12, 0, 10}, {0, 13, 0, 10}, {1, 8, 0, 11}, {1, 9, 0, 11}, {0, 19, 0, 12}, {0, 22, 0, 12}, {2, 8, 0, 13}, {2, 9, 0, 13}, {3, 8, 0, 14}, {2, 10, 0, 14}, {2, 3, 1, 15}, {13, 2, 1, 15}, /* 0111110xxxx00 or 0111110xxxx01 */ {17, 1, 1, 9}, {18, 1, 1, 9}, {0, 14, 0, 10}, {21, 1, 1, 10}, {0, 15, 0, 11}, {0, 16, 0, 11}, {1, 3, 1, 12}, {3, 2, 1, 12}, {1, 11, 0, 13}, {0, 20, 0, 13}, {2, 11, 0, 14}, {1, 12, 0, 14}, {41, 1, 1, 15}, {42, 1, 1, 15}, /* 01111110xxxx00 or 01111110xxxx01 */ {22, 1, 1, 10}, {23, 1, 1, 10}, {0, 17, 0, 11}, {0, 3, 1, 11}, {4, 2, 1, 12}, {29, 1, 1, 12}, {0, 21, 0, 13}, {0, 23, 0, 13}, {1, 13, 0, 14}, {0, 24, 0, 14}, {43, 1, 1, 15}, {44, 1, 1, 15}, /* 011111110xxxx00 or 011111110xxxx01 */ {2, 2, 1, 11}, {26, 1, 1, 11}, {30, 1, 1, 12}, {31, 1, 1, 12}, {0, 4, 1, 13}, {5, 2, 1, 13}, {0, 25, 0, 14}, {0, 26, 0, 14}, /* 0111111110xxxx00 or 0111111110xxxx01 */ {32, 1, 1, 12}, {33, 1, 1, 12}, {6, 2, 1, 13}, {7, 2, 1, 13}, {0, 5, 1, 14}, {1, 4, 1, 14}, /* 01111111110xxxx00 or 01111111110xxxx01 */ {8, 2, 1, 13}, {9, 2, 1, 13}, {10, 2, 1, 14}, {11, 2, 1, 14}, /* 011111111110xxxx00 or 011111111110xxxx01 */ {12, 2, 1, 14}, {38, 1, 1, 14}, /* 1xxxx10 or 1xxxx11 from 11 zeros to 0 zeros*/ {0, 1, 0, 3}, {0, 2, 0, 3}, {0, 3, 0, 4}, {0, 1, 1, 4}, {1, 1, 1, 5}, {2, 1, 1, 5}, {5, 1, 1, 6}, {6, 1, 1, 6}, {10, 1, 1, 7}, {11, 1, 1, 7}, {13, 1, 1, 8}, {14, 1, 1, 8}, {19, 1, 1, 9}, {20, 1, 1, 9}, {24, 1, 1, 10}, {25, 1, 1, 10}, {27, 1, 1, 11}, {28, 1, 1, 11}, {34, 1, 1, 12}, {35, 1, 1, 12}, {36, 1, 1, 13}, {37, 1, 1, 13}, {39, 1, 1, 14}, {40, 1, 1, 14} }; const VLCtab2 RvlcDCTtabInter[170] = /* 00xxxx00 or 00xxxx01 */ { {27, 255, 0, 5}, /* 0000 is escape code */ {0, 2, 0, 4}, {0, 3, 0, 5}, {3, 1, 0, 5}, {1, 2, 0, 6}, {6, 1, 
0, 6}, {0, 4, 0, 7}, {2, 2, 0, 7}, {0, 5, 0, 8}, {0, 6, 0, 8}, {0, 7, 0, 9}, {1, 4, 0, 9}, {0, 8, 0, 10}, {0, 9, 0, 10}, {0, 10, 0, 11}, {0, 11, 0, 11}, {0, 12, 0, 12}, {1, 7, 0, 12}, {0, 13, 0, 13}, {0, 14, 0, 13}, {0, 17, 0, 14}, {0, 18, 0, 14}, {0, 19, 0, 15}, {3, 7, 0, 15}, /* 010xxxx00 or 010xxxx01 */ {4, 1, 0, 5}, {5, 1, 0, 5}, {7, 1, 0, 6}, {8, 1, 0, 6}, {9, 1, 0, 7}, {10, 1, 0, 7}, {1, 3, 0, 8}, {3, 2, 0, 8}, {2, 3, 0, 9}, {5, 2, 0, 9}, {1, 5, 0, 10}, {3, 3, 0, 10}, {1, 6, 0, 11}, {2, 4, 0, 11}, {2, 5, 0, 12}, {3, 4, 0, 12}, {0, 15, 0, 13}, {0, 16, 0, 13}, {1, 9, 0, 14}, {1, 10, 0, 14}, {4, 5, 0, 15}, {7, 4, 0, 15}, /* 0110xxxx00 or 0110xxxx01 */ {3, 1, 1, 6}, {4, 1, 1, 6}, {11, 1, 0, 7}, {7, 1, 1, 7}, {4, 2, 0, 8}, {12, 1, 0, 8}, {15, 1, 0, 9}, {16, 1, 0, 9}, {6, 2, 0, 10}, {7, 2, 0, 10}, {4, 3, 0, 11}, {5, 3, 0, 11}, {6, 3, 0, 12}, {7, 3, 0, 12}, {1, 8, 0, 13}, {3, 5, 0, 13}, {2, 6, 0, 14}, {2, 7, 0, 14}, {17, 2, 0, 15}, {37, 1, 0, 15}, /* 01110xxxx00 or 01110xxxx01 */ {8, 1, 1, 7}, {9, 1, 1, 7}, {13, 1, 0, 8}, {14, 1, 0, 8}, {17, 1, 0, 9}, {1, 2, 1, 9}, {8, 2, 0, 10}, {9, 2, 0, 10}, {10, 2, 0, 11}, {21, 1, 0, 11}, {11, 2, 0, 12}, {27, 1, 0, 12}, {4, 4, 0, 13}, {5, 4, 0, 13}, {3, 6, 0, 14}, {6, 4, 0, 14}, {38, 1, 0, 15}, {1, 5, 1, 15}, /* 011110xxxx00 or 011110xxxx01 */ {0, 2, 1, 8}, {12, 1, 1, 8}, {15, 1, 1, 9}, {16, 1, 1, 9}, {18, 1, 0, 10}, {19, 1, 0, 10}, {22, 1, 0, 11}, {23, 1, 0, 11}, {28, 1, 0, 12}, {29, 1, 0, 12}, {8, 3, 0, 13}, {12, 2, 0, 13}, {9, 3, 0, 14}, {13, 2, 0, 14}, {2, 3, 1, 15}, {13, 2, 1, 15}, /* 0111110xxxx00 or 0111110xxxx01 */ {17, 1, 1, 9}, {18, 1, 1, 9}, {20, 1, 0, 10}, {21, 1, 1, 10}, {24, 1, 0, 11}, {25, 1, 0, 11}, {1, 3, 1, 12}, {3, 2, 1, 12}, {30, 1, 0, 13}, {31, 1, 0, 13}, {14, 2, 0, 14}, {15, 2, 0, 14}, {41, 1, 1, 15}, {42, 1, 1, 15}, /* 01111110xxxx00 or 01111110xxxx01 */ {22, 1, 1, 10}, {23, 1, 1, 10}, {26, 1, 0, 11}, {0, 3, 1, 11}, {4, 2, 1, 12}, {29, 1, 1, 12}, {32, 1, 0, 13}, {33, 1, 0, 13}, {16, 2, 0, 14}, {34, 1, 0, 
14}, {43, 1, 1, 15}, {44, 1, 1, 15}, /* 011111110xxxx00 or 011111110xxxx01 */ {2, 2, 1, 11}, {26, 1, 1, 11}, {30, 1, 1, 12}, {31, 1, 1, 12}, {0, 4, 1, 13}, {5, 2, 1, 13}, {35, 1, 0, 14}, {36, 1, 0, 14}, /* 0111111110xxxx00 or 0111111110xxxx01 */ {32, 1, 1, 12}, {33, 1, 1, 12}, {6, 2, 1, 13}, {7, 2, 1, 13}, {0, 5, 1, 14}, {1, 4, 1, 14}, /* 01111111110xxxx00 or 01111111110xxxx01 */ {8, 2, 1, 13}, {9, 2, 1, 13}, {10, 2, 1, 14}, {11, 2, 1, 14}, /* 011111111110xxxx00 or 011111111110xxxx01 */ {12, 2, 1, 14}, {38, 1, 1, 14}, /* 1xxxx10 or 1xxxx11 from 11 zeros to 0 zeros*/ {0, 1, 0, 3}, {1, 1, 0, 3}, {2, 1, 0, 4}, {0, 1, 1, 4}, {1, 1, 1, 5}, {2, 1, 1, 5}, {5, 1, 1, 6}, {6, 1, 1, 6}, {10, 1, 1, 7}, {11, 1, 1, 7}, {13, 1, 1, 8}, {14, 1, 1, 8}, {19, 1, 1, 9}, {20, 1, 1, 9}, {24, 1, 1, 10}, {25, 1, 1, 10}, {27, 1, 1, 11}, {28, 1, 1, 11}, {34, 1, 1, 12}, {35, 1, 1, 12}, {36, 1, 1, 13}, {37, 1, 1, 13}, {39, 1, 1, 14}, {40, 1, 1, 14} }; /*---------------------------------------------------------------------------- ; EXTERNAL FUNCTION REFERENCES ; Declare functions defined elsewhere and referenced in this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; FUNCTION CODE ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Function body here 
----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/vop.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
 * ------------------------------------------------------------------- */
#include "mp4dec_lib.h"
#include "bitstream.h"
#include "vlc_decode.h"
#include "zigzag.h"

#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA
#include "osclconfig_compiler_warnings.h"

#ifdef PV_SUPPORT_MAIN_PROFILE
/* Default quantization matrices used when the bitstream does not carry
   its own.  NOTE(review): presumably these are the MPEG-4 (ISO/IEC
   14496-2) default intra/inter weighting matrices in raster order --
   confirm against the spec before relying on that. */
/* INTRA */
const static int mpeg_iqmat_def[NCOEFF_BLOCK] =
{
    8, 17, 18, 19, 21, 23, 25, 27,
    17, 18, 19, 21, 23, 25, 27, 28,
    20, 21, 22, 23, 24, 26, 28, 30,
    21, 22, 23, 24, 26, 28, 30, 32,
    22, 23, 24, 26, 28, 30, 32, 35,
    23, 24, 26, 28, 30, 32, 35, 38,
    25, 26, 28, 30, 32, 35, 38, 41,
    27, 28, 30, 32, 35, 38, 41, 45
};
/* INTER */
const static int mpeg_nqmat_def[64]  =
{
    16, 17, 18, 19, 20, 21, 22, 23,
    17, 18, 19, 20, 21, 22, 23, 24,
    18, 19, 20, 21, 22, 23, 24, 25,
    19, 20, 21, 22, 23, 24, 26, 27,
    20, 21, 22, 23, 25, 26, 27, 28,
    21, 22, 23, 24, 26, 27, 28, 30,
    22, 23, 24, 26, 27, 28, 30, 31,
    23, 24, 25, 27, 28, 30, 31, 33
};
#endif

/* ======================================================================== */
/*  Function : CalcNumBits()                                                */
/*  Purpose  : Calculate the minimum number of bits required to             */
/*             represent x.                                                 */
/*  In/out   : x - unsigned value to measure; returns the bit count.       */
/*  Return   : Equivalent to (long)ceil(log((double)x)/log(2.0)) for x > 1; */
/*             returns 1 for x == 0 or x == 1 (loop body never runs, so the */
/*             initial i = 1 is the answer in those cases).                 */
/*  Note     : Implemented by counting how many right-shifts empty x.       */
/*  Modified :                                                              */
/* ======================================================================== */
int CalcNumBits(uint x)
{
    int i = 1;                 /* every value needs at least one bit */
    while (x >>= 1) i++;       /* one more bit per surviving high bit */
    return i;
}

/***********************************************************CommentBegin******
*
* -- DecodeVolHeader -- Decode the header of a VOL
*
*   04/10/2000 : initial modification to the new PV-Decoder Lib format.
* 10/12/2001 : reject non compliant bitstreams * ***********************************************************CommentEnd********/ PV_STATUS DecodeVOLHeader(VideoDecData *video, int layer) { PV_STATUS status; Vol *currVol; BitstreamDecVideo *stream; uint32 tmpvar, vol_shape; uint32 startCode; #ifdef PV_SUPPORT_MAIN_PROFILE int *qmat, i, j; #endif int version_id = 1; #ifdef PV_TOLERATE_VOL_ERRORS uint32 profile = 0x01; #endif /* There's a "currLayer" variable inside videoDecData. */ /* However, we don't maintain it until we decode frame data. 04/05/2000 */ currVol = video->vol[layer]; stream = currVol->bitstream; currVol->moduloTimeBase = 0; /* Determine which start code for the decoder to begin with */ status = BitstreamShowBits32HC(stream, &startCode); if (startCode == VISUAL_OBJECT_SEQUENCE_START_CODE) { /* Bitstream Exhchange Fix 9/99 */ /* Bitstream Exchange requires we allow start with Video Object Sequence */ /* visual_object_sequence_start_code */ (void) BitstreamReadBits32HC(stream); tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* profile */ #ifndef PV_TOLERATE_VOL_ERRORS if (layer) /* */ { /* support SSPL0-2 */ if (tmpvar != 0x10 && tmpvar != 0x11 && tmpvar != 0x12 && tmpvar != 0xA1 && tmpvar != 0xA2 && tmpvar != 0xA3/* Core SP@L1-L3 */) return PV_FAIL; } else { /* support SPL0-3 & SSPL0-2 */ if (tmpvar != 0x01 && tmpvar != 0x02 && tmpvar != 0x03 && tmpvar != 0x08 && tmpvar != 0x10 && tmpvar != 0x11 && tmpvar != 0x12 && tmpvar != 0x21 && tmpvar != 0x22 && /* Core Profile Levels */ tmpvar != 0xA1 && tmpvar != 0xA2 && tmpvar != 0xA3 && tmpvar != 0xF0 && tmpvar != 0xF1 && /* Advanced Simple Profile Levels*/ tmpvar != 0xF2 && tmpvar != 0xF3 && tmpvar != 0xF4 && tmpvar != 0xF5) return PV_FAIL; } #else profile = tmpvar; #endif // save the profile and level for the query currVol->profile_level_id = (uint)tmpvar; // 6/10/04 status = BitstreamShowBits32HC(stream, &tmpvar); if (tmpvar == USER_DATA_START_CODE) { /* Something has to be done with user data 11/11/99 
*/ status = DecodeUserData(stream); if (status != PV_SUCCESS) return PV_FAIL; } /* visual_object_start_code */ BitstreamShowBits32HC(stream, &tmpvar); if (tmpvar != VISUAL_OBJECT_START_CODE) { do { /* Search for VOL_HEADER */ status = PVSearchNextM4VFrame(stream); /* search 0x00 0x00 0x01 */ if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */ BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar); PV_BitstreamFlushBits(stream, 8); } while (tmpvar != VOL_START_CODE); goto decode_vol; } else { BitstreamReadBits32HC(stream); } /* is_visual_object_identifier */ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar) { /* visual_object_verid */ tmpvar = (uint32) BitstreamReadBits16(stream, 4); /* visual_object_priority */ tmpvar = (uint32) BitstreamReadBits16(stream, 3); } /* visual_object_type */ BitstreamShowBits32(stream, 4, &tmpvar); if (tmpvar == 1) { /* video_signal_type */ PV_BitstreamFlushBits(stream, 4); tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar == 1) { /* video_format */ tmpvar = (uint32) BitstreamReadBits16(stream, 3); /* video_range */ tmpvar = (uint32) BitstreamRead1Bits(stream); /* color_description */ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar == 1) { /* color_primaries */ tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* transfer_characteristics */ tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* matrix_coefficients */ tmpvar = (uint32) BitstreamReadBits16(stream, 8); } } } else { do { /* Search for VOL_HEADER */ status = PVSearchNextM4VFrame(stream); /* search 0x00 0x00 0x01 */ if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */ BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar); PV_BitstreamFlushBits(stream, 8); } while (tmpvar != VOL_START_CODE); goto decode_vol; } /* next_start_code() */ status = PV_BitstreamByteAlign(stream); /* 10/12/01 */ status = BitstreamShowBits32HC(stream, &tmpvar); if (tmpvar == USER_DATA_START_CODE) { /* Something has to be done to deal with user 
data (parse it) 11/11/99 */ status = DecodeUserData(stream); if (status != PV_SUCCESS) return PV_FAIL; } status = BitstreamShowBits32(stream, 27, &tmpvar); /* 10/12/01 */ } else { /* tmpvar = 0; */ /* 10/12/01 */ status = BitstreamShowBits32(stream, 27, &tmpvar); /* uncomment this line if you want to start decoding with a video_object_start_code */ } if (tmpvar == VO_START_CODE) { /***** * * Read the VOL header entries from the bitstream * *****/ /* video_object_start_code */ tmpvar = BitstreamReadBits32(stream, 27); tmpvar = (uint32) BitstreamReadBits16(stream, 5); /* video_object_layer_start_code */ BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar); if (tmpvar != VOL_START_CODE) { status = BitstreamCheckEndBuffer(stream); if (status == PV_END_OF_VOP) { video->shortVideoHeader = PV_H263; return PV_SUCCESS; } else { do { /* Search for VOL_HEADER */ status = PVSearchNextM4VFrame(stream);/* search 0x00 0x00 0x01 */ if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */ BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar); PV_BitstreamFlushBits(stream, 8); /* advance the byte ptr */ } while (tmpvar != VOL_START_CODE); } } else { PV_BitstreamFlushBits(stream, 8); } decode_vol: PV_BitstreamFlushBits(stream, VOL_START_CODE_LENGTH - 8); video->shortVideoHeader = 0; /* vol_id (4 bits) */ currVol->volID = (int) BitstreamReadBits16(stream, 4); /* RandomAccessible flag */ tmpvar = (uint32) BitstreamRead1Bits(stream); /* object type */ tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* */ #ifdef PV_TOLERATE_VOL_ERRORS if (tmpvar == 0) { if (layer) /* */ { /* support SSPL0-2 */ if (profile != 0x10 && profile != 0x11 && profile != 0x12) return PV_FAIL; tmpvar = 0x02; } else { /* support SPL0-3 & SSPL0-2 */ if (profile != 0x01 && profile != 0x02 && profile != 0x03 && profile != 0x08 && profile != 0x10 && profile != 0x11 && profile != 0x12) return PV_FAIL; tmpvar = 0x01; } profile |= 0x0100; } #endif if (layer) { if (tmpvar != 0x02) return PV_FAIL; } 
else { if (tmpvar != 0x01) return PV_FAIL; } /* version id specified? */ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar == 1) { /* version ID */ version_id = (uint32) BitstreamReadBits16(stream, 4); /* priority */ tmpvar = (uint32) BitstreamReadBits16(stream, 3); } /* aspect ratio info */ tmpvar = (uint32) BitstreamReadBits16(stream, 4); // Commenting out PV_FAIL return in aspect ratio info is 0. Don't think there is bitstream corruption. // It's just bad encoding. We can make a change to our decoder to ignore this type of encoding flaw. // if (tmpvar == 0) return PV_FAIL; if (tmpvar == 0xf /* extended_par */) { /* width */ tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* height */ tmpvar = (uint32) BitstreamReadBits16(stream, 8); } /* control parameters present? */ tmpvar = (uint32) BitstreamRead1Bits(stream); /* Get the parameters (skipped) */ /* 03/10/99 */ if (tmpvar) { /* chroma_format */ tmpvar = BitstreamReadBits16(stream, 2); if (tmpvar != 1) return PV_FAIL; /* low_delay */ tmpvar = BitstreamRead1Bits(stream); /* vbv_parameters present? 
*/ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar) { /* first_half_bit_rate */ BitstreamReadBits16(stream, 15); if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* latter_half_bit_rate */ BitstreamReadBits16(stream, 15); if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* first_half_vbv_buffer_size */ BitstreamReadBits16(stream, 15); if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* latter_half_vbv_buffer_size */ BitstreamReadBits16(stream, 3); /* first_half_vbv_occupancy */ BitstreamReadBits16(stream, 11); if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* latter_half_vbv_occupancy */ BitstreamReadBits16(stream, 15); if (!BitstreamRead1Bits(stream)) return PV_FAIL; } } /* video_object_layer_shape (2 bits), only 00 (rect) is supported for now */ vol_shape = (uint32) BitstreamReadBits16(stream, 2); if (vol_shape) return PV_FAIL; /* marker bit, 03/10/99 */ if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* vop_time_increment_resolution */ currVol->timeIncrementResolution = BitstreamReadBits16(stream, 16); if (currVol->timeIncrementResolution == 0) return PV_FAIL; /* . since nbitsTimeIncRes will be used over and over again, */ /* we should put it in Vol structure. 04/12/2000. */ currVol->nbitsTimeIncRes = CalcNumBits((uint)currVol->timeIncrementResolution - 1); if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* fixed_vop_rate */ currVol->fixedVopRate = (int) BitstreamRead1Bits(stream); if (currVol->fixedVopRate) { /* fixed_vop_time_increment */ tmpvar = BitstreamReadBits16(stream, currVol->nbitsTimeIncRes); } /* marker bit */ if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* video_object_layer_width (13 bits) */ video->displayWidth = video->width = (int) BitstreamReadBits16(stream, 13); /* round up to a multiple of MB_SIZE. 
08/09/2000 */ video->width = (video->width + 15) & -16; // video->displayWidth += (video->displayWidth & 0x1); /* displayed image should be even size */ /* marker bit */ if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* video_object_layer_height (13 bits) */ video->displayHeight = video->height = (int) BitstreamReadBits16(stream, 13); /* round up to a multiple of MB_SIZE. 08/09/2000 */ video->height = (video->height + 15) & -16; // video->displayHeight += (video->displayHeight & 0x1); /* displayed image should be even size */ if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* 03/10/99 */ /* interlaced */ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar != 0) { mp4dec_log("DecodeVOLHeader(): Interlaced video is not supported.\n"); return PV_FAIL; } /* obmc_disable */ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar == 0) return PV_FAIL; if (version_id == 1) { /* sprite_enable (1 bits) */ tmpvar = (uint32) BitstreamRead1Bits(stream); if (tmpvar) { mp4dec_log("DecodeVOLHeader(): Sprite is not supported.\n"); return PV_FAIL; } } else { /* For version 2, vol_sprite_usage has two bits. */ /* sprite_enable */ tmpvar = (uint32) BitstreamReadBits16(stream, 2); if (tmpvar) { mp4dec_log("DecodeVOLHeader(): Sprite is not supported.\n"); return PV_FAIL; } } /* not_8_bit */ if (BitstreamRead1Bits(stream)) { /* quant_precision */ currVol->quantPrecision = BitstreamReadBits16(stream, 4); /* bits_per_pixel */ currVol->bitsPerPixel = BitstreamReadBits16(stream, 4); mp4dec_log("DecodeVOLHeader(): not an 8-bit stream.\n"); // For the time being we do not support != 8 bits return PV_FAIL; } else { currVol->quantPrecision = 5; currVol->bitsPerPixel = 8; } /* quant_type (1 bit) */ currVol->quantType = BitstreamRead1Bits(stream); if (currVol->quantType) { #ifdef PV_SUPPORT_MAIN_PROFILE /* load quantization matrices. 
5/22/2000 */ /* load_intra_quant_mat (1 bit) */ qmat = currVol->iqmat; currVol->loadIntraQuantMat = BitstreamRead1Bits(stream); if (currVol->loadIntraQuantMat) { /* intra_quant_mat (8*64 bits) */ i = 0; do { qmat[*(zigzag_inv+i)] = (int) BitstreamReadBits16(stream, 8); } while ((qmat[*(zigzag_inv+i)] != 0) && (++i < 64)); for (j = i; j < 64; j++) qmat[*(zigzag_inv+j)] = qmat[*(zigzag_inv+i-1)]; } else { oscl_memcpy(qmat, mpeg_iqmat_def, 64*sizeof(int)); } qmat[0] = 0; /* necessary for switched && MPEG quant 07/09/01 */ /* load_nonintra_quant_mat (1 bit) */ qmat = currVol->niqmat; currVol->loadNonIntraQuantMat = BitstreamRead1Bits(stream); if (currVol->loadNonIntraQuantMat) { /* nonintra_quant_mat (8*64 bits) */ i = 0; do { qmat[*(zigzag_inv+i)] = (int) BitstreamReadBits16(stream, 8); } while ((qmat[*(zigzag_inv+i)] != 0) && (++i < 64)); for (j = i; j < 64; j++) qmat[*(zigzag_inv+j)] = qmat[*(zigzag_inv+i-1)]; } else { oscl_memcpy(qmat, mpeg_nqmat_def, 64*sizeof(int)); } #else return PV_FAIL; #endif } if (version_id != 1) { /* quarter_sample enabled */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar) return PV_FAIL; } /* complexity_estimation_disable */ currVol->complexity_estDisable = BitstreamRead1Bits(stream); if (currVol->complexity_estDisable == 0) { currVol->complexity_estMethod = BitstreamReadBits16(stream, 2); if (currVol->complexity_estMethod < 2) { /* shape_complexity_estimation_disable */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar == 0) { mp4dec_log("DecodeVOLHeader(): Shape Complexity estimation is not supported.\n"); return PV_FAIL; } /* texture_complexity_estimation_set_1_disable */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar == 0) { currVol->complexity.text_1 = BitstreamReadBits16(stream, 4); } /* marker bit */ if (!BitstreamRead1Bits(stream)) return PV_FAIL; /* texture_complexity_estimation_set_2_disable */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar == 0) { currVol->complexity.text_2 = BitstreamReadBits16(stream, 4); } /* 
motion_compensation_complexity_disable */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar == 0) { currVol->complexity.mc = BitstreamReadBits16(stream, 6); } /* marker bit */ if (!BitstreamRead1Bits(stream)) return PV_FAIL; if (currVol->complexity_estMethod == 1) { /* version2_complexity_estimation_disable */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar == 0) { mp4dec_log("DecodeVOLHeader(): sadct, quarter pel not supported.\n"); return PV_FAIL; } } } } /* 03/10/99 */ /* resync_marker_disable */ currVol->errorResDisable = (int) BitstreamRead1Bits(stream); /* data_partititioned */ currVol->dataPartitioning = (int) BitstreamRead1Bits(stream); video->vlcDecCoeffIntra = &VlcDecTCOEFIntra; video->vlcDecCoeffInter = &VlcDecTCOEFInter; if (currVol->dataPartitioning) { if (layer) return PV_FAIL; /* */ /* reversible_vlc */ currVol->useReverseVLC = (int)BitstreamRead1Bits(stream); if (currVol->useReverseVLC) { video->vlcDecCoeffIntra = &RvlcDecTCOEFIntra; video->vlcDecCoeffInter = &RvlcDecTCOEFInter; } currVol->errorResDisable = 0; } else { currVol->useReverseVLC = 0; } if (version_id != 1) { /* newpred_enable */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar) return PV_FAIL; /* reduced_resolution_vop */ tmpvar = BitstreamRead1Bits(stream); if (tmpvar) return PV_FAIL; } /* Intra AC/DC prediction is always true */ video->intra_acdcPredDisable = 0; /* scalability */ currVol->scalability = (int) BitstreamRead1Bits(stream); if (currVol->scalability) { if (layer == 0) return PV_FAIL; /* */ /* hierarchy_type: 1 : temporal, 0 : spatial */ /* 03/10/99 */ currVol->scalType = (int) BitstreamRead1Bits(stream); /* */ if (!currVol->scalType) return PV_FAIL; /* ref_layer_id (4 bits) */ currVol->refVolID = (int) BitstreamReadBits16(stream, 4); if (layer) /* */ { if (currVol->refVolID != video->vol[0]->volID) return PV_FAIL; } /* ref_layer_sampling_direc (1 bits) */ /* 1 : ref. layer has higher resolution */ /* 0 : ref. 
layer has equal or lower resolution */ currVol->refSampDir = (int) BitstreamRead1Bits(stream); if (currVol->refSampDir) return PV_FAIL; /* hor_sampling_factor_n (5 bits) */ currVol->horSamp_n = (int) BitstreamReadBits16(stream, 5); /* hor_sampling_factor_m (5 bits) */ currVol->horSamp_m = (int) BitstreamReadBits16(stream, 5); if (currVol->horSamp_m == 0) return PV_FAIL; if (currVol->horSamp_n != currVol->horSamp_m) return PV_FAIL; /* ver_sampling_factor_n (5 bits) */ currVol->verSamp_n = (int) BitstreamReadBits16(stream, 5); /* ver_sampling_factor_m (5 bits) */ currVol->verSamp_m = (int) BitstreamReadBits16(stream, 5); if (currVol->verSamp_m == 0) return PV_FAIL; if (currVol->verSamp_n != currVol->verSamp_m) return PV_FAIL; /* enhancement_type: 1 : partial region, 0 : full region */ /* 04/10/2000: we only support full region enhancement layer. */ if (BitstreamRead1Bits(stream)) return PV_FAIL; } PV_BitstreamByteAlign(stream); status = BitstreamShowBits32HC(stream, &tmpvar); /* if we hit the end of buffer, tmpvar == 0. 
08/30/2000 */ if (tmpvar == USER_DATA_START_CODE) { status = DecodeUserData(stream); /* you should not check for status here 03/19/2002 */ status = PV_SUCCESS; } /* Compute some convenience variables: 04/13/2000 */ video->nMBPerRow = video->width / MB_SIZE; video->nMBPerCol = video->height / MB_SIZE; video->nTotalMB = video->nMBPerRow * video->nMBPerCol; video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1); #ifdef PV_ANNEX_IJKT_SUPPORT video->modified_quant = 0; video->advanced_INTRA = 0; video->deblocking = 0; video->slice_structure = 0; #endif } else { /* SHORT_HEADER */ status = BitstreamShowBits32(stream, SHORT_VIDEO_START_MARKER_LENGTH, &tmpvar); if (tmpvar == SHORT_VIDEO_START_MARKER) { video->shortVideoHeader = PV_H263; } else { do { /* Search for VOL_HEADER */ status = PVSearchNextM4VFrame(stream); /* search 0x00 0x00 0x01 */ if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */ BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar); PV_BitstreamFlushBits(stream, 8); } while (tmpvar != VOL_START_CODE); goto decode_vol; } } #ifdef PV_TOLERATE_VOL_ERRORS if (profile > 0xFF || profile == 0) { return PV_BAD_VOLHEADER; } #endif return status; } /***********************************************************CommentBegin****** * * -- DecodeGOV -- Decodes the Group of VOPs from bitstream * * 04/20/2000 initial modification to the new PV-Decoder Lib format. 
 * ***********************************************************CommentEnd********/

/*
 * DecodeGOVHeader -- parses a Group-of-VOPs (GOV) header.
 *
 * Reads the GOV time code (hours/minutes/seconds) from the bitstream and
 * returns the sync time, in whole seconds, through *time_base.  The
 * closed_gov / broken_link flags and any trailing user-data blocks are
 * consumed but otherwise ignored.
 *
 * Parameters:
 *   stream    - bitstream positioned just AFTER the group_start_code
 *               (the caller has already consumed the 32-bit start code).
 *   time_base - out: GOV sync timestamp in seconds.
 *
 * Returns PV_SUCCESS always (corrupt-timestamp rejection is disabled, see
 * the commented-out check below).
 */
PV_STATUS DecodeGOVHeader(BitstreamDecVideo *stream, uint32 *time_base)
{
    uint32 tmpvar, time_s;
    int closed_gov, broken_link;

    /* group_start_code (32 bits) -- already consumed by the caller */
//  tmpvar = BitstreamReadBits32(stream, 32);

    /* hours (5 bits) */
    tmpvar = (uint32) BitstreamReadBits16(stream, 5);
    time_s = tmpvar * 3600;

    /* minutes (6 bits) */
    tmpvar = (uint32) BitstreamReadBits16(stream, 6);
    time_s += tmpvar * 60;

    /* marker bit -- read but not validated here */
    tmpvar = (uint32) BitstreamRead1Bits(stream);

    /* seconds (6 bits) */
    tmpvar = (uint32) BitstreamReadBits16(stream, 6);
    time_s += tmpvar;

    /* We have to check the timestamp here. If the sync timestamp is  */
    /* earlier than the previous timestamp or longer than 60 sec.     */
    /* after the previous timestamp, assume the GOV header is         */
    /* corrupted.                                          05/12/2000 */
    *time_base = time_s;   /*  02/27/2002 */
    /* Sanity check kept for reference but disabled: */
//  *time_base = *time_base/1000;
//  tmpvar = time_s - *time_base;
//  if (tmpvar <= 60) *time_base = time_s;
//  else return PV_FAIL;

    tmpvar = (uint32) BitstreamRead1Bits(stream);
    closed_gov = tmpvar;
    tmpvar = (uint32) BitstreamRead1Bits(stream);
    broken_link = tmpvar;

    /* Open GOV with a broken link: tolerate it and return early. */
    if ((closed_gov == 0) && (broken_link == 1))
    {
        return PV_SUCCESS;  /*  03/15/2002  you can also return PV_FAIL */
    }

    PV_BitstreamByteAlign(stream);

    BitstreamShowBits32HC(stream, &tmpvar);

    /* Skip any user-data blocks following the GOV header. */
    while (tmpvar == USER_DATA_START_CODE)   /*  03/15/2002 */
    {
        DecodeUserData(stream);
        BitstreamShowBits32HC(stream, &tmpvar);
    }

    return PV_SUCCESS;
}

/***********************************************************CommentBegin******
 *
 * -- DecodeVopHeader -- Decodes the VOPheader information from the bitstream
 *
 *      04/12/2000 Initial port to the new PV decoder library format.
 *      05/10/2000 Error resilient decoding of vop header.
 * ***********************************************************CommentEnd********/

/*
 * DecodeVOPHeader -- parses one MPEG-4 VOP header (not short-video-header).
 *
 * Consumes an optional GOV header, the vop_start_code, and all VOP header
 * fields up to (but not including) the macroblock data.  On field-level
 * failure it sets status = PV_FAIL and jumps to return_point so that the
 * caller can resynchronize.
 *
 * Parameters:
 *   video             - decoder state; selects the current layer's VOL.
 *   currVop           - out: prediction type, time increment, quantizer,
 *                       fcodes, etc. are written here.
 *   use_ext_timestamp - when true, moduloTimeBase is NOT advanced from the
 *                       in-stream modulo_time_base bits (an external
 *                       timestamp source is in use).
 *
 * Returns PV_SUCCESS, or PV_FAIL on any malformed/unsupported field
 * (B-VOPs are rejected outright below).
 */
PV_STATUS DecodeVOPHeader(VideoDecData *video, Vop *currVop, Bool use_ext_timestamp)
{
    PV_STATUS status = PV_SUCCESS;
    Vol *currVol = video->vol[video->currLayer];
    BitstreamDecVideo *stream = currVol->bitstream;
    uint32 tmpvar;
    int time_base;

    /*****
    *   Read the VOP header from the bitstream (No shortVideoHeader Mode here!)
    *****/
    BitstreamShowBits32HC(stream, &tmpvar);

    /* check if we have a GOV header here.   08/30/2000 */
    if (tmpvar == GROUP_START_CODE)
    {
        tmpvar = BitstreamReadBits32HC(stream);
        //  rewindBitstream(stream, START_CODE_LENGTH); /* for backward compatibility */
        status = DecodeGOVHeader(stream, &tmpvar);
        if (status != PV_SUCCESS)
        {
            return status;
        }
        //  use_ext_timestamp = TRUE;   /*  02/08/2002    */
        /* We should have a VOP header following the GOV header.  03/15/2001 */
        BitstreamShowBits32HC(stream, &tmpvar);
    }
#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY
    currVop->timeStamp = -1;
#endif
    if (tmpvar == VOP_START_CODE)
    {
        tmpvar = BitstreamReadBits32HC(stream);
    }
    else
    {
        PV_BitstreamFlushBits(stream, 8); // advance by a byte
        status = PV_FAIL;
        goto return_point;
    }

    /* vop_prediction_type (2 bits) */
    currVop->predictionType = (int) BitstreamReadBits16(stream, 2);

    /* modulo_time_base (? bits) -- count leading '1' bits, one per elapsed
       second relative to the previous sync point */
    time_base = -1;
    do
    {
        time_base++;
        tmpvar = (uint32) BitstreamRead1Bits(stream);
    }
    while (tmpvar == 1);

    if (!use_ext_timestamp)
    {
        currVol->moduloTimeBase += 1000 * time_base;  /* milliseconds based MTB  11/12/01 */
    }

    /* marker_bit (1 bit) */
    if (!BitstreamRead1Bits(stream))
    {
        status = PV_FAIL;
        goto return_point;
    }

    /* vop_time_increment (1-15 bits) in Nov_Compliant (1-16 bits) */
    /*    we always assumes fixed vop rate here */
    currVop->timeInc = BitstreamReadBits16(stream, currVol->nbitsTimeIncRes);

    /* marker_bit (1 bit) */
    if (!BitstreamRead1Bits(stream))
    {
        status = PV_FAIL;
        goto return_point;
    }

    /* vop_coded -- if zero this VOP has no data and parsing stops here */
    currVop->vopCoded = (int) BitstreamRead1Bits(stream);
    if (currVop->vopCoded == 0)
    {
        status = PV_SUCCESS;
        goto return_point;
    }

    /* read vop_rounding_type (P-VOPs only) */
    if (currVop->predictionType == P_VOP)
    {
        currVop->roundingType = (int) BitstreamRead1Bits(stream);
    }
    else
    {
        currVop->roundingType = 0;
    }

    /* Skip over any complexity-estimation fields signalled in the VOL;
       the values are consumed but not used. */
    if (currVol->complexity_estDisable == 0)
    {
        if (currVol->complexity_estMethod < 2)   /*  OCT 2002 */
        {
            if ((currVol->complexity.text_1 >> 3) & 0x1)  /* intra */
                BitstreamReadBits16(stream, 8);
            if (currVol->complexity.text_1 & 0x1)  /* not_coded */
                BitstreamReadBits16(stream, 8);
            if ((currVol->complexity.text_2 >> 3) & 0x1)  /* dct_coefs */
                BitstreamReadBits16(stream, 8);
            if ((currVol->complexity.text_2 >> 2) & 0x1)  /* dct_lines */
                BitstreamReadBits16(stream, 8);
            if ((currVol->complexity.text_2 >> 1) & 0x1)  /* vlc_symbols */
                BitstreamReadBits16(stream, 8);
            if (currVol->complexity.text_2 & 0x1)  /* vlc_bits */
                BitstreamReadBits16(stream, 4);
            if (currVop->predictionType != I_VOP)
            {
                if ((currVol->complexity.text_1 >> 2) & 0x1)  /* inter */
                    BitstreamReadBits16(stream, 8);
                if ((currVol->complexity.text_1 >> 1) & 0x1)  /* inter_4v */
                    BitstreamReadBits16(stream, 8);
                if ((currVol->complexity.mc >> 5) & 0x1)  /* apm */
                    BitstreamReadBits16(stream, 8);
                if ((currVol->complexity.mc >> 4) & 0x1)  /* npm */
                    BitstreamReadBits16(stream, 8);
                /* interpolate_mc_q */
                if ((currVol->complexity.mc >> 2) & 0x1)  /* forw_back_mc_q */
                    BitstreamReadBits16(stream, 8);
                if ((currVol->complexity.mc >> 1) & 0x1)  /* halfpel2 */
                    BitstreamReadBits16(stream, 8);
                if (currVol->complexity.mc & 0x1)  /* halfpel4 */
                    BitstreamReadBits16(stream, 8);
            }
            if (currVop->predictionType == B_VOP)
            {
                if ((currVol->complexity.mc >> 3) & 0x1)  /* interpolate_mc_q */
                    BitstreamReadBits16(stream, 8);
            }
        }
    }

    /* read intra_dc_vlc_thr */
    currVop->intraDCVlcThr = (int) BitstreamReadBits16(stream, 3);

    /* read vop_quant (currVol->quantPrecision bits) */
    currVop->quantizer = (int16) BitstreamReadBits16(stream, currVol->quantPrecision);
    if (currVop->quantizer == 0)
    {
        /* quantizer 0 is illegal; fall back to the previous VOP's value */
        currVop->quantizer = video->prevVop->quantizer;
        status = PV_FAIL;
        goto return_point;
    }

    /* read vop_fcode_forward */
    if (currVop->predictionType != I_VOP)
    {
        tmpvar = (uint32) BitstreamReadBits16(stream, 3);
        if (tmpvar < 1)
        {
            currVop->fcodeForward = 1;
            status = PV_FAIL;
            goto return_point;
        }
        currVop->fcodeForward = tmpvar;
    }
    else
    {
        currVop->fcodeForward = 0;
    }

    /* read vop_fcode_backward */
    if (currVop->predictionType == B_VOP)
    {
        return PV_FAIL; // B-VOPs are not supported; return this for now.
    }
    else
    {
        currVop->fcodeBackward = 0;
    }

    if (currVol->scalability)
    {
        currVop->refSelectCode = (int) BitstreamReadBits16(stream, 2);
    }

return_point:
    return status;
}

/***********************************************************CommentBegin******
 *
 * -- VideoPlaneWithShortHeader -- Decodes the short_video_header information from the bitstream
 * Modified :
 *      04/23/2001.  Remove the codes related to the
 *            "first pass" decoding.  We use a different function
 *            to set up the decoder now.
***********************************************************CommentEnd********/

/*
 * DecodeShortHeader -- thin wrapper that forwards to DecodeH263Header.
 * Kept as a separate entry point for API compatibility.
 */
PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop)
{
    PV_STATUS status = PV_SUCCESS;
    {
        status = DecodeH263Header(video, currVop);
    }
    return status;
}

/*
 * DecodeH263Header -- parses an H.263 / MPEG-4 short-video-header picture
 * header, including the PLUSPTYPE extension (UFEP, custom picture format,
 * custom PCF, Annex I/J/K/T flags).
 *
 * Side effects: updates frame dimensions (width/height/display*), the
 * macroblock counts, plane pointers (uChan/vChan) when the size changes,
 * and fills currVop with quantizer/prediction/rounding information.
 *
 * Returns PV_SUCCESS, or PV_FAIL on a malformed header or an unsupported
 * feature (UMV, SAC, AP, RPS, RPR, RRU, CPM, ASO/RS, ...).
 */
PV_STATUS DecodeH263Header(VideoDecData *video, Vop *currVop)
{
    PV_STATUS status = PV_SUCCESS;
    Vol *currVol = video->vol[0];
    BitstreamDecVideo *stream = currVol->bitstream;
    uint32 tmpvar;
    int32 size;
    int extended_PTYPE = FALSE;
    int UFEP = 0, custom_PFMT = 0, custom_PCF = 0;

    status = BitstreamShowBits32(stream, SHORT_VIDEO_START_MARKER_LENGTH, &tmpvar);
    if (tmpvar != SHORT_VIDEO_START_MARKER)
    {
        {
            status = PV_FAIL;
            goto return_point;
        }
    }
    PV_BitstreamFlushBits(stream, SHORT_VIDEO_START_MARKER_LENGTH);

    /* Temporal reference. Using vop_time_increment_resolution = 30000 */
    tmpvar = (uint32) BitstreamReadBits16(stream, 8);
    currVop->temporalRef = (int) tmpvar;

    /* 8-bit wrap-around difference between consecutive temporal refs */
    currVop->timeInc = 0xff & (256 + currVop->temporalRef - video->prevVop->temporalRef);
    currVol->moduloTimeBase += currVop->timeInc; /* mseconds   11/12/01 */

    /* Marker Bit */
    if (!BitstreamRead1Bits(stream))
    {
        mp4dec_log("DecodeShortHeader(): Market bit wrong.\n");
        status = PV_FAIL;
        goto return_point;
    }
    /* Zero Bit */
    if (BitstreamRead1Bits(stream))
    {
        mp4dec_log("DecodeShortHeader(): Zero bit wrong.\n");
        status = PV_FAIL;
        goto return_point;
    }
    /*split_screen_indicator*/
    if (BitstreamRead1Bits(stream))
    {
        mp4dec_log("DecodeShortHeader(): Split Screen not supported.\n");
        VideoDecoderErrorDetected(video);
    }
    /*document_freeze_camera*/
    if (BitstreamRead1Bits(stream))
    {
        mp4dec_log("DecodeShortHeader(): Freeze Camera not supported.\n");
        VideoDecoderErrorDetected(video);
    }
    /*freeze_picture_release*/
    if (BitstreamRead1Bits(stream))
    {
        mp4dec_log("DecodeShortHeader(): Freeze Release not supported.\n");
        VideoDecoderErrorDetected(video);
    }
    /* source format: fixed H.263 picture sizes; 7 selects PLUSPTYPE */
    switch (BitstreamReadBits16(stream, 3))
    {
        case 1:
            video->displayWidth = video->width = 128;
            video->displayHeight = video->height = 96;
            break;
        case 2:
            video->displayWidth = video->width = 176;
            video->displayHeight = video->height = 144;
            break;
        case 3:
            video->displayWidth = video->width = 352;
            video->displayHeight = video->height = 288;
            break;
        case 4:
            video->displayWidth = video->width = 704;
            video->displayHeight = video->height = 576;
            break;
        case 5:
            video->displayWidth = video->width = 1408;
            video->displayHeight = video->height = 1152;
            break;
        case 7:
            extended_PTYPE = TRUE;
            break;
        default:
            /* Msg("H.263 source format not legal\n"); */
            status = PV_FAIL;
            goto return_point;
    }

    currVop->roundingType = 0;

    if (extended_PTYPE == FALSE)
    {
        currVop->predictionType = (int) BitstreamRead1Bits(stream);
        /* four_reserved_zero_bits */
        if (BitstreamReadBits16(stream, 4))
        {
            mp4dec_log("DecodeShortHeader(): Reserved bits wrong.\n");
            status = PV_FAIL;
            goto return_point;
        }
    }
    else
    {
        /* PLUSPTYPE path */
        UFEP = BitstreamReadBits16(stream, 3);
        if (UFEP == 1)
        {
            /* source format */
            switch (BitstreamReadBits16(stream, 3))
            {
                case 1:
                    video->displayWidth = video->width = 128;
                    video->displayHeight = video->height = 96;
                    break;
                case 2:
                    video->displayWidth = video->width = 176;
                    video->displayHeight = video->height = 144;
                    break;
                case 3:
                    video->displayWidth = video->width = 352;
                    video->displayHeight = video->height = 288;
                    break;
                case 4:
                    video->displayWidth = video->width = 704;
                    video->displayHeight = video->height = 576;
                    break;
                case 5:
                    video->displayWidth = video->width = 1408;
                    video->displayHeight = video->height = 1152;
                    break;
                case 6:
                    custom_PFMT = TRUE;
                    break;
                default:
                    /* Msg("H.263 source format not legal\n"); */
                    status = PV_FAIL;
                    goto return_point;
            }
            custom_PCF = BitstreamRead1Bits(stream);
            /* unrestricted MV */
            if (BitstreamRead1Bits(stream))
            {
                status = PV_FAIL;
                goto return_point;
            }
            /* SAC */
            if (BitstreamRead1Bits(stream))
            {
                status = PV_FAIL;
                goto return_point;
            }
            /* AP */
            if (BitstreamRead1Bits(stream))
            {
                status = PV_FAIL;
                goto return_point;
            }
            video->advanced_INTRA = BitstreamRead1Bits(stream);
            video->deblocking = BitstreamRead1Bits(stream);
            video->slice_structure = BitstreamRead1Bits(stream);
            /* RPS, ISD, AIV */
            if (BitstreamReadBits16(stream, 3))
            {
                status = PV_FAIL;
                goto return_point;
            }
            video->modified_quant = BitstreamRead1Bits(stream);
            /* Marker Bit and reserved*/
            if (BitstreamReadBits16(stream, 4) != 8)
            {
                status = PV_FAIL;
                goto return_point;
            }
        }
#ifndef PV_ANNEX_IJKT_SUPPORT
        /* NOTE(review): modified_quant is tested twice here while
           slice_structure is never tested -- looks like a typo inherited
           from upstream; verify against the reference sources before
           changing behavior. */
        if (video->advanced_INTRA | video->deblocking | video->modified_quant | video->modified_quant)
        {
            status = PV_FAIL;
            goto return_point;
        }
#endif
        if (UFEP == 0 || UFEP == 1)
        {
            tmpvar = BitstreamReadBits16(stream, 3);
            if (tmpvar > 1)
            {
                status = PV_FAIL;
                goto return_point;
            }
            currVop->predictionType = tmpvar;
            /* RPR */
            if (BitstreamRead1Bits(stream))
            {
                status = PV_FAIL;
                goto return_point;
            }
            /* RRU */
            if (BitstreamRead1Bits(stream))
            {
                status = PV_FAIL;
                goto return_point;
            }
            currVop->roundingType = (int) BitstreamRead1Bits(stream);
            if (BitstreamReadBits16(stream, 3) != 1)
            {
                status = PV_FAIL;
                goto return_point;
            }
        }
        else
        {
            status = PV_FAIL;
            goto return_point;
        }
        /* CPM */
        if (BitstreamRead1Bits(stream))
        {
            status = PV_FAIL;
            goto return_point;
        }
        /* CPFMT: custom picture format -- arbitrary dimensions */
        if (custom_PFMT == 1 && UFEP == 1)
        {
            /* aspect ratio */
            tmpvar = BitstreamReadBits16(stream, 4);
            if (tmpvar == 0)
            {
                status = PV_FAIL;
                goto return_point;
            }
            /* Extended PAR */
            if (tmpvar == 0xF)
            {
                /* Read par_width and par_height but do nothing */
                /* par_width */
                tmpvar = BitstreamReadBits16(stream, 8);
                /* par_height */
                tmpvar = BitstreamReadBits16(stream, 8);
            }
            tmpvar = BitstreamReadBits16(stream, 9);
            video->displayWidth = (tmpvar + 1) << 2;
            video->width = (video->displayWidth + 15) & -16;
            /* marker bit */
            if (!BitstreamRead1Bits(stream))
            {
                status = PV_FAIL;
                goto return_point;
            }
            tmpvar = BitstreamReadBits16(stream, 9);
            if (tmpvar == 0)
            {
                status = PV_FAIL;
                goto return_point;
            }
            video->displayHeight = tmpvar << 2;
            video->height = (video->displayHeight + 15) & -16;
            video->nTotalMB = video->width / MB_SIZE * video->height / MB_SIZE;
            /* Macroblock-ID bit width per H.263 Annex K table */
            if (video->nTotalMB <= 48)
            {
                video->nBitsForMBID = 6;
            }
            else if (video->nTotalMB <= 99)
            {
                video->nBitsForMBID = 7;
            }
            else if (video->nTotalMB <= 396)
            {
                video->nBitsForMBID = 9;
            }
            else if (video->nTotalMB <= 1584)
            {
                video->nBitsForMBID = 11;
            }
            else if (video->nTotalMB <= 6336)
            {
                video->nBitsForMBID = 13 ;
            }
            else if (video->nTotalMB <= 9216)
            {
                video->nBitsForMBID = 14 ;
            }
            else
            {
                status = PV_FAIL;
                goto return_point;
            }
        }
        if (UFEP == 1 && custom_PCF == 1)
        {
            BitstreamRead1Bits(stream);
            tmpvar = BitstreamReadBits16(stream, 7);
            if (tmpvar == 0)
            {
                status = PV_FAIL;
                goto return_point;
            }
        }
        if (custom_PCF == 1)
        {
            currVop->ETR = BitstreamReadBits16(stream, 2);
        }
        if (UFEP == 1 && video->slice_structure == 1)
        {
            /* SSS */
            tmpvar = BitstreamReadBits16(stream, 2);
            if (tmpvar != 0)
            {
                status = PV_FAIL;
                goto return_point;
            }
        }
    }

    /* Recalculate number of macroblocks per row & col since */
    /*    the frame size can change.          04/23/2001.    */
    video->nMBinGOB = video->nMBPerRow = video->width / MB_SIZE;
    video->nGOBinVop = video->nMBPerCol = video->height / MB_SIZE;
    video->nTotalMB = video->nMBPerRow * video->nMBPerCol;
    if (custom_PFMT == 0 || UFEP == 0)
    {
        video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1); /* otherwise calculate above */
    }
    size = (int32)video->width * video->height;
    /* Reject a mid-stream size increase -- the allocated buffers are too small. */
    if ((size > video->size) && ((video->currVop->predictionType == P_VOP) || (video->initialized == PV_TRUE)))
    {
        status = PV_FAIL;
        goto return_point;
    }
#ifdef PV_MEMORY_POOL
    video->videoDecControls->size = size;
#endif
    video->size = size;
    /* Re-derive chroma plane pointers from the (possibly changed) luma size */
    video->currVop->uChan = video->currVop->yChan + size;
    video->currVop->vChan = video->currVop->uChan + (size >> 2);
    video->prevVop->uChan = video->prevVop->yChan + size;
    video->prevVop->vChan = video->prevVop->uChan + (size >> 2);

    currVop->quantizer = (int16) BitstreamReadBits16(stream, 5);
    if (currVop->quantizer == 0)   /*  04/03/01 */
    {
        currVop->quantizer = video->prevVop->quantizer;
        status = PV_FAIL;
        goto return_point;
    }

    /* Zero bit */
    if (extended_PTYPE == FALSE)
    {
        if (BitstreamRead1Bits(stream))
        {
            mp4dec_log("DecodeShortHeader(): Zero bit wrong.\n");
            status = PV_FAIL;
            goto return_point;
        }
    }
    /* pei -- skip extra insertion info bytes while PEI == 1 */
    tmpvar = (uint32) BitstreamRead1Bits(stream);
    while (tmpvar)
    {
        tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* "PSPARE" */
        tmpvar = (uint32) BitstreamRead1Bits(stream); /* "PEI" */
    }

    if (video->slice_structure)  /* ANNEX_K */
    {
        if (!BitstreamRead1Bits(stream))  /* SEPB1 */
        {
            status = PV_FAIL;
            goto return_point;
        }

        /* NOTE(review): the two checks below are commented out, making the
           following block unconditional -- any Annex K stream fails here. */
        //  if (currVol->nBitsForMBID
        //  if (BitstreamReadBits16(stream, video->nBitsForMBID))
        {
            status = PV_FAIL;   /* no ASO, RS support for Annex K */
            goto return_point;
        }

        if (!BitstreamRead1Bits(stream))  /*SEPB3 */
        {
            status = PV_FAIL;
            goto return_point;
        }
    }

    /* Setting of other VOP-header parameters */
    currVop->gobNumber = 0;
    currVop->vopCoded = 1;
    currVop->intraDCVlcThr = 0;
    currVop->gobFrameID = 0; /* initial value,  05/22/00 */
    currVol->errorResDisable = 0;
    /*PutVopInterlaced(0,curr_vop); no implemented yet */
    if (currVop->predictionType != I_VOP)
        currVop->fcodeForward = 1;
    else
        currVop->fcodeForward = 0;

return_point:
    return status;
}

/***********************************************************CommentBegin******
 *
 * -- PV_DecodeVop -- Decodes the VOP information from the bitstream
 *
 *      04/12/2000
 *      Initial port to the new PV decoder library format.
 *      This function is different from the one in MoMuSys MPEG-4
 *      visual decoder.  We handle combined mode with or without
 *      error resilience and H.263 mode through the same path now.
 *
 *      05/04/2000
 *      Added temporal scalability to the decoder.
 *
 ***********************************************************CommentEnd********/

/*
 * PV_DecodeVop -- decodes the current VOP's macroblock data via the
 * appropriate frame decoder, then consumes any trailing end-of-sequence
 * code (VOS_END for MPEG-4, EOS marker for short-header streams) and,
 * when Annex J support is compiled in, runs the H.263 deblocking filter.
 *
 * Returns the status of the frame decode (or of the trailing show-bits).
 */
PV_STATUS PV_DecodeVop(VideoDecData *video)
{
    Vol *currVol = video->vol[video->currLayer];
    PV_STATUS status;
    uint32 tmpvar;

    /*****
    *   Do scalable or non-scalable decoding of the current VOP
    *****/
    if (!currVol->scalability)
    {
        if (currVol->dataPartitioning)
        {
            /* Data partitioning mode comes here */
            status = DecodeFrameDataPartMode(video);
        }
        else
        {
            /* Combined mode with or without error resilience */
            /*   and short video header comes here.           */
            status = DecodeFrameCombinedMode(video);
        }
    }
    else
    {
#ifdef DO_NOT_FOLLOW_STANDARD
        /* according to the standard, only combined mode is allowed */
        /*    in the enhancement layer.          06/01/2000.        */
        if (currVol->dataPartitioning)
        {
            /* Data partitioning mode comes here */
            status = DecodeFrameDataPartMode(video);
        }
        else
        {
            /* Combined mode with or without error resilience */
            /*   and short video header comes here.           */
            status = DecodeFrameCombinedMode(video);
        }
#else
        status = DecodeFrameCombinedMode(video);
#endif
    }

    /* This part is for consuming Visual_object_sequence_end_code and EOS Code */
    /*  10/15/01 */
    if (!video->shortVideoHeader)
    {
        /* at this point bitstream is expected to be byte aligned */
        BitstreamByteAlignNoForceStuffing(currVol->bitstream);

        status = BitstreamShowBits32HC(currVol->bitstream, &tmpvar);  /*  07/07/01 */
        if (tmpvar == VISUAL_OBJECT_SEQUENCE_END_CODE)/* VOS_END_CODE */
        {
            /* consume the 32-bit end code in two 16-bit flushes */
            PV_BitstreamFlushBits(currVol->bitstream, 16);
            PV_BitstreamFlushBits(currVol->bitstream, 16);
        }
    }
    else
    {
#ifdef PV_ANNEX_IJKT_SUPPORT
        if (video->deblocking)
        {
            H263_Deblock(video->currVop->yChan, video->width, video->height, video->QPMB, video->headerInfo.Mode, 0, 0);
            H263_Deblock(video->currVop->uChan, video->width >> 1, video->height >> 1, video->QPMB, video->headerInfo.Mode, 1, video->modified_quant);
            H263_Deblock(video->currVop->vChan, video->width >> 1, video->height >> 1, video->QPMB, video->headerInfo.Mode, 1, video->modified_quant);
        }
#endif
        /* Read EOS code for shortheader bitstreams */
        status = BitstreamShowBits32(currVol->bitstream, 22, &tmpvar);
        if (tmpvar == SHORT_VIDEO_END_MARKER)
        {
            PV_BitstreamFlushBits(currVol->bitstream, 22);
        }
        else
        {
            /* retry at the next byte boundary */
            status = PV_BitstreamShowBitsByteAlign(currVol->bitstream, 22, &tmpvar);
            if (tmpvar == SHORT_VIDEO_END_MARKER)
            {
                PV_BitstreamByteAlign(currVol->bitstream);
                PV_BitstreamFlushBits(currVol->bitstream, 22);
            }
        }
    }
    return status;
}

/***********************************************************CommentBegin******
 *
 * -- CalcVopDisplayTime -- calculate
absolute time when VOP is to be displayed * * 04/12/2000 Initial port to the new PV decoder library format. * ***********************************************************CommentEnd********/ uint32 CalcVopDisplayTime(Vol *currVol, Vop *currVop, int shortVideoHeader) { uint32 display_time; /***** * Calculate the time when the VOP is to be displayed next *****/ if (!shortVideoHeader) { display_time = (uint32)(currVol->moduloTimeBase + (((int32)currVop->timeInc - (int32)currVol->timeInc_offset) * 1000) / ((int32)currVol->timeIncrementResolution)); /* 11/12/2001 */ if (currVop->timeStamp >= display_time) { display_time += 1000; /* this case is valid if GOVHeader timestamp is ignored */ } } else { display_time = (uint32)(currVol->moduloTimeBase * 33 + (currVol->moduloTimeBase * 11) / 30); /* 11/12/2001 */ } return(display_time); } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/yuv2rgb.cpp ================================================ /* * yuv2rgb.cpp * * Created on: 29 juil. 2009 * Author: rglt1266 */ #include #include "yuv2rgb.h" int convert (int width,int height, uint8 *in,uint32 *out){ uint8 *pY; uint8 *pU; uint8 *pV; int Y,U,V; int i,j; int R,G,B,Cr,Cb; /* Init */ pY = in; pU = in + (width*height); pV = pU + (width*height/4); for(i=0;i>8); G = Y - ((88*Cb+183*Cr)>>8); B = Y + ((454*Cb)>>8); if (R>255)R=255; else if (R<0)R=0; if (G>255)G=255; else if (G<0)G=0; if (B>255)B=255; else if (B<0)B=0; /* Write data */ out[((i*width) + j)]=((((R & 0xFF) << 16) | ((G & 0xFF) << 8) | (B & 0xFF))& 0xFFFFFFFF); } } return 1; } ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/yuv2rgb.h ================================================ /* * yuv2rgb.h * * Created on: 29 juil. 
2009 * Author: rglt1266 */ #include "oscl_types.h" #ifndef YUV2RGB_H_ #define YUV2RGB_H_ int convert (int width,int height, uint8 *in,uint32 *out); #endif /* YUV2RGB_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/zigzag.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef zigzag_H #define zigzag_H /*---------------------------------------------------------------------------- ; INCLUDES ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. 
----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL VARIABLES REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ #ifdef __cplusplus extern "C" { #endif extern const int zigzag_inv[3*NCOEFF_BLOCK]; /*---------------------------------------------------------------------------- ; SIMPLE TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; ENUMERATED TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; STRUCTURES TYPEDEF'S ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; GLOBAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; END ----------------------------------------------------------------------------*/ #endif #ifdef __cplusplus } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/dec/src/zigzag_tab.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4dec_api.h" #include "mp4def.h" #include "zigzag.h" /*---------------------------------------------------------------------------- ; MACROS ; Define module specific macros here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; DEFINES ; Include all pre-processor statements here. Include conditional ; compile variables also. ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL FUNCTION DEFINITIONS ; Function Prototype declaration ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; LOCAL STORE/BUFFER/POINTER DEFINITIONS ; Variable declaration - defined here and used outside this module ----------------------------------------------------------------------------*/ const int zigzag_inv[3*NCOEFF_BLOCK] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63, //}; /* Vertical inverse zigzag */ //const static Int zigzag_v_inv[NCOEFF_BLOCK] = { 0, 8, 16, 24, 1, 9, 2, 10, 17, 25, 32, 40, 48, 56, 57, 49, 41, 33, 26, 18, 3, 11, 4, 12, 19, 27, 34, 42, 50, 58, 35, 43, 51, 59, 20, 
28, 5, 13, 6, 14, 21, 29, 36, 44, 52, 60, 37, 45, 53, 61, 22, 30, 7, 15, 23, 31, 38, 46, 54, 62, 39, 47, 55, 63, //}; /* Horizontal inverse zigzag*/ //const static Int zizag_h_inv[NCOEFF_BLOCK] = { 0, 1, 2, 3, 8, 9, 16, 17, 10, 11, 4, 5, 6, 7, 15, 14, 13, 12, 19, 18, 24, 25, 32, 33, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31, 34, 35, 40, 41, 48, 49, 42, 43, 36, 37, 38, 39, 44, 45, 46, 47, 50, 51, 56, 57, 58, 59, 52, 53, 54, 55, 60, 61, 62, 63 }; /*---------------------------------------------------------------------------- ; EXTERNAL FUNCTION REFERENCES ; Declare functions defined elsewhere and referenced in this module ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES ; Declare variables used in this module but defined elsewhere ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; FUNCTION CODE ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Define all local variables ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Function body here ----------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- ; Return nothing or data or data pointer ----------------------------------------------------------------------------*/ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/Android.mk ================================================ # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
# this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# This makefile supplies the rules for building a library of JNI code for
# use by our example platform shared library.

LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)

LOCAL_MODULE_TAGS := optional

# This is the target being built.
LOCAL_MODULE:= libH263Encoder

# All of the source files that we will compile.
LOCAL_SRC_FILES:= \
    src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.cpp \
    src/bitstream_io.cpp \
    src/combined_encode.cpp \
    src/datapart_encode.cpp \
    src/dct.cpp \
    src/fastcodemb.cpp \
    src/fastidct.cpp \
    src/fastquant.cpp \
    src/findhalfpel.cpp \
    src/me_utils.cpp \
    src/motion_comp.cpp \
    src/motion_est.cpp \
    src/mp4enc_api.cpp \
    src/rate_control.cpp \
    src/sad.cpp \
    src/sad_halfpel.cpp \
    src/vlc_encode.cpp \
    src/vop.cpp

# All of the shared libraries we link against.
LOCAL_SHARED_LIBRARIES :=

# No static libraries.
LOCAL_STATIC_LIBRARIES :=

# Also need the JNI headers.
LOCAL_C_INCLUDES += \
    $(JNI_H_INCLUDE) \
    $(LOCAL_PATH)/src \
    $(LOCAL_PATH)/include \
    $(LOCAL_PATH)/oscl

# No special compiler flags.
LOCAL_CFLAGS +=

# Don't prelink this library. For more efficient code, you may want
# to add this library to the prelink map and set this to true.
LOCAL_PRELINK_MODULE := false

include $(BUILD_SHARED_LIBRARY)


================================================
FILE: RtspCamera/jni/m4v_h263/enc/include/cvei.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*********************************************************************************/
/*     File: cvei.h                                                              */
/**
@introduction   Common Video Encoder Interface (CVEI) is intended to be used by
    application developers who want to create a multimedia application with video
    encoding feature. CVEI is designed such that new video encoder algorithms or
    modules can be plugged in seamlessly without user interaction. In other
    words, any changes to the CVEI library are transparent to the users. Users
    can still use the same set of APIs for new encoding tools.

@requirement    CVEI will take an input frame in one of several formats
    supported by PV and encode it to an MPEG4 bitstream. It will also return a
    reconstructed image in YUV 4:2:0 format. Currently the input formats
    supported are YUV 4:2:0, RGB24 and UYVY 4:2:2.

    CVEI is designed to be simple to use; it should hide implementation
    dependency from the users. In this version, we decided that the operation
    will be synchronous, i.e., the encoding will be a blocked call.
Asynchronous operation will be in the level above CVEI, i.e., in Author Engine Video Module which will take care of capturing device as well. @brief The following classes are used to interface with codecs. Their names are CPVxxxVideoEncoder where xxx is codec specific such as MPEG4, H263, H26L, etc. All of them are subclasses of CPVCommonVideoEncoder. */ /*********************************************************************************/ #ifndef __CVEI_H #define __CVEI_H #include "oscl_scheduler_ao.h" #include "oscl_base.h" #include "mp4enc_api.h" /* for MP4HintTrack */ #define MAX_LAYER 2 /** General returned values. */ enum TCVEI_RETVAL { ECVEI_SUCCESS, ECVEI_FAIL, ECVEI_FLUSH, ECVEI_MORE_OUTPUT } ; /** Returned events with the callback function. */ enum TCVEI_EVENT { /** Called when a packet or a frame of output bitstream is ready. */ ECVEI_BUFFER_READY, /** Called when the last packet of a frame of output bitstream is ready. */ ECVEI_FRAME_DONE, /** Called when no buffers is available for output bitstream. A buffer can be added thru AddBuffer API. */ ECVEI_NO_BUFFERS, /** Called when there is an error with the encoding operation. */ ECVEI_ERROR }; /** Contains supported input format */ enum TPVVideoFormat { ECVEI_RGB24, ECVEI_RGB12, ECVEI_YUV420, ECVEI_UYVY, ECVEI_YUV420SEMIPLANAR }; /** Type of contents for optimal encoding mode. */ enum TPVContentType { /** Content is to be streamed in real-time. */ ECVEI_STREAMING, /** Content is to be downloaded and playbacked later.*/ ECVEI_DOWNLOAD, /** Content is to be 3gpp baseline compliant. */ ECVEI_H263 }; /** Rate control type. */ enum TMP4RateControlType { /** Constant quality, variable bit rate, fixed quantization level. */ ECONSTANT_Q, /** Short-term constant bit rate control. */ ECBR_1, /** Long-term constant bit rate control. */ EVBR_1 }; /** Targeted profile and level to encode. 
*/ enum TPVM4VProfileLevel { /* Non-scalable profile */ ECVEI_SIMPLE_LEVEL0 = 0, ECVEI_SIMPLE_LEVEL1, ECVEI_SIMPLE_LEVEL2, ECVEI_SIMPLE_LEVEL3, ECVEI_SIMPLE_LEVEL4A, ECVEI_SIMPLE_LEVEL5, ECVEI_CORE_LEVEL1, ECVEI_CORE_LEVEL2, /* Scalable profile */ ECVEI_SIMPLE_SCALABLE_LEVEL0, ECVEI_SIMPLE_SCALABLE_LEVEL1, ECVEI_SIMPLE_SCALABLE_LEVEL2, ECVEI_CORE_SCALABLE_LEVEL1, ECVEI_CORE_SCALABLE_LEVEL2, ECVEI_CORE_SCALABLE_LEVEL3 }; /** This structure contains encoder settings. */ struct TPVVideoEncodeParam { /** Specifies an ID that will be used to specify this encoder while returning the bitstream in asynchronous mode. */ uint32 iEncodeID; /** Specifies whether base only (iNumLayer = 1) or base + enhancement layer (iNumLayer =2 ) is to be used. */ int32 iNumLayer; /** Specifies the width in pixels of the encoded frames. IFrameWidth[0] is for base layer and iFrameWidth[1] is for enhanced layer. */ int iFrameWidth[MAX_LAYER]; /** Specifies the height in pixels of the encoded frames. IFrameHeight[0] is for base layer and iFrameHeight[1] is for enhanced layer. */ int iFrameHeight[MAX_LAYER]; /** Specifies the cumulative bit rate in bit per second. IBitRate[0] is for base layer and iBitRate[1] is for base+enhanced layer.*/ int iBitRate[MAX_LAYER]; /** Specifies the cumulative frame rate in frame per second. IFrameRate[0] is for base layer and iFrameRate[1] is for base+enhanced layer. */ float iFrameRate[MAX_LAYER]; /** Specifies the picture quality factor on the scale of 1 to 10. It trades off the picture quality with the frame rate. Higher frame quality means lower frame rate. Lower frame quality for higher frame rate.*/ int32 iFrameQuality; /** Enable the use of iFrameQuality to determine the frame rate. If it is false, the encoder will try to meet the specified frame rate regardless of the frame quality.*/ bool iEnableFrameQuality; /** Specifies the maximum number of P-frames between 2 INTRA frames. An INTRA mode is forced to a frame once this interval is reached. 
When there is only one I-frame is present at the beginning of the clip, iIFrameInterval should be set to -1. */ int32 iIFrameInterval; /** According to iIFrameInterval setting, the minimum number of intra MB per frame is optimally calculated for error resiliency. However, when iIFrameInterval is set to -1, iNumIntraMBRefresh must be specified to guarantee the minimum number of intra macroblocks per frame.*/ uint32 iNumIntraMBRefresh; /** Specifies the VBV buffer size which determines the end-to-end delay between the encoder and the decoder. The size is in unit of seconds. For download application, the buffer size can be larger than the streaming application. For 2-way application, this buffer shall be kept minimal. For a special case, in VBR mode, iBufferDelay will be set to -1 to allow buffer underflow. */ float iBufferDelay; /** Specifies the type of the access whether it is streaming, CVEI_STREAMING (data partitioning mode) or download, CVEI_DOWNLOAD (combined mode).*/ TPVContentType iContentType; /** Specifies the rate control algorithm among one of the following constant Q, CBR and VBR. The structure TMP4RateControlType is defined below.*/ TMP4RateControlType iRateControlType; /** Specifies high quality but also high complexity mode for rate control. */ bool iRDOptimal; /** Specifies the initial quantization parameter for the first I-frame. If constant Q rate control is used, this QP will be used for all the I-frames. This number must be set between 1 and 31, otherwise, Initialize() will fail. */ int iIquant[2]; /** Specifies the initial quantization parameter for the first P-frame. If constant Q rate control is used, this QP will be used for all the P-frames. This number must be set between 1 and 31, otherwise, Initialize() will fail. */ int iPquant[2]; /** Specifies the initial quantization parameter for the first B-frame. If constant Q rate control is used, this QP will be used for all the B-frames. 
This number must be set between 1 and 31, otherwise, Initialize() will fail. */ int iBquant[2]; /** Specifies the search range in pixel unit for motion vector. The range of the motion vector will be of dimension [-iSearchRange.5, +iSearchRange.0]. */ int32 iSearchRange; /** Specifies the use of 8x8 motion vectors. */ bool iMV8x8; /** Specifies the use of half-pel motion vectors. */ bool iMVHalfPel; /** Specifies automatic scene detection where I-frame will be used the the first frame in a new scene. */ bool iSceneDetection; /** Specifies the packet size in bytes which represents the number of bytes between two resync markers. For ECVEI_DOWNLOAD and ECVEI_H263, if iPacketSize is set to 0, there will be no resync markers in the bitstream. For ECVEI_STREAMING is parameter must be set to a value greater than 0.*/ uint32 iPacketSize; /** Specifies whether the current frame skipping decision is allowed after encoding the current frame. If there is no memory of what has been coded for the current frame, iNoCurrentSkip has to be on. */ bool iNoCurrentSkip; /** Specifies that no frame skipping is allowed. Frame skipping is a tool used to control the average number of bits spent to meet the target bit rate. */ bool iNoFrameSkip; /** Specifies the duration of the clip in millisecond.*/ int32 iClipDuration; /** Specifies the profile and level used to encode the bitstream. When present, other settings will be checked against the range allowable by this target profile and level. Fail may be returned from the Initialize call. */ TPVM4VProfileLevel iProfileLevel; /** Specifies FSI Buffer input */ uint8* iFSIBuff; /** Specifies FSI Buffer Length */ int iFSIBuffLength; }; /** Structure for input format information */ struct TPVVideoInputFormat { /** Contains the width in pixels of the input frame. */ int32 iFrameWidth; /** Contains the height in pixels of the input frame. */ int32 iFrameHeight; /** Contains the input frame rate in the unit of frame per second. 
*/ float iFrameRate; /** Contains Frame Orientation. Used for RGB input. 1 means Bottom_UP RGB, 0 means Top_Down RGB, -1 for video formats other than RGB*/ int iFrameOrientation; /** Contains the format of the input video, e.g., YUV 4:2:0, UYVY, RGB24, etc. */ TPVVideoFormat iVideoFormat; }; /** Contains the input data information */ struct TPVVideoInputData { /** Pointer to an input frame buffer in input source format.*/ uint8 *iSource; /** The corresponding time stamp of the input frame. */ uint32 iTimeStamp; }; /** Contains the output data information */ struct TPVVideoOutputData { /** Pointer to the reconstructed frame buffer in YUV 4:2:0 domain. */ uint8 *iFrame; /** The number of layer encoded, 0 for base, 1 for enhanced. */ int32 iLayerNumber; /** Pointer to the encoded bitstream buffer. */ uint8 *iBitStream; /** The size in bytes of iBStream. */ int32 iBitStreamSize; /** The time stamp of the encoded frame according to the bitstream. */ uint32 iVideoTimeStamp; /** The time stamp of the encoded frame as given before the encoding. */ uint32 iExternalTimeStamp; /** The hint track information. */ MP4HintTrack iHintTrack; }; /** An observer class for callbacks to report the status of the CVEI */ class MPVCVEIObserver { public: /** The callback funtion with aEvent being one of TCVEIEvent enumeration. */ virtual void HandlePVCVEIEvent (uint32 aId, uint32 aEvent, uint32 aParam1 = 0) = 0; virtual ~MPVCVEIObserver() {} }; /** This class is the base class for codec specific interface class. The users must maintain an instance of the codec specific class throughout the encoding session. */ class CommonVideoEncoder : public OsclTimerObject { public: /** Constructor for CVEI class. */ CommonVideoEncoder() : OsclTimerObject(OsclActiveObject::EPriorityNominal, "PVEncoder") {}; /** Initialization function to set the input video format and the encoding parameters. This function returns CVEI_ERROR if there is any errors. 
Otherwise, the function returns CVEI_SUCCESS.*/ virtual TCVEI_RETVAL Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam) = 0; /** Set the observer for asynchronous encoding mode. */ virtual TCVEI_RETVAL SetObserver(MPVCVEIObserver *aObserver) = 0; /** Add a buffer to the queue of output buffers for output bitstream in asynchronous encoding mode. */ virtual TCVEI_RETVAL AddBuffer(TPVVideoOutputData *aVidOut) = 0; /** This function sends in an input video data structure containing a source frame and the associated timestamp. The encoded bitstream will be returned by observer callback. The above 3 APIs only replace EncodeFrame() API. Other APIs such as initialization and update parameters remain the same. */ virtual TCVEI_RETVAL Encode(TPVVideoInputData *aVidIn) = 0; /** This function returns the maximum VBV buffer size such that the application can allocate a buffer that guarantees to fit one frame.*/ virtual int32 GetBufferSize() = 0; /** This function returns the VOL header part (starting from the VOS header) of the encoded bitstream. This function must be called after Initialize. The output is written to the memory (volHeader) allocated by the users.*/ virtual TCVEI_RETVAL GetVolHeader(uint8 *volHeader, int32 *size, int32 layer) = 0; /** This function sends in an input video data structure containing a source frame and the associated timestamp. It returns an output video data structure containing coded bit stream, reconstructed frame in YUV 4:2:0 (can be changed to source format) and the timestamp associated with the coded frame. The input timestamp may not correspond to the output timestamp. User can send an input structure in without getting any encoded data back or getting an encoded frame in the past. This function returns ECVEI_ERROR if there is any errors. Otherwise, the function returns ECVEI_SUCCESS. 
In case of Overrun Buffer usage, it is possible that return value is ECVEI_MORE_OUTPUT which indicates that frame cannot fit in the current buffer*/ virtual TCVEI_RETVAL EncodeFrame(TPVVideoInputData *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes #ifdef PVAUTHOR_PROFILING , void *aParam1 = 0 #endif ) = 0; /** Before the termination of the encoding process, the users have to query whether there are any encoded frame pending inside the CVEI. The returned value will indicate whether there are more frames to be flushed (ECVEI_FLUSH). FlushOutput has to be called until there are no more frames, i.e., it returns ECVEI_SUCCESS. This function may be called during the encoding operation if there is no input frame and the application does not want to waste the time waiting for input frame. It can call this function to flush encoded frame out of the memory. */ virtual TCVEI_RETVAL FlushOutput(TPVVideoOutputData *aVidOut) = 0; /** This function cleanup the CVEI allocated resources. */ virtual TCVEI_RETVAL Terminate() = 0; /**This function dynamically changes the target bit rate of the encoder while encoding. aBitRate[n] is the new accumulate target bit rate of layer n. Successful update is returned with ECVEI_SUCCESS.*/ virtual TCVEI_RETVAL UpdateBitRate(int32 aNumLayer, int32 *aBitRate) = 0; /** This function dynamically changes the target frame rate of the encoder while encoding. aFrameRate[n] is the new accumulate target frame rate of layer n. Successful update is returned with ECVEI_SUCCESS. */ virtual TCVEI_RETVAL UpdateFrameRate(int32 aNumLayer, float *aFrameRate) = 0; /** This function dynamically changes the I-Vop update interval while encoding to a new value, aIFrameInterval. */ virtual TCVEI_RETVAL UpdateIFrameInterval(int32 aIFrameInterval) = 0; /** This function forces an I-Vop mode to the next frame to be encoded. */ virtual TCVEI_RETVAL IFrameRequest() = 0; /** This function returns the input width of a specific layer (not necessarily multiple of 16). 
*/ virtual int32 GetEncodeWidth(int32 aLayer) = 0; /** This function returns the input height of a specific layer (not necessarily multiple of 16). */ virtual int32 GetEncodeHeight(int32 aLayer) = 0; /** This function returns the target encoded frame rate of a specific layer. */ virtual float GetEncodeFrameRate(int32 aLayer) = 0; protected: virtual void Run(void) = 0; virtual void DoCancel(void) = 0; /* internal enum */ enum TCVEIState { EIdle, EEncode }; TCVEIState iState; uint32 iId; }; #endif ================================================ FILE: RtspCamera/jni/m4v_h263/enc/include/mp4enc_api.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #ifndef _MP4ENC_API_H_ #define _MP4ENC_API_H_ #ifndef OSCL_BASE_H_INCLUDED #include "oscl_base.h" #endif #ifndef _PV_TYPES_ #define _PV_TYPES_ typedef unsigned char UChar; typedef char Char; typedef unsigned int UInt; typedef int Int; typedef unsigned short UShort; typedef short Short; typedef unsigned int Bool; typedef unsigned long ULong; #define PV_CODEC_INIT 0 #define PV_CODEC_STOP 1 #endif #define PV_TRUE 1 #define PV_FALSE 0 typedef enum { SHORT_HEADER, SHORT_HEADER_WITH_ERR_RES, H263_MODE, H263_MODE_WITH_ERR_RES, DATA_PARTITIONING_MODE, COMBINE_MODE_NO_ERR_RES, COMBINE_MODE_WITH_ERR_RES } MP4EncodingMode; typedef enum { CONSTANT_Q, CBR_1, VBR_1, CBR_2, VBR_2, CBR_LOWDELAY } MP4RateControlType; typedef enum { PASS1, PASS2 } PassNum; typedef enum { PV_OFF, PV_ON } ParamEncMode; /* {SPL0, SPL1, SPL2, SPL3, SPL4a, SPL5, CPL1, CPL2, CPL2, CPL2} , SPL0: Simple Profile@Level0 , CPL1: Core Profile@Level1 */ /* {SSPL0, SSPL1, SSPL2, SSPL2, CSPL1, CSPL2, CSPL3, CSPL3} , SSPL0: Simple Scalable Profile@Level0, CPL1: Core Scalable Profile@Level1 */ typedef enum { /* Non-scalable profile */ SIMPLE_PROFILE_LEVEL0 = 0, SIMPLE_PROFILE_LEVEL1, SIMPLE_PROFILE_LEVEL2, SIMPLE_PROFILE_LEVEL3, SIMPLE_PROFILE_LEVEL4A, SIMPLE_PROFILE_LEVEL5, CORE_PROFILE_LEVEL1, CORE_PROFILE_LEVEL2, MAX_BASE_PROFILE = CORE_PROFILE_LEVEL2, /* Scalable profile */ SIMPLE_SCALABLE_PROFILE_LEVEL0 = MAX_BASE_PROFILE + 1, SIMPLE_SCALABLE_PROFILE_LEVEL1, SIMPLE_SCALABLE_PROFILE_LEVEL2, CORE_SCALABLE_PROFILE_LEVEL1, CORE_SCALABLE_PROFILE_LEVEL2, CORE_SCALABLE_PROFILE_LEVEL3, MAX_SCALABLE_PROFILE = CORE_SCALABLE_PROFILE_LEVEL3 } ProfileLevelType; typedef struct tagMP4HintTrack { UChar MTB; UChar LayerID; UChar CodeType; UChar RefSelCode; } MP4HintTrack; typedef struct tagvideoEncControls { void *videoEncoderData; Int videoEncoderInit; } VideoEncControls; typedef struct tagvideoEncFrameIO { UChar *yChan; /* pointer to Y */ UChar *uChan; 
/* pointer to U */ UChar *vChan; /* pointer to V */ Int height; /* height for Y */ Int pitch; /* stride for Y */ ULong timestamp; /* modulo timestamp in millisecond*/ } VideoEncFrameIO ; /** @brief Encoding options structure */ typedef struct tagvideoEncOptions { /** @brief Sets the encoding mode, defined by the above enumaration. If there are conflicts between the encoding mode * and subsequent encoding options, encoding mode take precedent over encoding options. */ MP4EncodingMode encMode; /** @brief Sets the number of bytes per packet, only used in DATA_PARTITIONING_MODE or COMBINE_MODE_WITH_ERR_RES mode. * The resync marker will be inserted as often as the size of the packet.*/ Int packetSize; /** @brief Selects MPEG-4/H.263 profile and level, if specified other encoding options must conform with it. */ ProfileLevelType profile_level; /** @brief Enables reversible variable length code (RVLC) mode. Normally it is set to PV_OFF.*/ ParamEncMode rvlcEnable; /** @brief Set the frequency of GOB header interval */ Int gobHeaderInterval; /** @brief Sets the number of bitstream layers: 1 is base only: 2 is base + enhancement */ Int numLayers; /** @brief Sets the number of ticks per second used for timing information encoded in MPEG4 bitstream.*/ Int timeIncRes; /** @brief Sets the number of ticks in time increment resolution between 2 source frames (equivalent to source frame rate). */ Int tickPerSrc; /** @brief Specifies encoded heights in pixels, height[n] represents the n-th layer's height. */ Int encHeight[2]; /** @brief Specifies encoded widths in pixels, width[n] represents the n-th layer's width.*/ Int encWidth[2]; /** @brief Specifies target frame rates in frames per second, frameRate[n] represents the n-th layer's target frame rate.*/ float encFrameRate[2]; /** @brief Specifies target bit rates in bits per second unit, bitRate[n] represents the n-th layer's target bit rate. */ Int bitRate[2]; /** @brief Specifies default quantization parameters for I-Vop. 
Iquant[n] represents the n-th layer default quantization parameter. The default is Iquant[0]=12.*/ Int iQuant[2]; /** @brief Specifies default quantization parameters for P-Vop. Pquant[n] represents the n-th layer default quantization parameter. The default is Pquant[0]=10.*/ Int pQuant[2]; /** @brief specifies quantization mode (H263 mode or MPEG mode) of the encoded base and enhance layer (if any). * In Simple and Simple Scalable profile, we use only H263 mode.*/ Int quantType[2]; /** @brief Sets rate control algorithm, one of (CONSTANT_Q, CBR_1, or VBR_1). * CONSTANT_Q uses the default quantization values to encode the sequence. * CBR_1 (constant bit rate) controls the output at a desired bit rate * VBR_1 (variable bit rate) gives better picture quality at the expense of bit rate fluctuation * Note: type=CONSTANT_Q produces sequences with arbitrary bit rate. * type=CBR_1 produces sequences suitable for streaming. * type=VBR_1 produces sequences suitable for download. */ MP4RateControlType rcType; /** @brief Sets the VBV buffer size (in the unit of second delay) used to prevent buffer overflow and underflow * on the decoder side. This function is redundant to PVSetVBVSize. Either one of them is used at a time. */ float vbvDelay; /** @brief Specifies whether frame skipping is permitted or not. When rate control type is set to CONSTANT_Q * frame skipping is automatically banned. In CBR_1 and VBR_1 rate control, frame skipping is allowed by default. * However, users can force no frame skipping with this flag, but buffer constraint may be violated.*/ ParamEncMode noFrameSkipped; /** @brief Sets the maximum number of P-frames between two I-frames. I-frame mode is periodically forced * if no I-frame is encoded after the specified period to add error resiliency and help resynchronize in case of errors. * If scene change detection can add additional I-frame if new scenes are detected. * intraPeriod is the I frame interval in terms of second. 
* intraPeriod =0 indicates I-frame encoding only; * intraPeriod = -1 indicates I-frame followed by all P-frames; (default) * intraPeriod = N, indicates the number of P-frames between 2 I-frames.*/ Int intraPeriod; /** @brief Specifies the number Intra MBs to be refreshed in a P-frame. */ Int numIntraMB; /** * @brief Specifies whether the scene change detection (SCD) is enabled or disabled. * With SCD enable, when a new scene is detected, I-Vop mode will be used for the first frame of * the new scene resulting in better picture quality. An insertion of an I-VOP resets the intraPeriod * specified by the IntraPeriodAPI().*/ ParamEncMode sceneDetect; /** @brief Specifies the search range of motion estimation search. Larger value implies * larger search range, better motion vector match, but more complexity. * If searchRange=n, the motion vector search is in the range of [-n,n-1] pixels. * If half-pel mode is on, the range is [-n, (n-1)+1/2] pixels. The default value is 16.*/ Int searchRange; /** @brief Turns on/off 8x8 block motion estimation and compensation. * If on, four motion vectors may be used for motion estimation and compensation of a macroblock, * otherwise one motion vector per macroblock is used. When the 8x8 MV is off, the total encoding complexity * is less but the image quality is also worse. Therefore, it can be used in complexity limited environment.*/ ParamEncMode mv8x8Enable; /** @brief Set the threshold for using intra DC VLC. * Value must range from 0-7.*/ Int intraDCVlcTh; /** @brief This flag turns on the use of AC prediction */ Bool useACPred; } VideoEncOptions; #ifdef __cplusplus extern "C" { #endif /* API's */ /* Always start with this one !!*/ /** * @brief Gets default encoding options. This way users only have to set relevant encoding options and leave the one * they are unsure of. * @encOption Pointer to VideoEncOption structure. 
* @encUseCase This value determines the set of default encoding options, for example, different encoding options * are assigned to streaming use-case as compared to download use-case. It can be project dependent too. * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase); /** * @brief Verifies the consistency of encoding parameters, allocates memory needed and set necessary internal variables. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encCtrl, VideoEncOptions *encOption); /* acquiring encoder info APIs */ /** * @brief This function returns VOL header. It has to be called before the frame is encoded. If so, * then the VOL Header is passed back to the application. Then all frames that are encoded do not contain the VOL Header. * If you do not call the API then the VOL Header is passed within the first frame that is encoded. * The behavior is unknown if it is called after the first frame is encoded. It is mainly used for MP4 file format authoring. * @param encCtrl is video encoder control structure that is always passed as input in all APIs. * @param volHeader is the Buffer for VOL header. * @param size is the size of VOL header in bytes. * @param layer is the layer of the requested VOL header. * @return true for correct operation; false if error happens. */ OSCL_IMPORT_REF Bool PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer); /** * @brief This function returns the profile and level in H.263 coding when the encoding parameters are set * @param encCtrl is video encoder control structure that is always passed as input in all APIs. * @param profileID is the pointer of the profile ID. 
Right now we only support profile 0 * @param levelID is the pointer of the level ID that could be 10-70. * @return true for correct operation; false if error happens. */ OSCL_IMPORT_REF Bool PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID); /** * @brief This function returns the profile and level of MPEG4 when the encoding parameters are set * @param encCtrl is video encoder control structure that is always passed as input in all APIs. * @param profile_level is the pointer of the profile enumeration * @param nLayer is the index of the layer of interest * @return true for correct operation; false if error happens. */ OSCL_IMPORT_REF Bool PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer); /** * @brief This function returns maximum frame size in bytes * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param maxVideoFrameSize is the pointer of the maximum frame size * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize); #ifndef LIMITED_API /** * @brief This function returns the total amount of memory (in bytes) allocated by the encoder library. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Int PVGetEncMemoryUsage(VideoEncControls *encCtrl); /** * @brief This function is used by PVAuthor to get the size of the VBV buffer. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param VBVSize is the pointer of The size of the VBV buffer in bytes. 
 * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize); #endif /** * @brief This function encodes a frame in YUV 4:2:0 format from the *video_in input frame and puts the result in YUV * for reconstructed frame and bstream for MPEG4 bitstream. The application is required to allocate memory for * bitstream buffer. The size of the input bitstream memory and the returned output buffer are specified in the * size field. The encoded layer is specified by the nLayer field. If the current frame is not encoded, size=0 and nLayer=-1. * Note: If the allocated buffer size is too small to fit a bitstream of a frame, then those extra bits will be left out * which can cause syntactic error at the decoder side. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param vid_in is the pointer to VideoEncFrameIO structure containing the YUV input data * @param vid_out is the pointer to VideoEncFrameIO structure containing the reconstructed YUV output data after encoding * @param nextModTime is the timestamp encoder expects from the next input * @param bstream is the pointer to MPEG4 bitstream buffer * @param size is the size of bitstream buffer allocated (input) and size of the encoded bitstream (output). * @param nLayer is the layer of the encoded frame either 0 for base or 1 for enhancement layer. The value -1 indicates skipped frame due to buffer overflow. * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out, ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer); /** * @brief This function is used to query overrun buffer. It is used when PVEncodeVideoFrame() returns a size that is * larger than the input size.
 * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @return Pointer to the overrun buffer. NULL if overrun buffer is not used. */ OSCL_IMPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl); #ifndef NO_SLICE_ENCODE /* This set of APIs are not working. This functionality has been partially replaced by the introduction of overrun buffer. */ /* slice-based coding */ /** * @brief This function sets the input YUV frame and timestamp to be encoded by the slice-based encoding function PVEncodeSlice(). * It also returns the memory address the reconstructed frame will be copied to (in advance) and the coded layer number. * The encoder library processes the timestamp and determines if this frame is to be encoded or not. If the current frame * is not encoded, nLayer=-1. For frame-based motion estimation, the motion estimation of the entire frame is also performed * in this function. For MB-based motion estimation, the motion vector is searched while coding each MB in PVEncodeSlice(). * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param vid_in is the pointer to VideoEncFrameIO structure containing the YUV input data * @param nextModTime is the timestamp encoder expects from the next input if this input is rejected and nLayer is set to -1. * @param nLayer is the layer of the encoded frame either 0 for base or 1 for enhancement layer. The value -1 indicates skipped frame due to buffer overflow. * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer); /** * @brief This function encodes a GOB (short header mode) or a packet (data partitioning mode or combined mode with resync marker) * and outputs the reconstructed frame and MPEG4 bitstream. The application is required to allocate memory for the bitstream buffer.
 * The size of the input bitstream memory and the returned output buffer are specified in the size field. If the buffer size is * smaller than the requested packet size, user has to call PVEncodeSlice again to get the rest of that pending packet before moving * on to the next packet. For the combined mode without resync marker, the function returns when the buffer is full. * The end-of-frame flag indicates the completion of the frame encoding. Next frame must be sent in with PVEncodeFrameSet(). * At the end-of-frame, the next video input address and the next video modulo timestamp will be set. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param bstream is the pointer to MPEG4 bitstream buffer. * @param size is the size of bitstream buffer allocated (input) and size of the encoded bitstream (output). * @param endofFrame is a flag indicating the end-of-frame, '1'. Otherwise, '0'. When PVSetNoCurrentFrameSkip is OFF, * end-of-frame '-1' indicates current frame bitstream must be disregarded. * @param vid_out is the pointer to VideoEncFrameIO structure containing the reconstructed YUV output data after encoding * @param nextModTime is the timestamp encoder expects from the next input * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size, Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime); #endif /** * @brief This function returns MP4 file format hint track information. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param info is the structure for MP4 hint track information * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info); #ifndef LIMITED_API /** * @brief updates target frame rates of the encoded base and enhance layer (if any) while encoding operation is ongoing.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param frameRate is the pointers to array of target frame rates in frames per second, * frameRate[n] represents the n-th layer's target frame rate. * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate); /* for 2-way */ /** * @brief updates target bit rates of the encoded base and enhance layer (if any) while encoding operation is ongoing. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param bitRate is the pointers to array of target bit rates in bits per second unit, * bitRate[n] represents the n-th layer's target bit rate. * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate); /* for 2-way */ /** * @brief updates the INTRA frame refresh interval while encoding operation is ongoing. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param aIFramePeriod is a new value of INTRA frame interval in the unit of number of coded frames. * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod);/* for 2-way */ /** * @brief specifies the number Intra MBs to be refreshed * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @param numMB is the number of Intra MBs to be refreshed * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB); /* for 2-way */ /** * @brief This function is called whenever users want the next base frame to be encoded as an I-Vop. 
* @param encCtrl is video encoder control structure that is always passed as input in all APIs * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVIFrameRequest(VideoEncControls *encCtrl); /* for 2-way */ #endif // LIMITED_API /* finishing encoder */ /** * @brief This function frees up all the memory allocated by the encoder library. * @param encCtrl is video encoder control structure that is always passed as input in all APIs * @return true for correct operation; false if error happens */ OSCL_IMPORT_REF Bool PVCleanUpVideoEncoder(VideoEncControls *encCtrl); #ifdef __cplusplus } #endif #endif /* _MP4ENC_API_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/include/pvm4vencoder.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* File: pvm4vencoder.h */ /** This file contains MP4 encoder related classes, structures and enumerations. 
*/ #ifndef __PVM4VENCODER_H #define __PVM4VENCODER_H #include "cvei.h" #include "mp4enc_api.h" #include "ccrgb24toyuv420.h" #include "ccrgb12toyuv420.h" #include "ccyuv420semitoyuv420.h" #define KCVEIMaxOutputBuffer 10 #define VISUAL_OBJECT_SEQUENCE_START_CODE 0x01B0 #define VISUAL_OBJECT_SEQUENCE_END_CODE 0x01B1 #define VISUAL_OBJECT_START_CODE 0x01B5 #define VO_START_CODE 0x8 #define VO_HEADER_LENGTH 32 #define VOL_START_CODE 0x12 #define VOL_START_CODE_LENGTH 28 #define GROUP_START_CODE 0x01B3 #define GROUP_START_CODE_LENGTH 32 #define VOP_ID_CODE_LENGTH 5 #define VOP_TEMP_REF_CODE_LENGTH 16 #define USER_DATA_START_CODE 0x01B2 #define USER_DATA_START_CODE_LENGTH 32 #define SHORT_VIDEO_START_MARKER 0x20 #define SHORT_VIDEO_START_MARKER_LENGTH 22 /** Encoding mode specific to MPEG4. */ enum TMP4EncodingMode { /** H263 mode. */ EH263_MODE, /** Data partitioning mode, packet size must be specified. */ EDATA_PARTITIONG_MODE, /** Combined mode without resync markers. */ ECOMBINING_MODE_NO_ERR_RES, /** COmbined mode with resync markers, packet size must be specified. */ ECOMBINING_MODE_WITH_ERR_RES }; /** Generic ON/OFF. */ enum TParamEncMode { EPV_OFF, EPV_ON }; typedef struct { uint8 *data; uint32 numBytes; uint32 bytePos; uint32 bitBuf; uint32 dataBitPos; uint32 bitPos; } mp4StreamType; static const uint32 MASK[33] = { 0x00000000, 0x00000001, 0x00000003, 0x00000007, 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff, 0x0001ffff, 0x0003ffff, 0x0007ffff, 0x000fffff, 0x001fffff, 0x003fffff, 0x007fffff, 0x00ffffff, 0x01ffffff, 0x03ffffff, 0x07ffffff, 0x0fffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff, 0xffffffff }; /** MPEG4 encoder class interface. See CommonVideoEncoder APIs for virtual functions definitions. 
 */
class CPVM4VEncoder : public CommonVideoEncoder
{
public:
    /** Factory method: creates an encoder instance bound to the given thread id. */
    OSCL_IMPORT_REF static CPVM4VEncoder* New(int32 aThreadId);

    OSCL_IMPORT_REF ~CPVM4VEncoder();

    /** Registers the observer that is notified of encoder events. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL SetObserver(MPVCVEIObserver *aObserver);
    /** Queues an output buffer for the encoder to fill. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL AddBuffer(TPVVideoOutputData *aVidOut);
    /** Submits one input frame for (asynchronous) encoding. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL Encode(TPVVideoInputData *aVidIn);
    /** Configures the encoder from the input format and encode parameters. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam);
    OSCL_IMPORT_REF virtual int32 GetBufferSize();
    /** Retrieves the VOL header for the requested layer into volHeader/size. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL GetVolHeader(uint8 *volHeader, int32 *size, int32 layer);
    /** Synchronous single-frame encode; extra profiling argument only with PVAUTHOR_PROFILING. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL EncodeFrame(TPVVideoInputData *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes
#ifdef PVAUTHOR_PROFILING
            , void *aParam1 = 0
#endif
                                                    );
    OSCL_IMPORT_REF virtual TCVEI_RETVAL FlushOutput(TPVVideoOutputData *aVidOut);
    OSCL_IMPORT_REF virtual TCVEI_RETVAL Terminate();
    /** Updates per-layer target bit rates while encoding is ongoing. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL UpdateBitRate(int32 aNumLayer, int32 *aBitRate);
    /** Updates per-layer target frame rates while encoding is ongoing. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL UpdateFrameRate(int32 aNumLayer, float *aFrameRate);
    OSCL_IMPORT_REF virtual TCVEI_RETVAL UpdateIFrameInterval(int32 aIFrameInterval);
    /** Forces the next base-layer frame to be coded as an INTRA frame. */
    OSCL_IMPORT_REF virtual TCVEI_RETVAL IFrameRequest();

    /** Set the forced number of intra macroblock per frame for error resiliency.
     */
    OSCL_IMPORT_REF TCVEI_RETVAL SetIntraMBRefresh(int32 aNumMBRefresh);

    OSCL_IMPORT_REF virtual int32 GetEncodeWidth(int32 aLayer);
    OSCL_IMPORT_REF virtual int32 GetEncodeHeight(int32 aLayer);
    OSCL_IMPORT_REF virtual float GetEncodeFrameRate(int32 aLayer);

private:
    CPVM4VEncoder();
    /* Two-phase construction helper invoked by New(). */
    bool Construct(int32 aThreadId);
#ifdef YUV_INPUT
    /* Copies a YUV frame into iYUVIn, padding to 16-pixel-aligned dimensions. */
    void CopyToYUVIn(uint8 *YUV, int width, int height, int width_16, int height_16);
#endif
    /** Color conversion instance RGB24/RGB12/YUV420SEMI to YUV 420 */
#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)
    ColorConvertBase *ccRGBtoYUV;
#endif
#ifdef FOR_3GPP_COMPLIANCE
    void Check3GPPCompliance(TPVVideoEncodeParam *aEncParam, int *aEncWidth, int *aEncHeight);
#endif
    /* Parsing FSI (frame/decoder-specific info) and bitstream-reading helpers. */
    TCVEI_RETVAL ParseFSI(uint8* aFSIBuff, int FSILength, VideoEncOptions *aEncOption);
    int16 ShowBits(mp4StreamType *pStream, uint8 ucNBits, uint32 *pulOutData);
    int16 FlushBits(mp4StreamType *pStream, uint8 ucNBits);
    int16 ReadBits(mp4StreamType *pStream, uint8 ucNBits, uint32 *pulOutData);
    int16 ByteAlign(mp4StreamType *pStream);
    int16 iDecodeShortHeader(mp4StreamType *psBits, VideoEncOptions *aEncOption);

    /* Pure virtuals from OsclActiveObject implemented in this derived class */
    virtual void Run(void);
    virtual void DoCancel(void);

    MPVCVEIObserver *iObserver;       // event observer set via SetObserver()
    // Source description — presumably populated from Initialize(); TODO confirm against the .cpp
    int iSrcWidth;
    int iSrcHeight;
    int iSrcFrameRate;
    int iFrameOrientation;
    // Per-layer encode geometry/rate (up to 4 layers, indexed by aLayer)
    int iEncWidth[4];
    int iEncHeight[4];
    float iEncFrameRate[4];
    TPVVideoFormat iVideoFormat;

    /* variables needed in operation */
    VideoEncControls iEncoderControl; // underlying mp4enc_api control structure
    bool iInitialized;
    uint8 *iYUVIn;                    // intermediate YUV 4:2:0 input buffer
    uint8 *iVideoIn;
    uint8 *iVideoOut;
    TPVVideoOutputData *iOutputData[KCVEIMaxOutputBuffer]; // buffers queued by AddBuffer()
    int32 iNumOutputData;
    uint32 iTimeStamp;
    uint32 iNextModTime;
    uint8 *iOverrunBuffer;            // see PVGetOverrunBuffer() in mp4enc_api.h
    int iOBSize;

    /* Tables used in color conversion */
    uint8 *iY_Table;
    uint16 *iCb_Table, *iCr_Table, *ipCb_Table, *ipCr_Table;

    int iNumLayer;
};

#endif


================================================
FILE:
RtspCamera/jni/m4v_h263/enc/oscl/oscl_base.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_BASE_H_INCLUDED #define OSCL_BASE_H_INCLUDED #include "oscl_config.h" #include "oscl_types.h" #include "oscl_error.h" class OsclBase { public: OSCL_IMPORT_REF static void Init() {}; OSCL_IMPORT_REF static void Cleanup() {}; }; class OsclErrorTrap { public: OSCL_IMPORT_REF static void Init() {}; OSCL_IMPORT_REF static void Cleanup() {}; OSCL_IMPORT_REF static void leave(int) {}; }; class OsclMem { public: OSCL_IMPORT_REF static void Init() {}; OSCL_IMPORT_REF static void Cleanup() {}; }; class OsclRequestStatus { public: OsclRequestStatus(); OsclRequestStatus(int32 aVal) { (void)(aVal); }; int32 operator=(int32 aVal); int32 operator==(int32 aVal) const; int32 operator!=(int32 aVal) const; int32 operator>=(int32 aVal) const; int32 operator<=(int32 aVal) const; int32 operator>(int32 aVal) const; int32 operator<(int32 aVal) const; int32 Int() const; private: int32 iStatus; }; class OsclActiveObject { public: /** * Scheduling priorities. */ enum TPriority { /** A low priority, useful for active objects representing background processing. */ EPriorityIdle = -100, /** A priority higher than EPriorityIdle but lower than EPriorityStandard. 
 */ EPriorityLow = -20, /** Most active objects will have this priority. */ EPriorityStandard = 0, /** A priority higher than EPriorityStandard; useful for active objects handling user input. */ EPriorityUserInput = 10, /** A priority higher than EPriorityUserInput. */ EPriorityHigh = 20 }; /** * Constructor. * @param aPriority (input param): scheduling priority * @param name (input param): optional name for this AO. */ OSCL_IMPORT_REF OsclActiveObject(int32 aPriority, const char name[]); /** * Destructor. */ OSCL_IMPORT_REF virtual ~OsclActiveObject(); /** * Set request active for this AO. * Will panic if the request is already active, * or the active object is not added to any scheduler, * or the calling thread context does not match * the scheduler thread. */ OSCL_IMPORT_REF void SetBusy(); /** * Return true if this AO is active, * false otherwise. */ OSCL_IMPORT_REF bool IsBusy() const; /** * Set request active for this AO and set the status to pending. * PendForExec is identical to SetBusy, but it * additionally sets the request status to OSCL_REQUEST_PENDING. * */ OSCL_IMPORT_REF void PendForExec(); /** * Complete the active request for the AO. Can be * called from any thread. * @param aStatus: request completion status. */ OSCL_IMPORT_REF void PendComplete(int32 aStatus); /** * Add this AO to the current thread's scheduler. */ OSCL_IMPORT_REF void AddToScheduler(); /** * Return true if this AO is added to the scheduler, * false otherwise. */ OSCL_IMPORT_REF bool IsAdded() const; /** * Remove this AO from its scheduler. * Will panic if the calling thread context does * not match the scheduling thread. * Cancels any active request before removing. */ OSCL_IMPORT_REF void RemoveFromScheduler(); /** * Deque is identical to RemoveFromScheduler. * It's only needed to prevent accidental usage * of Symbian CActive::Deque. */ OSCL_IMPORT_REF void Deque(); /** * Complete this AO's request immediately. * If the AO is already active, this will do nothing.
 * Will panic if the AO is not added to any scheduler, * or if the calling thread context does not match the * scheduling thread. */ OSCL_IMPORT_REF void RunIfNotReady(); /** * Cancel any active request. * If the request is active, this will call the DoCancel * routine, wait for the request to cancel, then set the * request inactive. The AO will not run. * If the request is not active, it does nothing. * Request must be canceled from the same thread * in which it is scheduled. */ OSCL_IMPORT_REF void Cancel(); /** * Return scheduling priority of this active object. */ OSCL_IMPORT_REF int32 Priority() const; /** * Request status access */ OSCL_IMPORT_REF int32 Status()const; OSCL_IMPORT_REF void SetStatus(int32); OSCL_IMPORT_REF int32 StatusRef(); protected: /** * Cancel request handler. * This gets called by scheduler when the request * is cancelled. The default routine will complete * the request. If any additional action is needed, * the derived class may override this. If the derived class * does override DoCancel, it must complete the request. */ //OSCL_IMPORT_REF virtual void DoCancel(); /** * Run Error handler. * This gets called by scheduler when the Run routine leaves. * The default implementation simply returns the leave code. * If the derived class wants to handle errors from Run, * it may override this. The RunError should return OsclErrNone * if it handles the error, otherwise it should return the * input error code. * @param aError: the leave code generated by the Run. */ //OSCL_IMPORT_REF virtual int32 RunError(int32 aError); }; class OsclTimerObject { public: /** * Constructor. * @param aPriority (input param): scheduling priority * @param name (input param): optional name for this AO. */ OSCL_IMPORT_REF OsclTimerObject(int32 aPriority, const char name[]); /** * Destructor. */ //OSCL_IMPORT_REF virtual ~OsclTimerObject(); /** * Add this AO to the current thread's scheduler.
*/ OSCL_IMPORT_REF void AddToScheduler(); /** * Return true if this AO is added to the scheduler, * false otherwise. */ OSCL_IMPORT_REF bool IsAdded() const; /** * Remove this AO from its scheduler. * Will panic if the calling thread context does * not match the scheduling thread. * Cancels any active request before removing. */ OSCL_IMPORT_REF void RemoveFromScheduler(); /** * Deque is identical to RemoveFromScheduler * It's only needed to prevent accidental usage * of Symbian CActive::Deque. */ OSCL_IMPORT_REF void Deque(); /** * 'After' sets the request active, with request status * OSCL_REQUEST_STATUS_PENDING, and starts a timer. * When the timer expires, the request will complete with * status OSCL_REQUEST_ERR_NONE. * Must be called from the same thread in which the * active object is scheduled. * Will panic if the request is already active, the object * is not added to any scheduler, or the calling thread * does not match the scheduling thread. * @param anInterval: timeout interval in microseconds. */ OSCL_IMPORT_REF void After(int32 aDelayMicrosec); /** * Complete the request after a time interval. * RunIfNotReady is identical to After() except that it * first checks the request status, and if it is already * active, it does nothing. * * @param aDelayMicrosec (input param): delay in microseconds. */ OSCL_IMPORT_REF void RunIfNotReady(uint32 aDelayMicrosec = 0); /** * Set request active for this AO. * Will panic if the request is already active, * or the active object is not added to any scheduler, * or the calling thread context does not match * the scheduler thread. */ OSCL_IMPORT_REF void SetBusy(); /** * Return true if this AO is active, * false otherwise. */ OSCL_IMPORT_REF bool IsBusy() const; /** * Cancel any active request. * If the request is active, this will call the DoCancel * routine, wait for the request to cancel, then set the * request inactive. The AO will not run. * If the request is not active, it does nothing. 
* Request must be canceled from the same thread * in which it is scheduled. */ OSCL_IMPORT_REF void Cancel(); /** * Return scheduling priority of this active object. */ OSCL_IMPORT_REF int32 Priority() const; /** * Request status access */ OSCL_IMPORT_REF int32 Status()const; OSCL_IMPORT_REF void SetStatus(int32); OSCL_IMPORT_REF int32 StatusRef(); protected: /** * Cancel request handler. * This gets called by scheduler when the request * is cancelled. The default routine will cancel * the timer. If any additional action is needed, * the derived class may override this. If the * derived class does override this, it should explicitly * call OsclTimerObject::DoCancel in its own DoCancel * routine. */ //OSCL_IMPORT_REF virtual void DoCancel(); /** * Run Error handler. * This gets called by scheduler when the Run routine leaves. * The default implementation simply returns the leave code. * If the derived class wants to handle errors from Run, * it may override this. The RunError should return OsclErrNone * if it handles the error, otherwise it should return the * input error code. * @param aError: the leave code generated by the Run. */ //OSCL_IMPORT_REF virtual int32 RunError(int32 aError); }; #endif // OSCL_BASE_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_base_macros.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
* See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_BASE_MACROS_H_INCLUDED #define OSCL_BASE_MACROS_H_INCLUDED #ifndef OSCL_UNUSED_ARG #define OSCL_UNUSED_ARG(x) (void)(x) #endif #endif // OSCL_BASE_MACROS_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_config.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef OSCL_CONFIG_H_INCLUDED #define OSCL_CONFIG_H_INCLUDED #define OSCL_HAS_BREW_SUPPORT 0 //Not yet supported #define OSCL_HAS_SYMBIAN_SUPPORT 0 // Not yet supported #define OSCL_HAS_LINUX_SUPPORT 1 #endif // OSCL_CONFIG_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_dll.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef OSCL_DLL_H_INCLUDED
#define OSCL_DLL_H_INCLUDED

/* Expands to a DLL entry point with an empty body. */
#define OSCL_DLL_ENTRY_POINT() void oscl_dll_entry_point() {}

/**
 * Default DLL entry/exit point function.
 *
 * The body of the DLL entry point is given. The macro
 * only needs to be declared within the source file.
 *
 * Usage :
 *
 *   OSCL_DLL_ENTRY_POINT_DEFAULT()
 */
/* Stubbed to nothing in this port: no default entry point is emitted. */
#define OSCL_DLL_ENTRY_POINT_DEFAULT()

#endif // OSCL_DLL_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_error.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_ERROR_H_INCLUDED
#define OSCL_ERROR_H_INCLUDED

/* OSCL_LEAVE is compiled out in this port: error "leaves" become no-ops,
   so callers must rely on return codes instead of OSCL leave handling. */
#define OSCL_LEAVE(x)

#endif //OSCL_ERROR_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_error_codes.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef OSCL_ERROR_CODES_H_INCLUDED
#define OSCL_ERROR_CODES_H_INCLUDED

/** Leave Codes */
/* NOTE(review): int32 is assumed to come from a prior include
   (oscl_types.h); this header does not include it itself -- confirm
   include order at each use site. */
typedef int32 OsclLeaveCode;

#define OsclErrNone 0
#define OsclErrGeneral 100
#define OsclErrNoMemory 101
#define OsclErrCancelled 102
#define OsclErrNotSupported 103
#define OsclErrArgument 104
#define OsclErrBadHandle 105
#define OsclErrAlreadyExists 106
#define OsclErrBusy 107
#define OsclErrNotReady 108
#define OsclErrCorrupt 109
#define OsclErrTimeout 110
#define OsclErrOverflow 111
#define OsclErrUnderflow 112
#define OsclErrInvalidState 113
#define OsclErrNoResources 114

/** For backward compatibility with old definitions */
#define OSCL_ERR_NONE OsclErrNone
#define OSCL_BAD_ALLOC_EXCEPTION_CODE OsclErrNoMemory

/** Return Codes */
typedef int32 OsclReturnCode;

#define OsclSuccess 0
#define OsclPending 1
#define OsclFailure -1

#endif

/*! @} */


================================================
FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_exception.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/* Empty stub: OSCL exception support is compiled out in this port. */
#ifndef OSCL_EXCEPTION_H_INCLUDED
#define OSCL_EXCEPTION_H_INCLUDED
#endif // INCLUDED_OSCL_EXCEPTION_H


================================================
FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_math.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #ifndef OSCL_MATH_H_INCLUDED #define OSCL_MATH_H_INCLUDED #include #define oscl_pow pow #define oscl_exp exp #define oscl_sqrt sqrt #define oscl_log log #define oscl_cos cos #define oscl_sin sin #define oscl_tan tan #define oscl_asin asin #endif // OSCL_MATH_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_mem.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* -------------------------------------------------------------------
*/
#ifndef OSCL_MEM_H_INCLUDED
#define OSCL_MEM_H_INCLUDED

#include "oscl_types.h"

/* Byte-count type used by the wrappers below. */
#define OSCLMemSizeT size_t

/* Thin macro wrappers over the C library memory routines.
   NOTE(review): this header itself includes neither <string.h> nor
   <stdlib.h>; it appears to rely on oscl_types.h (or the including
   translation unit) to pull them in -- confirm. */
#define oscl_memcpy(dest, src, count) memcpy((void *)(dest), (const void *)(src), (OSCLMemSizeT)(count))
#define oscl_memset(dest, ch, count) memset((void *)(dest), (unsigned char)(ch), (OSCLMemSizeT)(count))
#define oscl_memmove(dest, src, bytecount) memmove((void *)(dest), (const void *)(src), (OSCLMemSizeT)(bytecount))
#define oscl_memcmp(buf1, buf2, count) memcmp( (const void *)(buf1), (const void *)(buf2), (OSCLMemSizeT)(count))
#define oscl_malloc(size) malloc((OSCLMemSizeT)(size))
#define oscl_free(memblock) free((void *)(memblock))

/* C++ allocation helpers. */
#define OSCL_ARRAY_DELETE(ptr) delete [] ptr
#define OSCL_ARRAY_NEW(T, count) new T[count]
#define OSCL_DELETE(memblock) delete memblock
#define OSCL_NEW(arg) new arg

#endif // OSCL_MEM_H_INCLUDED


================================================
FILE: RtspCamera/jni/m4v_h263/enc/oscl/oscl_types.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*! \file oscl_types.h
    \brief This file contains basic type definitions for common use across platforms.
*/ #ifndef OSCL_TYPES_H_INCLUDED #define OSCL_TYPES_H_INCLUDED #include #include #include #include #include #include //! A typedef for a signed 8 bit integer. #ifndef int8 typedef signed char int8; #endif //! A typedef for an unsigned 8 bit integer. #ifndef uint8 typedef unsigned char uint8; #endif //! A typedef for a signed 16 bit integer. #ifndef int16 typedef short int16; #endif //! A typedef for an unsigned 16 bit integer. #ifndef uint16 typedef unsigned short uint16; #endif //! A typedef for a signed 32 bit integer. #ifndef int32 typedef long int32; #endif //! A typedef for an unsigned 32 bit integer. #ifndef uint32 typedef unsigned long uint32; #endif #ifndef sint8 typedef signed char sint8; #endif #ifndef OsclFloat typedef float OsclFloat; #endif #ifndef uint typedef unsigned int uint; #endif #ifndef int64 #define OSCL_HAS_NATIVE_INT64_TYPE 1 #define OSCL_NATIVE_INT64_TYPE long long typedef OSCL_NATIVE_INT64_TYPE int64; #endif // int64 #ifndef uint64 #define OSCL_HAS_NATIVE_UINT64_TYPE 1 #define OSCL_NATIVE_UINT64_TYPE unsigned long long typedef OSCL_NATIVE_UINT64_TYPE uint64; #endif // uint64 #ifndef OSCL_UNUSED_ARG #define OSCL_UNUSED_ARG(x) (void)(x) #endif #ifndef OSCL_EXPORT_REF #define OSCL_EXPORT_REF #endif #ifndef OSCL_IMPORT_REF #define OSCL_IMPORT_REF #endif #if defined(OSCL_DISABLE_INLINES) #define OSCL_INLINE #define OSCL_COND_EXPORT_REF OSCL_EXPORT_REF #define OSCL_COND_IMPORT_REF OSCL_IMPORT_REF #else #define OSCL_INLINE inline #define OSCL_COND_IMPORT_REF #define OSCL_COND_IMPORT_REF #endif #ifndef INT64 #define INT64 int64 #endif #ifndef UINT64 #define UINT64 uint64 #endif #ifndef UINT64_HILO #define UINT64_HILO(a,b) ((a<<32) | b) #endif #endif // OSCL_TYPES_H_INCLUDED ================================================ FILE: RtspCamera/jni/m4v_h263/enc/oscl/osclconfig_compiler_warnings.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ // -*- c++ -*- // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = // O S C L C O N F I G _ C O M P I L E R _ W A R N I N G S // = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = /*! \file osclconfig_compiler_warnings.h * \brief This file contains the ability to turn off/on compiler warnings * */ // This macro enables the "#pragma GCC system_header" found in any header file that // includes this config file. // "#pragma GCC system_header" suppresses compiler warnings in the rest of that header // file by treating the header as a system header file. // For instance, foo.h has 30 lines, "#pragma GCC system_header" is inserted at line 10, // from line 11 to the end of file, all compiler warnings are disabled. // However, this does not affect any files that include foo.h. // #ifdef __GNUC__ #define OSCL_DISABLE_GCC_WARNING_SYSTEM_HEADER #endif #define OSCL_FUNCTION_PTR(x) (&x) ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/bitstream_io.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* Date: 8/02/04 */ /* Description: */ /* Change the bitstream parsing algorithm. Use temporary word of 2 or 4 bytes */ /* before writing it to the bitstream buffer. */ /* Note byteCount doesn't have to be multiple of 2 or 4 */ /*********************************************************************************/ #include "bitstream_io.h" #include "m4venc_oscl.h" static const UChar Mask[ ] = { 0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF }; #define WORD_SIZE 4 /* for 32-bit machine */ /*Note: 1. There is a problem when output the last bits(which can not form a byte yet so when you output, you need to stuff to make sure it is a byte 2. 
I now hard coded byte to be 8 bits*/

/* ======================================================================== */
/*  Function : BitStreamCreateEnc(Int bufferSize )                          */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Allocate and initialize a bitstream that holds one encoded   */
/*             video packet or frame.                                       */
/*  In/out   : bufferSize : size of the bitstream buffer in bytes           */
/*  Return   : Pointer to the BitstreamEncVideo, or NULL on allocation      */
/*             failure (partial allocations are released).                  */
/*  Modified :                                                              */
/* ======================================================================== */
BitstreamEncVideo *BitStreamCreateEnc(Int bufferSize)
{
    BitstreamEncVideo *stream;
    stream = (BitstreamEncVideo *) M4VENC_MALLOC(sizeof(BitstreamEncVideo));
    if (stream == NULL)
    {
        return NULL;
    }
    stream->bufferSize = bufferSize;
    stream->bitstreamBuffer = (UChar *) M4VENC_MALLOC(stream->bufferSize * sizeof(UChar));
    if (stream->bitstreamBuffer == NULL)
    {
        /* don't leak the stream struct when the buffer allocation fails */
        M4VENC_FREE(stream);
        stream = NULL;
        return NULL;
    }
    M4VENC_MEMSET(stream->bitstreamBuffer, 0, stream->bufferSize*sizeof(UChar));
    stream->word = 0;
#if WORD_SIZE==4
    stream->bitLeft = 32;   /* the whole 32-bit accumulator is empty */
#else
    stream->bitLeft = 16;
#endif
    stream->byteCount = 0;
    stream->overrunBuffer = NULL;
    stream->oBSize = 0;
    return stream;
}

/* ======================================================================== */
/*  Function : BitstreamCloseEnc( )                                         */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Close a bitstream: free the buffer, then the stream itself.  */
/*             Safe to call with NULL.                                      */
/*  In/out   : stream : the bitstream to be closed                          */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
Void BitstreamCloseEnc(BitstreamEncVideo *stream)
{
    if (stream)
    {
        if (stream->bitstreamBuffer)
        {
            M4VENC_FREE(stream->bitstreamBuffer);
        }
        M4VENC_FREE(stream);
    }
}

/* ======================================================================== */
/*  Function : BitstreamPutBits(BitstreamEncVideo *stream, Int Length, Int Value) */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : put Length (1-16) number of bits to the stream               */
/*             for 32-bit machine this function can do upto 32 bit input    */
/*  In/out   : stream      the bitstream
where the bits are put in                       */
/*             Length      bits length (should belong to 1 to 16)           */
/*             Value       those bits value                                 */
/*  Return   : PV_STATUS                                                    */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamPutBits(BitstreamEncVideo *stream, Int Length, UInt Value)
{
    PV_STATUS status;

    /* Fast path: the requested bits fit into the 32-bit accumulator. */
    if (stream->bitLeft > Length)
    {
        stream->word <<= Length;
        stream->word |= Value;  /* assuming Value is not larger than Length */
        stream->bitLeft -= Length;
        return PV_SUCCESS;
    }
    else
    {
        /* Fill the accumulator to the top with the high part of Value,
           flush it to the buffer, then keep the low part for the next word. */
        stream->word <<= stream->bitLeft;
        Length -= stream->bitLeft;
        stream->word |= ((UInt)Value >> Length);

        status = BitstreamSaveWord(stream);     /* resets word=0, bitLeft=32 */
        if (status != PV_SUCCESS)
        {
            return status;
        }

        /* we got new Length and Value */
        /* note that Value is not "clean" because of msb are not masked out;
           those stray high bits sit above the used region of the word and
           are shifted off the top before the word is ever written out, so
           they never reach the buffer. */
        stream->word = Value;
        stream->bitLeft -= Length;
        /* assuming that Length is no more than 16 bits */
        /* stream->bitLeft should be greater than zero at this point */
        //if(stream->bitLeft<=0)
        //  exit(-1);
        return PV_SUCCESS;
    }
}

/* ======================================================================== */
/*  Function : BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, UInt32 Value) */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Use this function to put Length (17-32) number of bits to    */
/*             the stream (split needed for 16-bit machines; harmless here).
*/
/*  In/out   : stream      the bitstream where the bits are put in          */
/*             Length      bits length (should belong to 17 to 32)          */
/*             Value       those bits value                                 */
/*  Return   : PV_STATUS                                                    */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, ULong Value)
{
    PV_STATUS status;
    UInt topValue;
    Int topLength;

    /* Split into the part above 16 bits and the low 16 bits. */
    topValue = (Value >> 16);
    topLength = Length - 16;

    if (topLength > 0)
    {
        status = BitstreamPutBits(stream, topLength, topValue);
        if (status != PV_SUCCESS)
        {
            return status;
        }
        status = BitstreamPutBits(stream, 16, (UInt)(Value & 0xFFFF));
        return status;
    }
    else
    {
        /* Length <= 16: a single put suffices. */
        status = BitstreamPutBits(stream, Length, (UInt)Value);
        return status;
    }
}

/* ======================================================================== */
/*  Function : BitstreamSaveWord                                            */
/*  Date     : 08/03/2004                                                   */
/*  Purpose  : Write the full accumulator word to the bitstream buffer      */
/*             (big-endian byte order) and reset it.                        */
/*  In/out   : stream      the bitstream where the bits are put in          */
/*  Return   : PV_STATUS (PV_FAIL if the buffer is full and no overrun      */
/*             buffer can be engaged)                                       */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamSaveWord(BitstreamEncVideo *stream)
{
    UChar *ptr;
    UInt word;

    /* assume that stream->bitLeft is always zero when this function is called */
    if (stream->byteCount + WORD_SIZE > stream->bufferSize)
    {
        if (PV_SUCCESS != BitstreamUseOverrunBuffer(stream, WORD_SIZE))
        {
            stream->byteCount += WORD_SIZE;     /* keep counting for diagnostics */
            return PV_FAIL;
        }
    }

    ptr = stream->bitstreamBuffer + stream->byteCount;
    word = stream->word;
    stream->word = 0; /* important to reset to zero */

    /* NOTE: byteCount does not have to be multiple of 2 or 4 */
#if (WORD_SIZE == 4)
    *ptr++ = word >> 24;
    *ptr++ = 0xFF & (word >> 16);
#endif
    *ptr++ = 0xFF & (word >> 8);
    *ptr = 0xFF & word;

#if (WORD_SIZE == 4)
    stream->byteCount += 4;
    stream->bitLeft = 32;
#else
    stream->byteCount += 2;
    stream->bitLeft = 16;
#endif

    return PV_SUCCESS;
}

/* ========================================================================
*/
/*  Function : BitstreamSavePartial                                         */
/*  Date     : 08/03/2004                                                   */
/*  Purpose  : Flush only the fully-used bytes of the accumulator to the    */
/*             buffer; any fractional byte stays in stream->word.           */
/*  In/out   : stream      the bitstream where the bits are put in          */
/*             fraction    set to 1 if a fractional byte remains, else 0    */
/*  Return   : PV_STATUS                                                    */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamSavePartial(BitstreamEncVideo *stream, Int *fraction)
{
    UChar *ptr;
    UInt word, shift;
    Int numbyte, bitleft, bitused;

    bitleft = stream->bitLeft;
    bitused = (WORD_SIZE << 3) - bitleft; /* number of bits used */
    numbyte = bitused >> 3;  /* number of byte fully used */

    if (stream->byteCount + numbyte > stream->bufferSize)
    {
        if (PV_SUCCESS != BitstreamUseOverrunBuffer(stream, numbyte))
        {
            stream->byteCount += numbyte;
            return PV_FAIL;
        }
    }

    ptr = stream->bitstreamBuffer + stream->byteCount;
    word = stream->word;
    word <<= bitleft;   /* word is not all consumed; align used bits to the top */
    bitleft = bitused - (numbyte << 3); /* number of bits used (fraction) */
    stream->byteCount += numbyte;
    if (bitleft)
    {
        *fraction = 1;
    }
    else
    {
        *fraction = 0;
    }
    bitleft = (WORD_SIZE << 3) - bitleft;  /* save new value */
    stream->bitLeft = bitleft;

    /* emit the fully-used bytes, most-significant first */
    shift = ((WORD_SIZE - 1) << 3);
    while (numbyte)
    {
        *ptr++ = (UChar)((word >> shift) & 0xFF);
        word <<= 8;
        numbyte--;
    }

    if (*fraction)
    {// this could lead to buffer overrun when ptr is already out of bound.
        //  *ptr = (UChar)((word>>shift)&0xFF); /* need to do it for the last fractional byte */
    }

    /* save new values */
    stream->word = word >> bitleft;

    /* note we don't update byteCount, bitLeft and word */
    /* so that encoder can continue PutBits if they don't */

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : BitstreamShortHeaderByteAlignStuffing(                       */
/*                                          BitstreamEncVideo *stream)      */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : bit stuffing for next start code in short video header       */
/*             (H.263 style: stuff with zero bits up to a byte boundary).   */
/*  In/out   :                                                              */
/*  Return   : number of bits to be stuffed                                 */
/*  Modified :                                                              */
/* ======================================================================== */
Int BitstreamShortHeaderByteAlignStuffing(BitstreamEncVideo *stream)
{
    UInt restBits;
    Int fraction;

    restBits = (stream->bitLeft & 0x7); /* modulo 8 */

    if (restBits)  /*short_video_header[0] is 1 in h263 baseline*/
    {
        /* H.263 style stuffing */
        BitstreamPutBits(stream, restBits, 0);
    }

    if (stream->bitLeft != (WORD_SIZE << 3))
    {
        BitstreamSavePartial(stream, &fraction);
    }

    return restBits;
}

/* ======================================================================== */
/*  Function : BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream)   */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : bit stuffing for next start code in MPEG-4: one zero bit     */
/*             followed by one-bits up to the byte boundary ("0111...1").   */
/*  In/out   :                                                              */
/*  Return   : number of bits to be stuffed (after the leading zero)        */
/*  Modified :                                                              */
/* ======================================================================== */
Int BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream)
{
    UInt restBits;
    Int fraction;

    /* Question: in MPEG-4 , short_video_header[0]==0 => even already byte
       aligned, will still stuff 8 bits; need to check */
    /*if (!(getPointerENC(index1, index2)%8) && short_video_header[0]) return 0;*/

    /* need stuffing bits, */
    BitstreamPutBits(stream, 1, 0);

    restBits = (stream->bitLeft & 0x7); /* modulo 8 */

    if (restBits)  /*short_video_header[0] is 1 in h263 baseline*/
    {
        /* need stuffing bits, */
        BitstreamPutBits(stream, restBits, Mask[restBits]);
    }

    if (stream->bitLeft != (WORD_SIZE << 3))
    {
        BitstreamSavePartial(stream, &fraction);
    }

    return (restBits);
}

/*does bit stuffing for next resync marker*/
/* does bit stuffing for next resync marker
 *      "0"
 *     "01"
 *    "011"
 *   "0111"
 *  "01111"
 * "011111"
 * "0111111"
 * "01111111" (8-bit codeword)
 */
/*Int BitstreamNextResyncMarkerEnc(BitstreamEncVideo *stream)
{
    Int count;
    BitstreamPut1Bits(stream,0);
    count=8-stream->totalBits & 8;
    BitstreamPutBits(stream,count,Mask[count]);
    return count;
}*/

/* ======================================================================== */
/*  Function : BitstreamAppendEnc( BitstreamEncVideo *bitstream1,           */
/*                                 BitstreamEncVideo *bitstream2 )          */
/*  Date     : 08/29/2000                                                   */
/*  Purpose  : Append the intermediate bitstream (bitstream2) to the end of */
/*             output bitstream(bitstream1); handles the case where         */
/*             bitstream1 is not byte-aligned by merging byte by byte.      */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamAppendEnc(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
    PV_STATUS status;
    UChar *ptrBS2, *ptrBS1;
    UChar byteBS2, byteBS1;
    Int  numbyte2;
    Int bitused, bitleft, offset, fraction;

    status = BitstreamSavePartial(bitstream1, &fraction);
    if (status != PV_SUCCESS)
    {
        return status;
    }

    offset = fraction;
    status = BitstreamSavePartial(bitstream2, &fraction);
    if (status != PV_SUCCESS)
    {
        return status;
    }

    if (!offset)   /* bitstream1 is byte-aligned */
    {
        return BitstreamAppendPacket(bitstream1, bitstream2);
    }

    offset += fraction;

    /* since bitstream1 doesn't have to be byte-aligned, we have to process byte by byte */
    /* we read one byte from bitstream2 and use BitstreamPutBits to do the job */
    if (bitstream1->byteCount + bitstream2->byteCount + offset > bitstream1->bufferSize)
    {
        if (PV_SUCCESS != BitstreamUseOverrunBuffer(bitstream1, bitstream2->byteCount + offset))
        {
            bitstream1->byteCount += (bitstream2->byteCount + offset);
            return PV_FAIL;
        }
    }

    ptrBS1 = bitstream1->bitstreamBuffer + bitstream1->byteCount; /* move ptr bs1*/
    ptrBS2 = bitstream2->bitstreamBuffer;

    bitused = (WORD_SIZE << 3) - bitstream1->bitLeft; /* this must be between 1-7 */
    bitleft = 8 - bitused;

    numbyte2 = bitstream2->byteCount;   /* number of byte to copy from bs2 */
    bitstream1->byteCount += numbyte2;  /* new byteCount */

    byteBS1 = ((UChar) bitstream1->word) << bitleft;    /* fraction byte from bs1 */

    /* merge: each output byte is the bs1 remainder plus the top of the next
       bs2 byte; the bottom of that bs2 byte becomes the new remainder */
    while (numbyte2)
    {
        byteBS2 = *ptrBS2++;
        byteBS1 |= (byteBS2 >> bitused);
        *ptrBS1++ = byteBS1;
        byteBS1 = byteBS2 << bitleft;
        numbyte2--;
    }

    bitstream1->word = byteBS1 >> bitleft;  /* bitstream->bitLeft remains the same */

    /* now save bs2->word in bs1 */
    status = BitstreamPutBits(bitstream1, (WORD_SIZE << 3) - bitstream2->bitLeft, bitstream2->word);

    return status;
}

/* ======================================================================== */
/*  Function : BitstreamAppendPacket( BitstreamEncVideo *bitstream1,        */
/*                                    BitstreamEncVideo *bitstream2 )       */
/*  Date     : 05/31/2001                                                   */
/*  Purpose  : Append the intermediate bitstream (bitstream2) to the end of */
/*             output bitstream(bitstream1) knowing that bitstream1 is      */
/*             byte-aligned                                                 */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamAppendPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
    UChar *ptrBS2, *ptrBS1;
    Int  numbyte2;

    if (bitstream1->byteCount + bitstream2->byteCount > bitstream1->bufferSize)
    {
        if (PV_SUCCESS != BitstreamUseOverrunBuffer(bitstream1, bitstream2->byteCount))
        {
            bitstream1->byteCount += bitstream2->byteCount; /* legacy, to keep track of total bytes */
            return PV_FAIL;
        }
    }

    ptrBS1 = bitstream1->bitstreamBuffer + bitstream1->byteCount; /* move ptr bs1*/
    ptrBS2 = bitstream2->bitstreamBuffer;

    numbyte2 = bitstream2->byteCount;
    bitstream1->byteCount += numbyte2; /* new byteCount */

    /*copy all the bytes in bitstream2*/
    M4VENC_MEMCPY(ptrBS1, ptrBS2, sizeof(UChar)*numbyte2);

    /* take over bs2's partial accumulator as well */
    bitstream1->word = bitstream2->word; /* bitstream1->bitLeft is the same */
    bitstream1->bitLeft = bitstream2->bitLeft;

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : BitstreamAppendPacketNoOffset( BitstreamEncVideo *bitstream1,*/
/*                                            BitstreamEncVideo *bitstream2 )*/
/*  Date     : 04/23/2002                                                   */
/*  Purpose  : Append the intermediate bitstream (bitstream2) to the end of */
/*             output bitstream(bitstream1) , for slice-based coding only;  */
/*             copies to the START of bitstream1 and, on overflow, keeps    */
/*             the uncopied tail at the front of bitstream2.                */
/*  In/out   :                                                              */
/*  Return   : PV_SUCCESS or PV_END_OF_BUF                                  */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamAppendPacketNoOffset(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
    PV_STATUS status = PV_SUCCESS;
    UChar *ptrBS2, *ptrBS1;
    Int  numbyte2;
    Int  byteleft;

    numbyte2 = bitstream2->byteCount;

    if (bitstream1->byteCount + bitstream2->byteCount > bitstream1->bufferSize)
    {
        numbyte2 = bitstream1->bufferSize - bitstream1->byteCount;
        status = PV_END_OF_BUF;  /* signal end of buffer */
    }

    ptrBS1 = bitstream1->bitstreamBuffer; /* move ptr bs1*/
    ptrBS2 = bitstream2->bitstreamBuffer;

    bitstream1->byteCount += numbyte2; /* should be equal to bufferSize */

    /*copy all the bytes in bitstream2*/
    M4VENC_MEMCPY(ptrBS1, ptrBS2, sizeof(UChar)*numbyte2);
    bitstream1->word = 0;
    bitstream1->bitLeft = (WORD_SIZE << 3);

    if (status == PV_END_OF_BUF) /* re-position bitstream2 */
    {
        byteleft = bitstream2->byteCount - numbyte2;

        M4VENC_MEMCPY(ptrBS2, ptrBS2 + numbyte2, sizeof(UChar)*byteleft);

        bitstream2->byteCount = byteleft;
        /* bitstream2->word and bitstream->bitLeft are unchanged.
           they should be 0 and (WORD_SIZE<<3) */
    }

    return status;
}

#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/*  Function : BitstreamRepos( BitstreamEncVideo *bitstream,                */
/*                             Int byteCount, Int bitCount)                 */
/*  Date     : 04/28/2002                                                   */
/*  Purpose  : Reposition the size of the buffer content (curtail); reloads */
/*             the accumulator from the bytes at the new position.          */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
PV_STATUS BitstreamRepos(BitstreamEncVideo *bitstream, Int byteCount, Int bitCount)
{
    UChar *ptr, byte;
    UInt word;
    Int fraction;

    BitstreamSavePartial(bitstream, &fraction);

    bitstream->byteCount = byteCount;
    ptr = bitstream->bitstreamBuffer + byteCount; /* get fraction of the byte */
    if (bitCount)
    {
        bitstream->bitLeft = (WORD_SIZE << 3) - bitCount; /* bitCount should be 0-31 */
        word = *ptr++;
        byte = *ptr++;
        word = byte | (word << 8);
#if (WORD_SIZE == 4)
        byte = *ptr++;
        word = byte | (word << 8);
        byte = *ptr++;
        word = byte | (word << 8);
#endif
        bitstream->word = word >> (bitstream->bitLeft);
    }
    else
    {
        bitstream->word = 0;
        bitstream->bitLeft = (WORD_SIZE << 3);
    }

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : BitstreamFlushBits(BitstreamEncVideo *bitstream1,            */
/*                                Int num_bit_left)                         */
/*  Date     : 04/24/2002                                                   */
/*  Purpose  : Flush buffer except the last num_bit_left bits.
*/ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS BitstreamFlushBits(BitstreamEncVideo *bitstream1, Int num_bit_left) { Int i; UChar *ptrDst, *ptrSrc; Int leftover, bitused; Int new_byte = (num_bit_left >> 3); Int new_bit = num_bit_left - (new_byte << 3); /* between 0-7 */ ptrSrc = bitstream1->bitstreamBuffer + bitstream1->byteCount; ptrDst = bitstream1->bitstreamBuffer; bitused = (WORD_SIZE << 3) - bitstream1->bitLeft; leftover = 8 - bitused; /* bitused should be between 0-7 */ bitstream1->byteCount = new_byte; bitstream1->bitLeft = (WORD_SIZE << 3) - new_bit; if (!bitused) /* byte aligned */ { M4VENC_MEMCPY(ptrDst, ptrSrc, new_byte + 1); } else { /*copy all the bytes in bitstream2*/ for (i = 0; i < new_byte; i++) { *ptrDst++ = (ptrSrc[0] << bitused) | (ptrSrc[1] >> leftover); ptrSrc++; } /* copy for the last byte of ptrSrc, copy extra bits doesn't hurt */ if (new_bit) { *ptrDst++ = (ptrSrc[0] << bitused) | (ptrSrc[1] >> leftover); ptrSrc++; } } if (new_bit) { ptrSrc = bitstream1->bitstreamBuffer + new_byte; bitstream1->word = (*ptrSrc) >> (8 - new_bit); } return PV_SUCCESS; } /* ======================================================================== */ /* Function : BitstreamPrependPacket( BitstreamEncVideo *bitstream1, */ /* BitstreamEncVideo *bitstream2 ) */ /* Date : 04/26/2002 */ /* Purpose : Prepend the intermediate bitstream (bitstream2) to the beginning of */ /* output bitstream(bitstream1) */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS BitstreamPrependPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2) { UChar *pSrc, *pDst, byte; Int movebyte, bitused, leftover, i, fraction; BitstreamSavePartial(bitstream2, &fraction); /* make sure only fraction of byte left */ BitstreamSavePartial(bitstream1, &fraction); if (bitstream1->byteCount + bitstream2->byteCount >= 
bitstream1->bufferSize) { bitstream1->byteCount += bitstream2->byteCount; return PV_END_OF_BUF; } movebyte = bitstream1->byteCount; if (movebyte < bitstream2->byteCount) movebyte = bitstream2->byteCount; movebyte++; /* shift bitstream1 to the right by movebyte */ pSrc = bitstream1->bitstreamBuffer; pDst = pSrc + movebyte; M4VENC_MEMCPY(pDst, pSrc, bitstream1->byteCount + 1); /* copy bitstream2 to the beginning of bitstream1 */ M4VENC_MEMCPY(pSrc, bitstream2->bitstreamBuffer, bitstream2->byteCount + 1); /* now shift back previous bitstream1 buffer to the end */ pSrc = pDst; pDst = bitstream1->bitstreamBuffer + bitstream2->byteCount; bitused = (WORD_SIZE << 3) - bitstream2->bitLeft; leftover = 8 - bitused; /* bitused should be 0-7 */ byte = (bitstream2->word) << leftover; *pDst++ = byte | (pSrc[0] >> bitused); for (i = 0; i < bitstream1->byteCount + 1; i++) { *pDst++ = ((pSrc[0] << leftover) | (pSrc[1] >> bitused)); pSrc++; } bitstream1->byteCount += bitstream2->byteCount; //bitstream1->bitCount += bitstream2->bitCount; bitused = (WORD_SIZE << 4) - (bitstream1->bitLeft + bitstream2->bitLeft); if (bitused >= 8) { bitused -= 8; bitstream1->byteCount++; } bitstream1->bitLeft = (WORD_SIZE << 3) - bitused; bitstream2->byteCount = bitstream2->word = 0; bitstream2->bitLeft = (WORD_SIZE << 3); pSrc = bitstream1->bitstreamBuffer + bitstream1->byteCount; leftover = 8 - bitused; //*pSrc = (pSrc[0]>>leftover)<word = (UInt)((pSrc[0]) >> leftover); return PV_SUCCESS; } #endif /* NO_SLICE_ENCODE */ /* ======================================================================== */ /* Function : BitstreamGetPos( BitstreamEncVideo *stream */ /* Date : 08/05/2004 */ /* Purpose : Get the bit position. 
*/
/*  In/out   :                                                              */
/*  Return   : total number of bits written so far (whole bytes plus the    */
/*             bits currently held in the accumulator)                      */
/*  Modified :                                                              */
/* ======================================================================== */
Int BitstreamGetPos(BitstreamEncVideo *stream)
{
    return stream->byteCount*8 + (WORD_SIZE << 3) - stream->bitLeft;
}

/* Reset the stream to empty: full accumulator available, no bytes written. */
void BitstreamEncReset(BitstreamEncVideo *stream)
{
    stream->bitLeft = (WORD_SIZE << 3);
    stream->word = 0;
    stream->byteCount = 0;
    return ;
}

/* This function set the overrun buffer, and VideoEncData context
   for callback to reallocate overrun buffer.  */
Void BitstreamSetOverrunBuffer(BitstreamEncVideo* stream, UChar* overrunBuffer, Int oBSize, VideoEncData *video)
{
    stream->overrunBuffer = overrunBuffer;
    stream->oBSize = oBSize;
    stream->video = video;
    return ;
}

/* determine whether overrun buffer can be used or not:
   switches the stream onto the (possibly grown) overrun buffer so that
   numExtraBytes more bytes can be written; returns PV_FAIL when no
   overrun buffer is configured or allocation fails. */
PV_STATUS BitstreamUseOverrunBuffer(BitstreamEncVideo* stream, Int numExtraBytes)
{
    VideoEncData *video = stream->video;

    if (stream->overrunBuffer != NULL) // overrunBuffer is set
    {
        if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used
        {
            if (stream->byteCount + numExtraBytes >= stream->oBSize)
            {
                stream->oBSize = stream->byteCount + numExtraBytes + 100;
                stream->oBSize &= (~0x3); // make it multiple of 4

                // allocate new overrun Buffer
                if (video->overrunBuffer)
                {
                    M4VENC_FREE(video->overrunBuffer);
                }
                video->oBSize = stream->oBSize;
                video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * stream->oBSize);
                stream->overrunBuffer = video->overrunBuffer;
                if (stream->overrunBuffer == NULL)
                {
                    return PV_FAIL;
                }
            }
            // copy everything to overrun buffer and start using it.
            oscl_memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->byteCount);
            stream->bitstreamBuffer = stream->overrunBuffer;
            stream->bufferSize = stream->oBSize;
        }
        else // overrun buffer is already used
        {
            if (stream->byteCount + numExtraBytes >= stream->oBSize)
            {
                stream->oBSize = stream->byteCount + numExtraBytes + 100;
            }

            // allocate new overrun buffer
            stream->oBSize &= (~0x3); // make it multiple of 4
            video->oBSize = stream->oBSize;
            video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * stream->oBSize);
            if (video->overrunBuffer == NULL)
            {
                return PV_FAIL;
            }

            // copy from the old buffer to new buffer
            oscl_memcpy(video->overrunBuffer, stream->overrunBuffer, stream->byteCount);
            // free old buffer
            M4VENC_FREE(stream->overrunBuffer);
            // assign pointer to new buffer
            stream->overrunBuffer = video->overrunBuffer;
            stream->bitstreamBuffer = stream->overrunBuffer;
            stream->bufferSize = stream->oBSize;
        }

        return PV_SUCCESS;
    }
    else // overrunBuffer is not enable.
    {
        return PV_FAIL;
    }
}


================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/bitstream_io.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef _BITSTREAM_IO_H_
#define _BITSTREAM_IO_H_

/* Convenience wrappers: single-bit and >8-bit puts both route through
   the generic BitstreamPutBits entry point. */
#define BitstreamPut1Bits(x,y)  BitstreamPutBits(x,1,y)
#define BitstreamPutGT8Bits(x,y,z)  BitstreamPutBits(x,y,z)

#include "mp4lib_int.h"

#ifdef __cplusplus
extern "C"
{
#endif

    /* ---- lifetime ---- */
    BitstreamEncVideo *BitStreamCreateEnc(Int bufferSize);
    Void BitstreamCloseEnc(BitstreamEncVideo *stream);

    /* ---- bit-level writing ---- */
    PV_STATUS BitstreamPutBits(BitstreamEncVideo *stream, Int Length, UInt Value);
    PV_STATUS BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, ULong Value);
    PV_STATUS BitstreamSaveWord(BitstreamEncVideo *stream);
    PV_STATUS BitstreamSavePartial(BitstreamEncVideo *stream, Int *fraction);

    /* ---- position / reset ---- */
    Int BitstreamGetPos(BitstreamEncVideo *stream);
    void BitstreamEncReset(BitstreamEncVideo *stream);

    /* ---- byte-align stuffing (H.263 short header vs MPEG-4 flavors) ---- */
    Int BitstreamShortHeaderByteAlignStuffing(BitstreamEncVideo *stream);
    Int BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream);

    /* ---- packet / stream assembly ---- */
    PV_STATUS BitstreamAppendEnc(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
    PV_STATUS BitstreamAppendPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
    PV_STATUS BitstreamAppendPacketNoOffset(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
    PV_STATUS BitstreamRepos(BitstreamEncVideo *bitstream, Int byteCount, Int bitCount);
    PV_STATUS BitstreamFlushBits(BitstreamEncVideo *bitstream1, Int num_bit_left);
    PV_STATUS BitstreamPrependPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);

    /* ---- overrun (spill) buffer support ---- */
    Void BitstreamSetOverrunBuffer(BitstreamEncVideo *stream, UChar *overrunBuffer, Int oBSize, VideoEncData *video);
    PV_STATUS BitstreamUseOverrunBuffer(BitstreamEncVideo* stream, Int numExtraBytes);

#ifdef __cplusplus
}
#endif

#endif /* _BITSTREAM_IO_H_ */

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.cpp
================================================
/*
------------------------------------------------------------------ * Copyright (C) 2009 OrangeLabs * * Author: Alexis Gilabert Senar * Date: 2009-07-01 * ------------------------------------------------------------------- */ #define LOG_TAG "NativeEnc" #include "com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.h" #include #include "mp4enc_api.h" VideoEncControls iEncoderControl; VideoEncOptions aEncOption; int iSrcHeight; int iSrcWidth; unsigned long long NextTimestamp; uint8* aOutBuffer; uint8* YUV; uint8* yuvPtr; ULong modTime; /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder * Method: InitEncoder * Signature: (Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/h263/encoder/EncOptions;)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_InitEncoder (JNIEnv *env, jclass iclass, jobject params) { /** * Clean encoder */ memset((void*) &iEncoderControl, 0, sizeof(VideoEncControls)); PVCleanUpVideoEncoder(&iEncoderControl); /** * Setting encoder options */ PVGetDefaultEncOption(&aEncOption, 0); jclass objClass = (env)->GetObjectClass(params); if (objClass == NULL) return -2; jfieldID encMode = (env)->GetFieldID(objClass,"encMode","I"); if (encMode == NULL) return -3; aEncOption.encMode= (MP4EncodingMode)(env)->GetIntField(params,encMode); //LOGI("[H263Encoder parameters] encMode = %d",aEncOption.encMode); jfieldID packetSize = (env)->GetFieldID(objClass,"packetSize","I"); if (packetSize == NULL) return -4; aEncOption.packetSize= (env)->GetIntField(params,packetSize); //LOGI("[H263Encoder parameters] packetSize = %d",aEncOption.packetSize); jfieldID profile_level = (env)->GetFieldID(objClass,"profile_level","I"); if (profile_level == NULL) return -5; aEncOption.profile_level= (ProfileLevelType)(env)->GetIntField(params,profile_level); //LOGI("[H263Encoder parameters] profile_level = %d",aEncOption.profile_level); jfieldID 
rvlcEnable = (env)->GetFieldID(objClass,"rvlcEnable","Z"); if (rvlcEnable == NULL) return -6; aEncOption.rvlcEnable = ((env)->GetBooleanField(params,rvlcEnable) == true)? (ParamEncMode)1:(ParamEncMode)0; //LOGI("[H263Encoder parameters] rvlcEnable = %d",aEncOption.rvlcEnable); jfieldID gobHeaderInterval = (env)->GetFieldID(objClass,"gobHeaderInterval","I"); if (gobHeaderInterval == NULL) return -7; aEncOption.gobHeaderInterval = (env)->GetIntField(params,gobHeaderInterval); //LOGI("[H263Encoder parameters] gobHeaderInterval = %d",aEncOption.gobHeaderInterval); jfieldID numLayers = (env)->GetFieldID(objClass,"numLayers","I"); if (numLayers == NULL) return -8; aEncOption.numLayers= (env)->GetIntField(params,numLayers); //LOGI("[H263Encoder parameters] numLayers = %d",aEncOption.numLayers); jfieldID timeIncRes = (env)->GetFieldID(objClass,"timeIncRes","I"); if (timeIncRes == NULL) return -9; aEncOption.timeIncRes = (env)->GetIntField(params,timeIncRes); //LOGI("[H263Encoder parameters] timeIncRes = %d",aEncOption.timeIncRes); jfieldID tickPerSrc = (env)->GetFieldID(objClass,"tickPerSrc","I"); if (tickPerSrc == NULL) return -10; aEncOption.tickPerSrc= (env)->GetIntField(params,tickPerSrc); //LOGI("[H263Encoder parameters] tickPerSrc = %d",aEncOption.tickPerSrc); jfieldID encHeight = (env)->GetFieldID(objClass,"encHeight","I"); if (encHeight == NULL) return -11; aEncOption.encHeight[0] = aEncOption.encHeight[1] = (env)->GetIntField(params,encHeight); //LOGI("[H263Encoder parameters] encHeight = %d",aEncOption.encHeight[0]); jfieldID encWidth = (env)->GetFieldID(objClass,"encWidth","I"); if (encWidth == NULL) return -12; aEncOption.encWidth[0] = aEncOption.encWidth[1] = (env)->GetIntField(params,encWidth); //LOGI("[H263Encoder parameters] encWidth = %d",aEncOption.encWidth[0]); jfieldID encFrameRate = (env)->GetFieldID(objClass,"encFrameRate","F"); if (encFrameRate == NULL) return -13; aEncOption.encFrameRate[0] = aEncOption.encFrameRate[1] = 
(env)->GetFloatField(params,encFrameRate); //LOGI("[H263Encoder parameters] encFrameRate = %f",aEncOption.encFrameRate[0]); jfieldID bitRate = (env)->GetFieldID(objClass,"bitRate","I"); if (bitRate == NULL) return -14; aEncOption.bitRate[0] = aEncOption.bitRate[1] = (env)->GetIntField(params,bitRate); //LOGI("[H263Encoder parameters] bitRate = %d",aEncOption.bitRate[0]); jfieldID iQuant = (env)->GetFieldID(objClass,"iQuant","I"); if (iQuant == NULL) return -15; aEncOption.iQuant[0] = aEncOption.iQuant[1] = (env)->GetIntField(params,iQuant); //LOGI("[H263Encoder parameters] iQuant = %d",aEncOption.iQuant[0]); jfieldID pQuant = (env)->GetFieldID(objClass,"pQuant","I"); if (pQuant == NULL) return -16; aEncOption.pQuant[0] = aEncOption.pQuant[1] = (env)->GetIntField(params,pQuant); //LOGI("[H263Encoder parameters] pQuant = %d",aEncOption.pQuant[0]); jfieldID quantType = (env)->GetFieldID(objClass,"quantType","I"); if (quantType == NULL) return -17; aEncOption.quantType[0] = aEncOption.quantType[1] = (env)->GetIntField(params,quantType); //LOGI("[H263Encoder parameters] quantType = %d",aEncOption.quantType[0]); jfieldID rcType = (env)->GetFieldID(objClass,"rcType","I"); if (rcType == NULL) return -18; aEncOption.rcType = (MP4RateControlType)(env)->GetIntField(params,rcType); //LOGI("[H263Encoder parameters] rcType = %d",aEncOption.rcType); jfieldID vbvDelay = (env)->GetFieldID(objClass,"vbvDelay","F"); if (vbvDelay == NULL) return -19; aEncOption.vbvDelay = (env)->GetFloatField(params,vbvDelay); //LOGI("[H263Encoder parameters] vbvDelay = %f",aEncOption.vbvDelay); jfieldID noFrameSkipped = (env)->GetFieldID(objClass,"noFrameSkipped","Z"); if (noFrameSkipped == NULL) return -20; aEncOption.noFrameSkipped = ((env)->GetBooleanField(params,noFrameSkipped) == true)? 
(ParamEncMode)1:(ParamEncMode)0; //LOGI("[H263Encoder parameters] noFrameSkipped = %d",aEncOption.noFrameSkipped); jfieldID intraPeriod = (env)->GetFieldID(objClass,"intraPeriod","I"); if (intraPeriod == NULL) return -21; aEncOption.intraPeriod = (env)->GetIntField(params,intraPeriod); //LOGI("[H263Encoder parameters] intraPeriod = %d",aEncOption.intraPeriod); jfieldID numIntraMB = (env)->GetFieldID(objClass,"numIntraMB","I"); if (numIntraMB == NULL) return -22; aEncOption.numIntraMB = (env)->GetIntField(params,numIntraMB); //LOGI("[H263Encoder parameters] numIntraMB = %d",aEncOption.numIntraMB); jfieldID sceneDetect = (env)->GetFieldID(objClass,"sceneDetect","Z"); if (sceneDetect == NULL) return -23; aEncOption.sceneDetect = ((env)->GetBooleanField(params,sceneDetect) == true)?(ParamEncMode)1:(ParamEncMode)0; //LOGI("[H263Encoder parameters] sceneDetect = %d",aEncOption.sceneDetect); jfieldID searchRange = (env)->GetFieldID(objClass,"searchRange","I"); if (searchRange == NULL) return -24; aEncOption.searchRange = (env)->GetIntField(params,searchRange); //LOGI("[H263Encoder parameters] searchRange = %d",aEncOption.searchRange); jfieldID mv8x8Enable = (env)->GetFieldID(objClass,"mv8x8Enable","Z"); if (mv8x8Enable == NULL) return -25; aEncOption.mv8x8Enable = ((env)->GetBooleanField(params,mv8x8Enable)==true)?(ParamEncMode)1:(ParamEncMode)0; //LOGI("[H263Encoder parameters] mv8x8Enable = %d",aEncOption.mv8x8Enable); jfieldID intraDCVlcTh = (env)->GetFieldID(objClass,"intraDCVlcTh","I"); if (intraDCVlcTh == NULL) return -26; aEncOption.intraDCVlcTh = (env)->GetIntField(params,intraDCVlcTh); //LOGI("[H263Encoder parameters] intraDCVlcTh= %d",aEncOption.intraDCVlcTh); jfieldID useACPred = (env)->GetFieldID(objClass,"useACPred","Z"); if (useACPred == NULL) return -27; aEncOption.useACPred = ((env)->GetBooleanField(params,useACPred)==true)?1:0; //LOGI("[H263Encoder parameters] useACPred = %d",aEncOption.useACPred); /** * Init */ iSrcWidth = aEncOption.encWidth[0]; 
//LOGI("[H263Encoder parameters] iSrcWidth = %d",iSrcWidth); iSrcHeight = aEncOption.encHeight[0]; //LOGI("[H263Encoder parameters] iSrcHeight = %d",iSrcHeight); modTime = 0; /** * Init ptr for encode method */ YUV = (uint8*)malloc((iSrcWidth*iSrcHeight*3/2)); if (YUV == NULL){ return -1; } yuvPtr = YUV; aOutBuffer = (uint8*)malloc((iSrcWidth*iSrcHeight*3/2)); if (aOutBuffer == NULL){ return -1; } /** * Init encoder */ return PVInitVideoEncoder(&iEncoderControl, &aEncOption); } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder * Method: EncodeFrame * Signature: ([BIJ)[B */ JNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_EncodeFrame (JNIEnv *env, jclass iclass, jbyteArray frame, jlong timestamp) { VideoEncFrameIO vid_in, vid_out; int Size = 0; uint status; int nLayer = 0; jbyteArray result ; /** * Init for EncodeFrame */ jint len = env->GetArrayLength(frame); // Read the byte array uint8* data = (uint8*)malloc(len); uint8* whereToFree = data; env->GetByteArrayRegion (frame, (jint)0, (jint)len, (jbyte*)data); // Convert YUV input to have distinct Y U and V channels // Copy Y data yuvPtr = YUV; for (int i=0;i>2); uint16 temp = 0; uint16* iVideoPtr = (uint16*)data; for (int i=0;i<(iSrcHeight>>1);i++){ for (int j=0;j<(iSrcWidth>>1);j++){ temp = *iVideoPtr++; // U1V1 *vPos++= (uint8)(temp & 0xFF); *uPos++= (uint8)((temp >> 8) & 0xFF); } } vid_in.height = iSrcHeight; vid_in.pitch = iSrcWidth; vid_in.timestamp = (ULong)(timestamp & 0xFFFFFFFF); vid_in.yChan = YUV; vid_in.uChan = (YUV + vid_in.height * vid_in.pitch); vid_in.vChan = vid_in.uChan + ((vid_in.height * vid_in.pitch) >> 2); Size = len; // encode the frame status = PVEncodeVideoFrame(&iEncoderControl, &vid_in, &vid_out, &modTime, (UChar*)aOutBuffer, &Size, &nLayer); if (status != 1) return (env)->NewByteArray(1); // Copy aOutBuffer into result result=(env)->NewByteArray(Size); 
(env)->SetByteArrayRegion(result, 0, Size, (jbyte*)aOutBuffer); free(whereToFree); // Return return result; } /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder * Method: DeinitEncoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_DeinitEncoder (JNIEnv *env, jclass clazz){ return PVCleanUpVideoEncoder(&iEncoderControl); } /* * This is called by the VM when the shared library is first loaded. */ jint JNI_OnLoad(JavaVM* vm, void* reserved) { JNIEnv* env = NULL; jint result = -1; if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) { goto bail; } /* success -- return valid version number */ result = JNI_VERSION_1_4; bail: return result; } ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.h ================================================ /* DO NOT EDIT THIS FILE - it is machine generated */ #include /* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder */ #ifndef _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder #define _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder #ifdef __cplusplus extern "C" { #endif /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder * Method: InitEncoder * Signature: (Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/h263/encoder/EncOptions;)I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_InitEncoder (JNIEnv *, jclass, jobject); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder * Method: EncodeFrame * Signature: ([BJ)[B */ JNIEXPORT jbyteArray JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_EncodeFrame (JNIEnv *, jclass, jbyteArray, jlong); /* * Class: com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder * Method: DeinitEncoder * Signature: ()I */ JNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_DeinitEncoder (JNIEnv *, jclass); #ifdef __cplusplus } #endif #endif ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/combined_encode.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4def.h" #include "mp4enc_lib.h" #include "mp4lib_int.h" #include "bitstream_io.h" #include "vlc_encode.h" #include "m4venc_oscl.h" PV_STATUS EncodeGOBHeader(VideoEncData *video, Int GOB_number, Int quant_scale, Int bs1stream); /* ======================================================================== */ /* Function : EncodeFrameCombinedMode() */ /* Date : 09/01/2000 */ /* History : */ /* Purpose : Encode a frame of MPEG4 bitstream in Combined mode. 
*/ /* In/out : */ /* Return : PV_SUCCESS if successful else PV_FAIL */ /* Modified : */ /* */ /* ======================================================================== */ PV_STATUS EncodeFrameCombinedMode(VideoEncData *video) { PV_STATUS status = PV_SUCCESS; Vol *currVol = video->vol[video->currLayer]; Vop *currVop = video->currVop; VideoEncParams *encParams = video->encParams; Int width = currVop->width; /* has to be Vop, for multiple of 16 */ Int lx = currVop->pitch; /* with padding */ Int offset = 0; Int ind_x, ind_y; Int start_packet_header = 0; UChar *QPMB = video->QPMB; Int QP; Int mbnum = 0, slice_counter = 0, curr_slice_counter = 0; Int num_bits, packet_size = encParams->ResyncPacketsize; Int GOB_Header_Interval = encParams->GOB_Header_Interval; BitstreamEncVideo *bs1 = video->bitstream1; Int numHeaderBits; approxDCT fastDCTfunction; Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB, 5/18/2001 */ PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]); void (*MBVlcEncode)(VideoEncData*, Int[], void *); void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar); /* for H263 GOB changes */ //MP4RateControlType rc_type = encParams->RC_Type; video->QP_prev = currVop->quantizer; numHeaderBits = BitstreamGetPos(bs1); /* determine type of quantization */ #ifndef NO_MPEG_QUANT if (currVol->quantType == 0) CodeMB = &CodeMB_H263; else CodeMB = &CodeMB_MPEG; #else CodeMB = &CodeMB_H263; #endif /* determine which functions to be used, in MB-level */ if (currVop->predictionType == P_VOP) MBVlcEncode = &MBVlcEncodeCombined_P_VOP; else if (currVop->predictionType == I_VOP) MBVlcEncode = &MBVlcEncodeCombined_I_VOP; else /* B_VOP not implemented yet */ return PV_FAIL; /* determine which VLC table to be used */ #ifndef H263_ONLY if (currVol->shortVideoHeader) BlockCodeCoeff = &BlockCodeCoeff_ShortHeader; #ifndef NO_RVLC else if (currVol->useReverseVLC) BlockCodeCoeff = &BlockCodeCoeff_RVLC; #endif else BlockCodeCoeff = 
&BlockCodeCoeff_Normal; #else BlockCodeCoeff = &BlockCodeCoeff_ShortHeader; #endif /* gob_frame_id is the same for different vop types - the reason should be SCD */ if (currVol->shortVideoHeader && currVop->gobFrameID != currVop->predictionType) currVop->gobFrameID = currVop->predictionType; video->usePrevQP = 0; for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++) /* Col MB Loop */ { video->outputMB->mb_y = ind_y; /* 5/28/01 */ if (currVol->shortVideoHeader) /* ShortVideoHeader Mode */ { if (slice_counter && GOB_Header_Interval && (ind_y % GOB_Header_Interval == 0)) /* Encode GOB Header */ { QP = QPMB[mbnum]; /* Get quant_scale */ video->header_bits -= BitstreamGetPos(currVol->stream); /* Header Bits */ status = EncodeGOBHeader(video, slice_counter, QP, 0); //ind_y /* Encode GOB Header */ video->header_bits += BitstreamGetPos(currVol->stream); /* Header Bits */ curr_slice_counter = slice_counter; } } for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++) /* Row MB Loop */ { video->outputMB->mb_x = ind_x; /* 5/28/01 */ video->mbnum = mbnum; QP = QPMB[mbnum]; /* always read new QP */ if (GOB_Header_Interval) video->sliceNo[mbnum] = curr_slice_counter; /* Update MB slice number */ else video->sliceNo[mbnum] = slice_counter; /****************************************************************************************/ /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB */ /****************************************************************************************/ getMotionCompensatedMB(video, ind_x, ind_y, offset); #ifndef H263_ONLY if (start_packet_header) { slice_counter++; /* Increment slice counter */ video->sliceNo[mbnum] = slice_counter; /* Update MB slice number*/ video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */ video->QP_prev = currVop->quantizer; status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0); video->header_bits += BitstreamGetPos(bs1); /* Header Bits */ numHeaderBits = BitstreamGetPos(bs1); 
start_packet_header = 0; video->usePrevQP = 0; } #endif /***********************************************/ /* Code_MB: DCT, Q, Q^(-1), IDCT, Motion Comp */ /***********************************************/ status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck); /************************************/ /* MB VLC Encode: VLC Encode MB */ /************************************/ (*MBVlcEncode)(video, ncoefblck, (void*)BlockCodeCoeff); /*************************************************************/ /* Assemble Packets: Assemble the MB VLC codes into Packets */ /*************************************************************/ /* Assemble_Packet(video) */ #ifndef H263_ONLY if (!currVol->shortVideoHeader) /* Not in ShortVideoHeader mode */ { if (!currVol->ResyncMarkerDisable) /* RESYNC MARKER MODE */ { num_bits = BitstreamGetPos(bs1) - numHeaderBits; if (num_bits > packet_size) { video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */ status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */ /* continue even if status == PV_END_OF_BUF, to get the stats */ BitstreamEncReset(bs1); start_packet_header = 1; } } else /* NO RESYNC MARKER MODE */ { status = BitstreamAppendEnc(currVol->stream, bs1); /* Initialize to 0 */ /* continue even if status == PV_END_OF_BUF, to get the stats */ BitstreamEncReset(bs1); } } else #endif /* H263_ONLY */ { /* ShortVideoHeader Mode */ status = BitstreamAppendEnc(currVol->stream, bs1); /* Initialize to 0 */ /* continue even if status == PV_END_OF_BUF, to get the stats */ BitstreamEncReset(bs1); } mbnum++; offset += 16; } /* End of For ind_x */ offset += (lx << 4) - width; if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */ { if (GOB_Header_Interval) slice_counter++; } } /* End of For ind_y */ if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */ { video->header_bits += BitstreamShortHeaderByteAlignStuffing(currVol->stream); /* Byte Align */ } #ifndef H263_ONLY else /* 
Combined Mode*/ { if (!currVol->ResyncMarkerDisable) /* Resync Markers */ { if (!start_packet_header) { video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1);/* Byte Align */ status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */ /* continue even if status == PV_END_OF_BUF, to get the stats */ BitstreamEncReset(bs1); } } else /* No Resync Markers */ { video->header_bits += BitstreamMpeg4ByteAlignStuffing(currVol->stream); /* Byte Align */ } } #endif /* H263_ONLY */ return status; /* if status == PV_END_OF_BUF, this frame will be pre-skipped */ } #ifndef NO_SLICE_ENCODE /* ======================================================================== */ /* Function : EncodeSliceCombinedMode() */ /* Date : 04/19/2002 */ /* History : */ /* Purpose : Encode a slice of MPEG4 bitstream in Combined mode and save */ /* the current MB to continue next time it is called. */ /* In/out : */ /* Return : PV_SUCCESS if successful else PV_FAIL */ /* Modified : */ /* */ /* ======================================================================== */ PV_STATUS EncodeSliceCombinedMode(VideoEncData *video) { PV_STATUS status = PV_SUCCESS; Vol *currVol = video->vol[video->currLayer]; Vop *currVop = video->currVop; UChar mode = MODE_INTRA; UChar *Mode = video->headerInfo.Mode; VideoEncParams *encParams = video->encParams; Int nTotalMB = currVol->nTotalMB; Int width = currVop->width; /* has to be Vop, for multiple of 16 */ Int lx = currVop->pitch; /* , with padding */ // rateControl *rc = encParams->rc[video->currLayer]; UChar *QPMB = video->QPMB; Int QP; Int ind_x = video->outputMB->mb_x, ind_y = video->outputMB->mb_y; Int offset = video->offset; /* get current MB location */ Int mbnum = video->mbnum, slice_counter = video->sliceNo[mbnum]; /* get current MB location */ Int firstMB = mbnum; Int start_packet_header = 0; Int num_bits = 0; Int packet_size = encParams->ResyncPacketsize - 1; Int resync_marker = ((!currVol->shortVideoHeader) && 
(!currVol->ResyncMarkerDisable)); BitstreamEncVideo *bs1 = video->bitstream1; Int byteCount = 0, byteCount1 = 0, bitCount = 0; Int numHeaderBits = 0; approxDCT fastDCTfunction; Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB, 5/18/2001 */ UChar CBP = 0; Short outputMB[6][64]; Int k; PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]); void (*MBVlcEncode)(VideoEncData*, Int[], void *); void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar); video->QP_prev = 31; #define H263_GOB_CHANGES if (video->end_of_buf) /* left-over from previous run */ { status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); if (status != PV_END_OF_BUF) { BitstreamEncReset(bs1); video->end_of_buf = 0; } return status; } if (mbnum == 0) /* only do this at the start of a frame */ { QPMB[0] = video->QP_prev = QP = currVop->quantizer; video->usePrevQP = 0; numHeaderBits = BitstreamGetPos(bs1); } /* Re-assign fast functions on every slice, don't have to put it in the memory */ QP = QPMB[mbnum]; if (mbnum > 0) video->QP_prev = QPMB[mbnum-1]; /* determine type of quantization */ #ifndef NO_MPEG_QUANT if (currVol->quantType == 0) CodeMB = &CodeMB_H263; else CodeMB = &CodeMB_MPEG; #else CodeMB = &CodeMB_H263; #endif /* determine which functions to be used, in MB-level */ if (currVop->predictionType == P_VOP) MBVlcEncode = &MBVlcEncodeCombined_P_VOP; else if (currVop->predictionType == I_VOP) MBVlcEncode = &MBVlcEncodeCombined_I_VOP; else /* B_VOP not implemented yet */ return PV_FAIL; /* determine which VLC table to be used */ #ifndef H263_ONLY if (currVol->shortVideoHeader) BlockCodeCoeff = &BlockCodeCoeff_ShortHeader; #ifndef NO_RVLC else if (currVol->useReverseVLC) BlockCodeCoeff = &BlockCodeCoeff_RVLC; #endif else BlockCodeCoeff = &BlockCodeCoeff_Normal; #else BlockCodeCoeff = &BlockCodeCoeff_ShortHeader; #endif /* (gob_frame_id is the same for different vop types) The reason should be SCD */ if (currVol->shortVideoHeader && currVop->gobFrameID != 
currVop->predictionType) currVop->gobFrameID = currVop->predictionType; if (mbnum != 0) { if (currVol->shortVideoHeader) { /* Encode GOB Header */ bitCount = BitstreamGetPos(bs1); byteCount1 = byteCount = bitCount >> 3; /* save the position before GOB header */ bitCount = bitCount & 0x7; #ifdef H263_GOB_CHANGES video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */ status = EncodeGOBHeader(video, slice_counter, QP, 1); //ind_y /* Encode GOB Header */ video->header_bits += BitstreamGetPos(bs1); /* Header Bits */ #endif goto JUMP_IN_SH; } else if (currVol->ResyncMarkerDisable) { goto JUMP_IN_SH; } else { start_packet_header = 1; goto JUMP_IN; } } for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++) /* Col MB Loop */ { video->outputMB->mb_y = ind_y; /* 5/28/01, do not remove */ for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++) /* Row MB Loop */ { video->outputMB->mb_x = ind_x; /* 5/28/01, do not remove */ video->mbnum = mbnum; video->sliceNo[mbnum] = slice_counter; /* Update MB slice number */ JUMP_IN_SH: /****************************************************************************************/ /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB */ /****************************************************************************************/ getMotionCompensatedMB(video, ind_x, ind_y, offset); JUMP_IN: QP = QPMB[mbnum]; /* always read new QP */ #ifndef H263_ONLY if (start_packet_header) { slice_counter++; /* Increment slice counter */ video->sliceNo[mbnum] = slice_counter; /* Update MB slice number*/ video->QP_prev = currVop->quantizer; /* store QP */ num_bits = BitstreamGetPos(bs1); status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 1); numHeaderBits = BitstreamGetPos(bs1) - num_bits; video->header_bits += numHeaderBits; /* Header Bits */ start_packet_header = 0; video->usePrevQP = 0; } else /* don't encode the first MB in packet again */ #endif /* H263_ONLY */ { /***********************************************/ /* 
Code_MB: DCT, Q, Q^(-1), IDCT, Motion Comp */ /***********************************************/ status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck); } /************************************/ /* MB VLC Encode: VLC Encode MB */ /************************************/ /* save the state before VLC encoding */ if (resync_marker) { bitCount = BitstreamGetPos(bs1); byteCount = bitCount >> 3; /* save the state before encoding */ bitCount = bitCount & 0x7; mode = Mode[mbnum]; CBP = video->headerInfo.CBP[mbnum]; for (k = 0; k < 6; k++) { M4VENC_MEMCPY(outputMB[k], video->outputMB->block[k], sizeof(Short) << 6); } } /*************************************/ (*MBVlcEncode)(video, ncoefblck, (void*)BlockCodeCoeff); /*************************************************************/ /* Assemble Packets: Assemble the MB VLC codes into Packets */ /*************************************************************/ /* Assemble_Packet(video) */ #ifndef H263_ONLY if (!currVol->shortVideoHeader) { if (!currVol->ResyncMarkerDisable) { /* Not in ShortVideoHeader mode and RESYNC MARKER MODE */ num_bits = BitstreamGetPos(bs1) ;//- numHeaderBits; // include header /* Assemble packet and return when size reached */ if (num_bits > packet_size && mbnum != firstMB) { BitstreamRepos(bs1, byteCount, bitCount); /* rewind one MB */ video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */ status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Put Packet to Buffer */ if (status == PV_END_OF_BUF) { video->end_of_buf = 1; } else { BitstreamEncReset(bs1); } start_packet_header = 1; if (mbnum < nTotalMB || video->end_of_buf) /* return here */ { video->mbnum = mbnum; video->sliceNo[mbnum] = slice_counter; video->offset = offset; Mode[mbnum] = mode; video->headerInfo.CBP[mbnum] = CBP; for (k = 0; k < 6; k++) { M4VENC_MEMCPY(video->outputMB->block[k], outputMB[k], sizeof(Short) << 6); } return status; } } } else /* NO RESYNC MARKER , return when buffer is 
full*/ { if (mbnum < nTotalMB - 1 && currVol->stream->byteCount + bs1->byteCount + 1 >= currVol->stream->bufferSize) { /* find maximum bytes to fit in the buffer */ byteCount = currVol->stream->bufferSize - currVol->stream->byteCount - 1; num_bits = BitstreamGetPos(bs1) - (byteCount << 3); BitstreamRepos(bs1, byteCount, 0); status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); BitstreamFlushBits(bs1, num_bits); /* move on to next MB */ mbnum++ ; offset += 16; video->outputMB->mb_x++; if (video->outputMB->mb_x >= currVol->nMBPerRow) { video->outputMB->mb_x = 0; video->outputMB->mb_y++; offset += (lx << 4) - width; } video->mbnum = mbnum; video->offset = offset; video->sliceNo[mbnum] = slice_counter; return status; } } } #endif /* H263_ONLY */ offset += 16; mbnum++; /* has to increment before SCD, to preserve Mode[mbnum] */ } /* End of For ind_x */ offset += (lx << 4) - width; if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */ { #ifdef H263_GOB_CHANGES slice_counter++; video->header_bits += BitstreamShortHeaderByteAlignStuffing(bs1); #endif //video->header_bits+=BitstreamShortHeaderByteAlignStuffing(bs1); /* check if time to packetize */ if (currVol->stream->byteCount + bs1->byteCount > currVol->stream->bufferSize) { if (byteCount == byteCount1) /* a single GOB bigger than packet size */ { status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); status = PV_END_OF_BUF; video->end_of_buf = 1; start_packet_header = 1; } else /* for short_header scooch back to previous GOB */ { num_bits = ((bs1->byteCount - byteCount) << 3); //num_bits = ((bs1->byteCount<<3) + bs1->bitCount) - ((byteCount<<3) + bitCount); BitstreamRepos(bs1, byteCount, 0); //BitstreamRepos(bs1,byteCount,bitCount); // k = currVol->stream->byteCount; /* save state before appending */ status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); BitstreamFlushBits(bs1, num_bits); // if(mbnum == nTotalMB || k + bs1->byteCount >= currVol->stream->bufferSize){ /* last GOB or current one 
with larger size will be returned next run */ // status = PV_END_OF_BUF; // video->end_of_buf = 1; // } start_packet_header = 1; if (mbnum == nTotalMB) /* there's one more GOB to packetize for the next round */ { status = PV_END_OF_BUF; video->end_of_buf = 1; } } if (mbnum < nTotalMB) /* return here */ { /* move on to next MB */ video->outputMB->mb_x = 0; video->outputMB->mb_y++; video->mbnum = mbnum; video->offset = offset; video->sliceNo[mbnum] = slice_counter; return status; } } else if (mbnum < nTotalMB) /* do not write GOB header if end of vop */ { bitCount = BitstreamGetPos(bs1); byteCount = bitCount >> 3; /* save the position before GOB header */ bitCount = bitCount & 0x7; #ifdef H263_GOB_CHANGES video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */ status = EncodeGOBHeader(video, slice_counter, QP, 1); /* Encode GOB Header */ video->header_bits += BitstreamGetPos(bs1); /* Header Bits */ #endif } } } /* End of For ind_y */ #ifndef H263_ONLY if (!currVol->shortVideoHeader) /* Combined Mode*/ { if (!currVol->ResyncMarkerDisable) /* Resync Markers */ { if (!start_packet_header) { video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1);/* Byte Align */ status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Put Packet to Buffer */ if (status == PV_END_OF_BUF) { video->end_of_buf = 1; } else { BitstreamEncReset(bs1); } } } else /* No Resync Markers */ { video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte Align */ status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Initialize to 0 */ if (status == PV_END_OF_BUF) { video->end_of_buf = 1; } else { BitstreamEncReset(bs1); } } } else #endif /* H263_ONLY */ { if (!start_packet_header) /* not yet packetized */ { video->header_bits += BitstreamShortHeaderByteAlignStuffing(bs1); status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); if (status == PV_END_OF_BUF) { video->end_of_buf = 1; } else { BitstreamEncReset(bs1); video->end_of_buf = 0; } } } video->mbnum = 
mbnum;
    if (mbnum < nTotalMB)
        video->sliceNo[mbnum] = slice_counter;
    video->offset = offset;

    return status;
}
#endif /* NO_SLICE_ENCODE */

/* ======================================================================== */
/*  Function : EncodeGOBHeader()                                            */
/*  Date     : 09/05/2000                                                   */
/*  History  :                                                              */
/*  Purpose  : Emit an H.263 GOB (Group Of Blocks) header into the chosen   */
/*             bitstream: 17-bit gob_resync_marker, 5-bit gob_number,       */
/*             2-bit gob_frame_id and 5-bit quant_scale.                    */
/*  In/out   : video       - encoder state (supplies the target streams)    */
/*             GOB_number  - index of the GOB being started                 */
/*             quant_scale - quantizer announced for this GOB               */
/*             bs1stream   - nonzero: write into video->bitstream1          */
/*                           (slice-mode scratch stream); zero: write into  */
/*                           the current layer's VOL stream                 */
/*  Return   : PV_SUCCESS if successful else PV_FAIL                        */
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
PV_STATUS EncodeGOBHeader(VideoEncData *video, Int GOB_number, Int quant_scale, Int bs1stream)
{
    PV_STATUS status = PV_SUCCESS;
    /* bs1stream selects the scratch slice bitstream vs. the VOL stream */
    BitstreamEncVideo *stream = (bs1stream ? video->bitstream1 : video->vol[video->currLayer]->stream);

    /* NOTE: each Put overwrites 'status'; only the last call's result is
       returned — earlier failures are not propagated (pre-existing behavior) */
    status = BitstreamPutGT16Bits(stream, 17, GOB_RESYNC_MARKER); /* gob_resync_marker */
    status = BitstreamPutBits(stream, 5, GOB_number);        /* Current gob_number */
    status = BitstreamPutBits(stream, 2, video->currVop->gobFrameID);  /* gob_frame_id */
    status = BitstreamPutBits(stream, 5, quant_scale);       /* quant_scale */
    return status;
}

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/datapart_encode.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef H263_ONLY

#include "mp4def.h"
#include "mp4lib_int.h"
#include "bitstream_io.h"
#include "mp4enc_lib.h"
#include "m4venc_oscl.h"

/* ======================================================================== */
/*  Function : EncodeFrameDataPartMode()                                    */
/*  Date     : 09/6/2000                                                    */
/*  History  :                                                              */
/*  Purpose  : Encode a frame of MPEG4 bitstream in datapartitioning mode.  */
/*             Header/MV data, DC data and texture data are accumulated in  */
/*             three separate bitstreams (bs1/bs2/bs3) and stitched into    */
/*             video packets separated by dc_marker / motion_marker.        */
/*  In/out   : video - full encoder state; the packetized output is         */
/*             appended to the current VOL's stream.                        */
/*  Return   : PV_SUCCESS if successful else PV_FAIL                        */
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

PV_STATUS EncodeFrameDataPartMode(VideoEncData *video)
{
    PV_STATUS status = PV_SUCCESS;
    Vol *currVol = video->vol[video->currLayer];
    Vop *currVop = video->currVop;
    VideoEncParams *encParams = video->encParams;
    Int width = currVop->width; /* has to be Vop, for multiple of 16 */
    Int lx = currVop->pitch; /* with padding */
    Int offset = 0;                      /* byte offset of the current MB in the frame */
    Int ind_x, ind_y;
    Int start_packet_header = 0;         /* flag: begin a new video packet before next MB */
    UChar *QPMB = video->QPMB;           /* per-MB quantizer table */
    Int QP;
    Int mbnum = 0, slice_counter = 0;
    Int num_bits, packet_size = encParams->ResyncPacketsize;
    BitstreamEncVideo *bs1 = video->bitstream1;  /* partition 1: headers + MVs */
    BitstreamEncVideo *bs2 = video->bitstream2;  /* partition 2: DC values */
    BitstreamEncVideo *bs3 = video->bitstream3;  /* partition 3: AC texture */
    Int numHeaderBits;
    approxDCT fastDCTfunction;
    Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB,  5/18/2001 */
    PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);
    void (*MBVlcEncode)(VideoEncData*, Int[], void *);
    void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);

    video->QP_prev = currVop->quantizer;

    numHeaderBits = BitstreamGetPos(bs1); /* Number of bits in VOP Header */

    /* determine type of quantization */
#ifndef NO_MPEG_QUANT
    if (currVol->quantType == 0)
        CodeMB = &CodeMB_H263;
    else
        CodeMB = &CodeMB_MPEG;
#else
    CodeMB = &CodeMB_H263;
#endif

    /* determine which functions to be used, in MB-level */
    if (currVop->predictionType == P_VOP)
        MBVlcEncode = &MBVlcEncodeDataPar_P_VOP;
    else if (currVop->predictionType == I_VOP)
        MBVlcEncode = &MBVlcEncodeDataPar_I_VOP;
    else /* B_VOP not implemented yet */
        return PV_FAIL;

    /* determine which VLC table to be used */
    if (currVol->shortVideoHeader)
        BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;
#ifndef NO_RVLC
    else if (currVol->useReverseVLC)
        BlockCodeCoeff = &BlockCodeCoeff_RVLC;
#endif
    else
        BlockCodeCoeff = &BlockCodeCoeff_Normal;

    video->usePrevQP = 0;

    for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++)    /* Col MB Loop */
    {
        video->outputMB->mb_y = ind_y; /*  5/28/01 */

        for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++)  /* Row MB Loop */
        {
            video->outputMB->mb_x = ind_x; /*  5/28/01 */
            video->mbnum = mbnum;
            video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number */
            QP = QPMB[mbnum];   /* always read new QP */

            /****************************************************************************************/
            /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB          */
            /****************************************************************************************/
            getMotionCompensatedMB(video, ind_x, ind_y, offset);

            if (start_packet_header)
            {
                slice_counter++;    /* Increment slice counter */
                video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number*/
                video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */
                video->QP_prev = currVop->quantizer;    /* store QP */
                status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);
                video->header_bits += BitstreamGetPos(bs1); /* Header Bits */
                numHeaderBits = BitstreamGetPos(bs1);
                start_packet_header = 0;
                video->usePrevQP = 0;
            }

            /***********************************************/
            /* Code_MB:  DCT, Q, Q^(-1), IDCT, Motion Comp */
            /***********************************************/
            status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);

            /************************************/
            /* MB VLC Encode: VLC Encode MB     */
            /************************************/
            MBVlcEncode(video, ncoefblck, (void*)BlockCodeCoeff);

            /*************************************************************/
            /* Assemble Packets:  Assemble the MB VLC codes into Packets */
            /*************************************************************/
            /* INCLUDE VOP HEADER IN COUNT */
            num_bits = BitstreamGetPos(bs1) + BitstreamGetPos(bs2)
                       + BitstreamGetPos(bs3) - numHeaderBits;

            /* Assemble_Packet(video) */
            if (num_bits > packet_size)
            {
                /* partition separator between partition 1 and 2 */
                if (video->currVop->predictionType == I_VOP)
                    BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */
                else
                    BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB);  /*Add motion_marker*/

                BitstreamAppendEnc(bs1, bs2);   /* Combine bs1 and bs2 */
                BitstreamAppendEnc(bs1, bs3);   /* Combine bs1 and bs3 */
                video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */

                status = BitstreamAppendPacket(currVol->stream, bs1);   /* Put Packet to Buffer */
                /* continue even if status == PV_END_OF_BUF, to get the stats */

                BitstreamEncReset(bs1); /* Initialize to 0 */
                BitstreamEncReset(bs2);
                BitstreamEncReset(bs3);
                start_packet_header = 1;
            }
            mbnum++;
            offset += 16;
        } /* End of For ind_x */

        offset += (lx << 4) - width;  /* hop to next MB row (pitch minus row width) */
    } /* End of For ind_y */

    if (!start_packet_header)  /* flush the final, partially-filled packet */
    {
        if (video->currVop->predictionType == I_VOP)
        {
            BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */
            video->header_bits += 19;
        }
        else
        {
            BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB);  /* Add motion_marker */
            video->header_bits += 17;
        }

        BitstreamAppendEnc(bs1, bs2);
        BitstreamAppendEnc(bs1, bs3);
        video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */

        status = BitstreamAppendPacket(currVol->stream, bs1);   /* Put Packet to Buffer */
        /* continue even if status == PV_END_OF_BUF, to get the stats */

        BitstreamEncReset(bs1); /* Initialize to 0 */
        BitstreamEncReset(bs2);
        BitstreamEncReset(bs3);
    }

    return status; /* if status == PV_END_OF_BUF, this frame will be pre-skipped */
}

#ifndef NO_SLICE_ENCODE

/* ======================================================================== */
/*  Function :
EncodeSliceDataPartMode()                                  */
/*  Date     : 04/19/2002                                                   */
/*  History  :                                                              */
/*  Purpose  : Encode a slice of MPEG4 bitstream in DataPar mode and save   */
/*             the current MB to continue next time it is called.           */
/*             Re-enters mid-frame via the JUMP_IN label using the MB       */
/*             position saved in video->mbnum/offset/outputMB.              */
/*  In/out   :                                                              */
/*  Return   : PV_SUCCESS if successful else PV_FAIL                        */
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

PV_STATUS EncodeSliceDataPartMode(VideoEncData *video)
{
    PV_STATUS status = PV_SUCCESS;
    Vol *currVol = video->vol[video->currLayer];
    Vop *currVop = video->currVop;
    UChar mode, *Mode = video->headerInfo.Mode;
    VideoEncParams *encParams = video->encParams;
    Int nTotalMB = currVol->nTotalMB;
    Int width = currVop->width; /* has to be Vop, for multiple of 16 */
    Int lx = currVop->pitch; /* , with pading */
    UChar *QPMB = video->QPMB;
    Int QP;
    Int ind_x = video->outputMB->mb_x, ind_y = video->outputMB->mb_y;
    Int offset = video->offset;  /* get current MB location */
    Int mbnum = video->mbnum, slice_counter = video->sliceNo[mbnum]; /* get current MB location */
    Int firstMB = mbnum;                       /* first MB of this call; it is never rewound */
    Int start_packet_header = (mbnum != 0);
    Int num_bits = 0;
    /* budget leaves room for the marker that still has to be appended */
    Int packet_size = encParams->ResyncPacketsize - 1
                      - (currVop->predictionType == I_VOP ? 19 : 17);
    BitstreamEncVideo *bs1 = video->bitstream1;  /* partition 1: headers + MVs */
    BitstreamEncVideo *bs2 = video->bitstream2;  /* partition 2: DC values */
    BitstreamEncVideo *bs3 = video->bitstream3;  /* partition 3: AC texture */
    Int bitCount1 = 0, bitCount2 = 0, bitCount3 = 0,
        byteCount1 = 0, byteCount2 = 0, byteCount3 = 0;  /* per-stream rewind points */
    Int numHeaderBits = 0;
    approxDCT fastDCTfunction;
    Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB,  5/18/2001 */
    UChar CBP;                   /* saved so a rewound MB can be restored */
    Short outputMB[6][64];       /* saved coefficients of the rewound MB */
    PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);
    void (*MBVlcEncode)(VideoEncData*, Int[], void *);
    void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);
    Int k;

    video->QP_prev = 31;

    if (video->end_of_buf) /* left-over from previous run */
    {
        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
        if (status != PV_END_OF_BUF)
        {
            BitstreamEncReset(bs1);
            video->end_of_buf = 0;
        }
        return status;
    }

    if (mbnum == 0) /* only do this at the start of a frame */
    {
        QPMB[0] = video->QP_prev = QP = currVop->quantizer;
        video->usePrevQP = 0;
        numHeaderBits = BitstreamGetPos(bs1); /* Number of bits in VOP Header */
    }

    /* Re-assign fast functions on every slice, don't have to put it in the memory */
    QP = QPMB[mbnum];
    if (mbnum > 0)   video->QP_prev = QPMB[mbnum-1];

    /* determine type of quantization */
#ifndef NO_MPEG_QUANT
    if (currVol->quantType == 0)
        CodeMB = &CodeMB_H263;
    else
        CodeMB = &CodeMB_MPEG;
#else
    CodeMB = &CodeMB_H263;
#endif

    /* determine which functions to be used, in MB-level */
    if (currVop->predictionType == P_VOP)
        MBVlcEncode = &MBVlcEncodeDataPar_P_VOP;
    else if (currVop->predictionType == I_VOP)
        MBVlcEncode = &MBVlcEncodeDataPar_I_VOP;
    else /* B_VOP not implemented yet */
        return PV_FAIL;

    /* determine which VLC table to be used */
#ifndef NO_RVLC
    if (currVol->useReverseVLC)
        BlockCodeCoeff = &BlockCodeCoeff_RVLC;
    else
#endif
        BlockCodeCoeff = &BlockCodeCoeff_Normal;

    if (mbnum != 0)
    {
        goto JUMP_IN;  /* resume where the previous call left off */
    }

    for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++)    /* Col MB Loop */
    {
        video->outputMB->mb_y = ind_y; /*  5/28/01 */

        for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++)  /* Row MB Loop */
        {
            video->outputMB->mb_x = ind_x; /*  5/28/01 */
            video->mbnum = mbnum;
            video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number */

            /****************************************************************************************/
            /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB          */
            /****************************************************************************************/
            getMotionCompensatedMB(video, ind_x, ind_y, offset);

JUMP_IN:
            QP = QPMB[mbnum];   /* always read new QP */

            if (start_packet_header)
            {
                slice_counter++;    /* Increment slice counter */
                video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number*/
                video->QP_prev = currVop->quantizer;    /* store QP */
                num_bits = BitstreamGetPos(bs1);
                status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);
                numHeaderBits = BitstreamGetPos(bs1) - num_bits;
                video->header_bits += numHeaderBits; /* Header Bits */
                start_packet_header = 0;
                video->usePrevQP = 0;
            }
            else /* don't encode the first MB in packet again */
            {
                /***********************************************/
                /* Code_MB:  DCT, Q, Q^(-1), IDCT, Motion Comp */
                /***********************************************/
                status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);
                /* keep a copy: if this MB gets rewound out of the packet the
                   coefficients must be restorable for the next call */
                for (k = 0; k < 6; k++)
                {
                    M4VENC_MEMCPY(outputMB[k], video->outputMB->block[k], sizeof(Short) << 6);
                }
            }

            /************************************/
            /* MB VLC Encode: VLC Encode MB     */
            /************************************/
            /* save the state before VLC encoding */
            bitCount1 = BitstreamGetPos(bs1);
            bitCount2 = BitstreamGetPos(bs2);
            bitCount3 = BitstreamGetPos(bs3);
            byteCount1 = bitCount1 >> 3;
            byteCount2 = bitCount2 >> 3;
            byteCount3 = bitCount3 >> 3;
            bitCount1 &= 0x7;
            bitCount2 &= 0x7;
            bitCount3 &= 0x7;
            mode = Mode[mbnum];
            CBP = video->headerInfo.CBP[mbnum];
            /*************************************/

            MBVlcEncode(video, ncoefblck, (void*)BlockCodeCoeff);

            /*************************************************************/
            /* Assemble Packets:  Assemble the MB VLC codes into Packets */
            /*************************************************************/
            num_bits = BitstreamGetPos(bs1) + BitstreamGetPos(bs2)
                       + BitstreamGetPos(bs3);// - numHeaderBits; //include header bits

            /* Assemble_Packet(video) */
            if (num_bits > packet_size && mbnum != firstMB) /* encoding at least one more MB*/
            {
                /* drop the MB just encoded from all three partitions */
                BitstreamRepos(bs1, byteCount1, bitCount1); /* rewind one MB */
                BitstreamRepos(bs2, byteCount2, bitCount2); /* rewind one MB */
                BitstreamRepos(bs3, byteCount3, bitCount3); /* rewind one MB */

                if (video->currVop->predictionType == I_VOP)
                {
                    BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */
                    video->header_bits += 19;
                }
                else
                {
                    BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB);  /*Add motion_marker*/
                    video->header_bits += 17;
                }

                status = BitstreamAppendEnc(bs1, bs2);  /* Combine with bs2 */
                status = BitstreamAppendEnc(bs1, bs3);  /* Combine with bs3 */
                video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */

                status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
                BitstreamEncReset(bs2);
                BitstreamEncReset(bs3);

                if (status == PV_END_OF_BUF) /* if cannot fit a buffer */
                {
                    video->end_of_buf = 1;
                }
                else
                {
                    BitstreamEncReset(bs1);
                }
                start_packet_header = 1;

                if (mbnum < nTotalMB || video->end_of_buf) /* return here */
                {
                    /* persist resume state; restore the rewound MB's data so the
                       next call can re-emit it at the head of a fresh packet */
                    video->mbnum = mbnum;
                    video->sliceNo[mbnum] = slice_counter;
                    video->offset = offset;
                    Mode[mbnum] = mode;
                    video->headerInfo.CBP[mbnum] = CBP;

                    for (k = 0; k < 6; k++)
                    {
                        M4VENC_MEMCPY(video->outputMB->block[k], outputMB[k], sizeof(Short) << 6);
                    }
                    return status;
                }
            }

            offset += 16;
            mbnum++; /* has to increment before SCD, to preserve Mode[mbnum] */
        } /* End of For ind_x */

        offset += (lx << 4) - width;  /* hop to next MB row (pitch minus row width) */
    } /* End of For ind_y */

    if (!start_packet_header)  /* flush the final, partially-filled packet */
    {
        if (video->currVop->predictionType == I_VOP)
        {
            BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */
            video->header_bits += 19;
        }
        else
        {
            BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB);  /*Add motion_marker*/
            video->header_bits += 17;
        }

        status = BitstreamAppendEnc(bs1, bs2);  /* Combine with bs2 */
        status = BitstreamAppendEnc(bs1, bs3);  /* Combine with bs3 */
        video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
        BitstreamEncReset(bs2);
        BitstreamEncReset(bs3);

        if (status == PV_END_OF_BUF)
        {
            video->end_of_buf = 1;
        }
        else
        {
            BitstreamEncReset(bs1);
        }
    }

    video->mbnum = mbnum;
    if (mbnum < nTotalMB)
        video->sliceNo[mbnum] = slice_counter;
    video->offset = offset;

    return status;
}
#endif /* NO_SLICE_ENCODE */

#endif /* H263_ONLY */

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/dct.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "oscl_base_macros.h" // for OSCL_UNUSED_ARG
#include "mp4enc_lib.h"
#include "mp4lib_int.h"
#include "dct_inline.h"

#define FDCT_SHIFT 10

#ifdef __cplusplus
extern "C"
{
#endif

    /**************************************************************************/
    /*  Function: BlockDCT_AANwSub
        Date:     7/31/01
        Input:    cur/pred 8x8 blocks (word-aligned), width = pitch of cur
        Output:   out[64] ==> next block
        Purpose:  Do subtraction for zero MV first: each input sample is formed
                  as 2*(cur - pred) (the <<1 scaling is folded into the mask),
                  then an 8x8 forward DCT is applied row-wise then column-wise.
                  Columns whose absolute sum falls below the threshold read
                  from out[64] are marked with 0x7fff instead of transformed
                  (presumably a skip flag for the quantizer — inferred, not
                  shown here).
        Modified:
    **************************************************************************/
    Void BlockDCT_AANwSub(Short *out, UChar *cur, UChar *pred, Int width)
    {
        Short *dst;
        Int k0, k1, k2, k3, k4, k5, k6, k7;
        Int round;
        Int k12 = 0x022A02D4;   /* packed fixed-point multiplier constants */
        Int k14 = 0x0188053A;
        Int abs_sum;
        Int mask;
        Int tmp, tmp2;
        Int ColTh;              /* column deadzone threshold, passed in at out[64] */

        dst = out + 64 ;
        ColTh = *dst;
        out += 128;
        round = 1 << (FDCT_SHIFT - 1);

        do  /* fdct_nextrow */
        {
            /* assuming the block is word-aligned */
            mask = 0x1FE;
            tmp = *((Int*) cur);    /* contains 4 pixels */
            tmp2 = *((Int*) pred);  /* prediction 4 pixels */
            /* unpack bytes; each k becomes 2*cur - 2*pred for one pixel */
            k0 = tmp2 & 0xFF;
            k1 = mask & (tmp << 1);
            k0 = k1 - (k0 << 1);
            k1 = (tmp2 >> 8) & 0xFF;
            k2 = mask & (tmp >> 7);
            k1 = k2 - (k1 << 1);
            k2 = (tmp2 >> 16) & 0xFF;
            k3 = mask & (tmp >> 15);
            k2 = k3 - (k2 << 1);
            k3 = (tmp2 >> 24) & 0xFF;
            k4 = mask & (tmp >> 23);
            k3 = k4 - (k3 << 1);
            tmp = *((Int*)(cur + 4));   /* another 4 pixels */
            tmp2 = *((Int*)(pred + 4));
            k4 = tmp2 & 0xFF;
            k5 = mask & (tmp << 1);
            k4 = k5 - (k4 << 1);
            k5 = (tmp2 >> 8) & 0xFF;
            k6 = mask & (tmp >> 7);
            k5 = k6 - (k5 << 1);
            k6 = (tmp2 >> 16) & 0xFF;
            k7 = mask & (tmp >> 15);
            k6 = k7 - (k6 << 1);
            k7 = (tmp2 >> 24) & 0xFF;
            tmp = mask & (tmp >> 23);
            k7 = tmp - (k7 << 1);
            cur += width;   /* advance to next pixel row */
            pred += 16;     /* prediction buffer has fixed pitch 16 */

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            k1 = k0 - (k1 << 1);
            /**********/
            dst[0] = k0;
            dst[4] = k1; /* col. 4 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            k3 = (k3 << 1) - k2;
            /********/
            dst[2] = k2;    /* col. 2 */
            k3 <<= 1;       /* scale up col. 6 */
            dst[6] = k3;    /* col. 6 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k4 = k4 + k7;
            k7 = (k7 << 1) - k4;
            k5 = k5 + k6;
            k4 <<= 1;       /* scale up col.5 */
            k6 = k5 - (k6 << 1);
            /********/
            dst[5] = k4;    /* col. 5 */
            k6 <<= 2;       /* scale up col. 7 */
            dst[1] = k5;    /* col. 1 */
            dst[7] = k6;    /* col. 7 */
            dst[3] = k7;    /* col. 3 */
            dst += 8;
        }
        while (dst < out);

        out -= 64;
        dst = out + 8;
        /* Vertical Block Loop */
        do  /* Vertical 8xDCT loop */
        {
            k0 = out[0];
            k1 = out[8];
            k2 = out[16];
            k3 = out[24];
            k4 = out[32];
            k5 = out[40];
            k6 = out[48];
            k7 = out[56];

            /* deadzone thresholding for column */
            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);
            if (abs_sum < ColTh)
            {
                out[0] = 0x7fff;   /* sentinel: column below threshold */
                out++;
                continue;
            }

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            k1 = k0 - (k1 << 1);
            /**********/
            out[32] = k1;   /* row 4 */
            out[0] = k0;    /* row 0 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            k3 = (k3 << 1) - k2;
            k3 <<= 1;       /* scale up col. 6 */
            /********/
            out[48] = k3;   /* row 6 */
            out[16] = k2;   /* row 2 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k4 = k4 + k7;
            k7 = (k7 << 1) - k4;
            k5 = k5 + k6;
            k4 <<= 1;       /* scale up col. 5 */
            k6 = k5 - (k6 << 1);
            /********/
            out[24] = k7 ;  /* row 3 */
            k6 <<= 2;       /* scale up col. 7 */
            out[56] = k6 ;  /* row 7 */
            out[8] = k5 ;   /* row 1 */
            out[40] = k4 ;  /* row 5 */
            out++;
        }
        while ((UInt)out < (UInt)dst) ;

        return ;
    }

    /**************************************************************************/
    /*  Function: Block4x4DCT_AANwSub
        Date:     7/31/01
        Input:    cur/pred 8x8 blocks (word-aligned), width = pitch of cur
        Output:   out[64] ==> next block
        Purpose:  Do subtraction for zero MV first before 4x4 DCT — same
                  residual 8x8 transform as BlockDCT_AANwSub but only the
                  low-frequency 4x4 outputs (cols/rows 0..3) are produced.
        Modified:
    **************************************************************************/
    Void Block4x4DCT_AANwSub(Short *out, UChar *cur, UChar *pred, Int width)
    {
        Short *dst;
        register Int k0, k1, k2, k3, k4, k5, k6, k7;
        Int round;
        Int k12 = 0x022A02D4;
        Int k14 = 0x0188053A;
        Int mask;
        Int tmp, tmp2;
        Int abs_sum;
        Int ColTh;

        dst = out + 64 ;
        ColTh = *dst;
        out += 128;
        round = 1 << (FDCT_SHIFT - 1);

        do  /* fdct_nextrow */
        {
            /* assuming the block is word-aligned */
            mask = 0x1FE;
            tmp = *((Int*) cur);    /* contains 4 pixels */
            tmp2 = *((Int*) pred);  /* prediction 4 pixels */
            /* unpack bytes; each k becomes 2*cur - 2*pred for one pixel */
            k0 = tmp2 & 0xFF;
            k1 = mask & (tmp << 1);
            k0 = k1 - (k0 << 1);
            k1 = (tmp2 >> 8) & 0xFF;
            k2 = mask & (tmp >> 7);
            k1 = k2 - (k1 << 1);
            k2 = (tmp2 >> 16) & 0xFF;
            k3 = mask & (tmp >> 15);
            k2 = k3 - (k2 << 1);
            k3 = (tmp2 >> 24) & 0xFF;
            k4 = mask & (tmp >> 23);
            k3 = k4 - (k3 << 1);
            tmp = *((Int*)(cur + 4));   /* another 4 pixels */
            tmp2 = *((Int*)(pred + 4));
            k4 = tmp2 & 0xFF;
            k5 = mask & (tmp << 1);
            k4 = k5 - (k4 << 1);
            k5 = (tmp2 >> 8) & 0xFF;
            k6 = mask & (tmp >> 7);
            k5 = k6 - (k5 << 1);
            k6 = (tmp2 >> 16) & 0xFF;
            k7 = mask & (tmp >> 15);
            k6 = k7 - (k6 << 1);
            k7 = (tmp2 >> 24) & 0xFF;
            tmp = mask & (tmp >> 23);
            k7 = tmp - (k7 << 1);
            cur += width;
            pred += 16;

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;       /* col. 4 (odd half) not needed for 4x4 */
            /**********/
            dst[0] = k0;

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            /********/
            dst[2] = k2;    /* col. 2 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k7 = k7 - k4;
            k5 = k5 + k6;
            /********/
            dst[1] = k5;    /* col. 1 */
            dst[3] = k7;    /* col. 3 */
            dst += 8;
        }
        while (dst < out);

        out -= 64;
        dst = out + 4;
        /* Vertical Block Loop */
        do  /* Vertical 8xDCT loop */
        {
            k0 = out[0];
            k1 = out[8];
            k2 = out[16];
            k3 = out[24];
            k4 = out[32];
            k5 = out[40];
            k6 = out[48];
            k7 = out[56];

            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);
            if (abs_sum < ColTh)
            {
                out[0] = 0x7fff;   /* sentinel: column below threshold */
                out++;
                continue;
            }

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            /**********/
            out[0] = k0;    /* row 0 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            /********/
            out[16] = k2;   /* row 2 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k7 = k7 - k4 ;
            k5 = k5 + k6;
            /********/
            out[24] = k7 ;  /* row 3 */
            out[8] = k5 ;   /* row 1 */
            out++;
        }
        while ((UInt)out < (UInt)dst) ;

        return ;
    }

    /**************************************************************************/
    /*  Function: Block2x2DCT_AANwSub
        Date:     7/31/01
        Input:    cur/pred 8x8 blocks (word-aligned), width = pitch of cur
        Output:   out[64] ==> next block
        Purpose:  Do subtraction for zero MV first before 2x2 DCT — residual
                  transform producing only the 2x2 low-frequency outputs
                  (cols/rows 0..1). Note k14 differs from the 8x8/4x4 variants
                  because the mla946 pairing replaces mla554/mla1338.
        Modified:
    **************************************************************************/
    Void Block2x2DCT_AANwSub(Short *out, UChar *cur, UChar *pred, Int width)
    {
        Short *dst;
        register Int k0, k1, k2, k3, k4, k5, k6, k7;
        Int round;
        Int k12 = 0x022A02D4;
        Int k14 = 0x018803B2;
        Int mask;
        Int tmp, tmp2;
        Int abs_sum;
        Int ColTh;

        dst = out + 64 ;
        ColTh = *dst;
        out += 128;
        round = 1 << (FDCT_SHIFT - 1);

        do  /* fdct_nextrow */
        {
            /* assuming the block is word-aligned */
            mask = 0x1FE;
            tmp = *((Int*) cur);    /* contains 4 pixels */
            tmp2 = *((Int*) pred);  /* prediction 4 pixels */
            /* unpack bytes; each k becomes 2*cur - 2*pred for one pixel */
            k0 = tmp2 & 0xFF;
            k1 = mask & (tmp << 1);
            k0 = k1 - (k0 << 1);
            k1 = (tmp2 >> 8) & 0xFF;
            k2 = mask & (tmp >> 7);
            k1 = k2 - (k1 << 1);
            k2 = (tmp2 >> 16) & 0xFF;
            k3 = mask & (tmp >> 15);
            k2 = k3 - (k2 << 1);
            k3 = (tmp2 >> 24) & 0xFF;
            k4 = mask & (tmp >> 23);
            k3 = k4 - (k3 << 1);
            tmp = *((Int*)(cur + 4));   /* another 4 pixels */
            tmp2 = *((Int*)(pred + 4));
            k4 = tmp2 & 0xFF;
            k5 = mask & (tmp << 1);
            k4 = k5 - (k4 << 1);
            k5 = (tmp2 >> 8) & 0xFF;
            k6 = mask & (tmp >> 7);
            k5 = k6 - (k5 << 1);
            k6 = (tmp2 >> 16) & 0xFF;
            k7 = mask & (tmp >> 15);
            k6 = k7 - (k6 << 1);
            k7 = (tmp2 >> 24) & 0xFF;
            tmp = mask & (tmp >> 23);
            k7 = tmp - (k7 << 1);
            cur += width;
            pred += 16;

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            /**********/
            dst[0] = k0;

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k5 = k1 >> FDCT_SHIFT;
            /*****************/
            /********/

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k1 = mla392(k4, k14, round);
            k1 = mla946(k6, k14, k1);
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k5 = k5 + k6;
            /********/
            dst[1] = k5;
            dst += 8;
        }
        while (dst < out);

        out -= 64;
        dst = out + 2;
        /* Vertical Block Loop */
        do  /* Vertical 8xDCT loop */
        {
            k0 = out[0];
            k1 = out[8];
            k2 = out[16];
            k3 = out[24];
            k4 = out[32];
            k5 = out[40];
            k6 = out[48];
            k7 = out[56];

            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);
            if (abs_sum < ColTh)
            {
                out[0] = 0x7fff;   /* sentinel: column below threshold */
                out++;
                continue;
            }

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            /**********/
            out[0] = k0;    /* row 0 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k5 = k1 >> FDCT_SHIFT;
            /*****************/
            /********/

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k1 = mla392(k4, k14, round);
            k1 = mla946(k6, k14, k1);
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k5 = k5 + k6;
            /********/
            out[8] = k5 ;   /* row 1 */
            out++;
        }
        while ((UInt)out < (UInt)dst) ;

        return ;
    }

    /**************************************************************************/
    /*  Function: BlockDCT_AANIntra
        Date:     8/9/01
        Input:    rec
        Output:   out[64] ==> next block
        Purpose:  Input directly from rec frame — intra variant of
                  BlockDCT_AANwSub: no prediction subtraction, pixels are read
                  from 'cur' only (scaled by 2 via the <<1/mask unpack);
                  dummy2 is unused.
        Modified:
    **************************************************************************/
    Void BlockDCT_AANIntra(Short *out, UChar *cur, UChar *dummy2, Int width)
    {
        Short *dst;
        Int k0, k1, k2, k3, k4, k5, k6, k7;
        Int round;
        Int k12 = 0x022A02D4;
        Int k14 = 0x0188053A;
        Int abs_sum;
        Int mask;
        Int *curInt, tmp;
        Int ColTh;

        OSCL_UNUSED_ARG(dummy2);

        dst = out + 64 ;
        ColTh = *dst;
        out += 128;
        round = 1 << (FDCT_SHIFT - 1);

        do  /* fdct_nextrow */
        {
            mask = 0x1FE;
            curInt = (Int*) cur;
            tmp = curInt[0];    /* contains 4 pixels */
            k0 = mask & (tmp << 1);
            k1 = mask & (tmp >> 7);
            k2 = mask & (tmp >> 15);
            k3 = mask & (tmp >> 23);
            tmp = curInt[1];    /* another 4 pixels */
            k4 = mask & (tmp << 1);
            k5 = mask & (tmp >> 7);
            k6 = mask & (tmp >> 15);
            k7 = mask & (tmp >> 23);
            cur += width;

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            k1 = k0 - (k1 << 1);
            /**********/
            dst[0] = k0;
            dst[4] = k1; /* col. 4 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            k3 = (k3 << 1) - k2;
            /********/
            dst[2] = k2;    /* col. 2 */
            k3 <<= 1;       /* scale up col. 6 */
            dst[6] = k3;    /* col. 6 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k4 = k4 + k7;
            k7 = (k7 << 1) - k4;
            k5 = k5 + k6;
            k4 <<= 1;       /* scale up col.5 */
            k6 = k5 - (k6 << 1);
            /********/
            dst[5] = k4;    /* col. 5 */
            k6 <<= 2;       /* scale up col. 7 */
            dst[1] = k5;    /* col. 1 */
            dst[7] = k6;    /* col. 7 */
            dst[3] = k7;    /* col. 3 */
            dst += 8;
        }
        while (dst < out);

        out -= 64;
        dst = out + 8;
        /* Vertical Block Loop */
        do  /* Vertical 8xDCT loop */
        {
            k0 = out[0];
            k1 = out[8];
            k2 = out[16];
            k3 = out[24];
            k4 = out[32];
            k5 = out[40];
            k6 = out[48];
            k7 = out[56];

            /* deadzone thresholding for column */
            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);
            if (abs_sum < ColTh)
            {
                out[0] = 0x7fff;   /* sentinel: column below threshold */
                out++;
                continue;
            }

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            k1 = k0 - (k1 << 1);
            /**********/
            out[32] = k1;   /* row 4 */
            out[0] = k0;    /* row 0 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            k3 = (k3 << 1) - k2;
            k3 <<= 1;       /* scale up col. 6 */
            /********/
            out[48] = k3;   /* row 6 */
            out[16] = k2;   /* row 2 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k4 = k4 + k7;
            k7 = (k7 << 1) - k4;
            k5 = k5 + k6;
            k4 <<= 1;       /* scale up col. 5 */
            k6 = k5 - (k6 << 1);
            /********/
            out[24] = k7 ;  /* row 3 */
            k6 <<= 2;       /* scale up col. 7 */
            out[56] = k6 ;  /* row 7 */
            out[8] = k5 ;   /* row 1 */
            out[40] = k4 ;  /* row 5 */
            out++;
        }
        while ((UInt)out < (UInt)dst) ;

        return ;
    }

    /**************************************************************************/
    /*  Function: Block4x4DCT_AANIntra
        Date:     8/9/01
        Input:    prev
        Output:   out[64] ==> next block
        Purpose:  Input directly from prev frame; 4x4 low-frequency output
                  (the original header said "2x2 DCT" — copy/paste slip: the
                  body produces the 4x4 variant, like Block4x4DCT_AANwSub).
        Modified:
    **************************************************************************/
    Void Block4x4DCT_AANIntra(Short *out, UChar *cur, UChar *dummy2, Int width)
    {
        Short *dst;
        register Int k0, k1, k2, k3, k4, k5, k6, k7;
        Int round;
        Int k12 = 0x022A02D4;
        Int k14 = 0x0188053A;
        Int mask;
        Int *curInt, tmp;
        Int abs_sum;
        Int ColTh;

        OSCL_UNUSED_ARG(dummy2);

        dst = out + 64 ;
        ColTh = *dst;
        out += 128;
        round = 1 << (FDCT_SHIFT - 1);

        do  /* fdct_nextrow */
        {
            mask = 0x1FE;
            curInt = (Int*) cur;
            tmp = curInt[0];    /* contains 4 pixels */
            k0 = mask & (tmp << 1);
            k1 = mask & (tmp >> 7);
            k2 = mask & (tmp >> 15);
            k3 = mask & (tmp >> 23);
            tmp = curInt[1];    /* another 4 pixels */
            k4 = mask & (tmp << 1);
            k5 = mask & (tmp >> 7);
            k6 = mask & (tmp >> 15);
            k7 = mask & (tmp >> 23);
            cur += width;

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            /**********/
            dst[0] = k0;

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            /********/
            dst[2] = k2;    /* col. 2 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k7 = k7 - k4;
            k5 = k5 + k6;
            /********/
            dst[1] = k5;    /* col. 1 */
            dst[3] = k7;    /* col. 3 */
            dst += 8;
        }
        while (dst < out);

        out -= 64;
        dst = out + 4;
        /* Vertical Block Loop */
        do  /* Vertical 8xDCT loop */
        {
            k0 = out[0];
            k1 = out[8];
            k2 = out[16];
            k3 = out[24];
            k4 = out[32];
            k5 = out[40];
            k6 = out[48];
            k7 = out[56];

            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);
            if (abs_sum < ColTh)
            {
                out[0] = 0x7fff;   /* sentinel: column below threshold */
                out++;
                continue;
            }

            /* fdct_1 */
            k0 = k0 + k7;
            k7 = k0 - (k7 << 1);
            k1 = k1 + k6;
            k6 = k1 - (k6 << 1);
            k2 = k2 + k5;
            k5 = k2 - (k5 << 1);
            k3 = k3 + k4;
            k4 = k3 - (k4 << 1);
            k0 = k0 + k3;
            k3 = k0 - (k3 << 1);
            k1 = k1 + k2;
            k2 = k1 - (k2 << 1);
            k0 = k0 + k1;
            /**********/
            out[0] = k0;    /* row 0 */

            /* fdct_2 */
            k4 = k4 + k5;
            k5 = k5 + k6;
            k6 = k6 + k7;
            k2 = k2 + k3;
            /* MUL2C k2,k5,724,FDCT_SHIFT */
            /* k0, k1 become scratch */
            /* assume FAST MULTIPLY */
            k1 = mla724(k12, k5, round);
            k0 = mla724(k12, k2, round);
            k5 = k1 >> FDCT_SHIFT;
            k2 = k0 >> FDCT_SHIFT;
            /*****************/
            k2 = k2 + k3;
            /********/
            out[16] = k2;   /* row 2 */

            /* fdct_3 */
            /* ROTATE k4,k6,392,946, FDCT_SHIFT */
            /* assume FAST MULTIPLY */
            /* k0, k1 are output */
            k0 = k4 - k6;
            k1 = mla392(k0, k14, round);
            k0 = mla554(k4, k12, k1);
            k1 = mla1338(k6, k14, k1);
            k4 = k0 >> FDCT_SHIFT;
            k6 = k1 >> FDCT_SHIFT;
            /***********************/
            k5 = k5 + k7;
            k7 = (k7 << 1) - k5;
            k7 = k7 - k4 ;
            k5 = k5 + k6;
            /********/
out[24] = k7 ; /* row 3 */ out[8] = k5 ; /* row 1 */ out++; } while ((UInt)out < (UInt)dst) ; return ; } /**************************************************************************/ /* Function: Block2x2DCT_AANIntra Date: 8/9/01 Input: prev Output: out[64] ==> next block Purpose: Input directly from prev frame. output 2x2 DCT Modified: **************************************************************************/ Void Block2x2DCT_AANIntra(Short *out, UChar *cur, UChar *dummy2, Int width) { Short *dst; register Int k0, k1, k2, k3, k4, k5, k6, k7; Int round; Int k12 = 0x022A02D4; Int k14 = 0x018803B2; Int mask; Int *curInt, tmp; Int abs_sum; Int ColTh; OSCL_UNUSED_ARG(dummy2); dst = out + 64 ; ColTh = *dst; out += 128; round = 1 << (FDCT_SHIFT - 1); do /* fdct_nextrow */ { mask = 0x1FE; curInt = (Int*) cur; tmp = curInt[0]; /* contains 4 pixels */ k0 = mask & (tmp << 1); k1 = mask & (tmp >> 7); k2 = mask & (tmp >> 15); k3 = mask & (tmp >> 23); tmp = curInt[1]; /* another 4 pixels */ k4 = mask & (tmp << 1); k5 = mask & (tmp >> 7); k6 = mask & (tmp >> 15); k7 = mask & (tmp >> 23); cur += width; /* fdct_1 */ k0 = k0 + k7; k7 = k0 - (k7 << 1); k1 = k1 + k6; k6 = k1 - (k6 << 1); k2 = k2 + k5; k5 = k2 - (k5 << 1); k3 = k3 + k4; k4 = k3 - (k4 << 1); k0 = k0 + k3; k3 = k0 - (k3 << 1); k1 = k1 + k2; k2 = k1 - (k2 << 1); k0 = k0 + k1; /**********/ dst[0] = k0; /* fdct_2 */ k4 = k4 + k5; k5 = k5 + k6; k6 = k6 + k7; /* MUL2C k2,k5,724,FDCT_SHIFT */ /* k0, k1 become scratch */ /* assume FAST MULTIPLY */ k1 = mla724(k12, k5, round); k5 = k1 >> FDCT_SHIFT; /*****************/ /********/ /* fdct_3 */ /* ROTATE k4,k6,392,946, FDCT_SHIFT */ /* assume FAST MULTIPLY */ /* k0, k1 are output */ k1 = mla392(k4, k14, round); k1 = mla946(k6, k14, k1); k6 = k1 >> FDCT_SHIFT; /***********************/ k5 = k5 + k7; k5 = k5 + k6; /********/ dst[1] = k5; dst += 8; } while (dst < out); out -= 64; dst = out + 2; /* Vertical Block Loop */ do /* Vertical 8xDCT loop */ { k0 = out[0]; k1 = out[8]; k2 = 
out[16]; k3 = out[24]; k4 = out[32]; k5 = out[40]; k6 = out[48]; k7 = out[56]; abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7); if (abs_sum < ColTh) { out[0] = 0x7fff; out++; continue; } /* fdct_1 */ k0 = k0 + k7; k7 = k0 - (k7 << 1); k1 = k1 + k6; k6 = k1 - (k6 << 1); k2 = k2 + k5; k5 = k2 - (k5 << 1); k3 = k3 + k4; k4 = k3 - (k4 << 1); k0 = k0 + k3; k3 = k0 - (k3 << 1); k1 = k1 + k2; k2 = k1 - (k2 << 1); k0 = k0 + k1; /**********/ out[0] = k0; /* row 0 */ /* fdct_2 */ k4 = k4 + k5; k5 = k5 + k6; k6 = k6 + k7; /* MUL2C k2,k5,724,FDCT_SHIFT */ /* k0, k1 become scratch */ /* assume FAST MULTIPLY */ k1 = mla724(k12, k5, round); k5 = k1 >> FDCT_SHIFT; /*****************/ /********/ /* fdct_3 */ /* ROTATE k4,k6,392,946, FDCT_SHIFT */ /* assume FAST MULTIPLY */ /* k0, k1 are output */ k1 = mla392(k4, k14, round); k1 = mla946(k6, k14, k1); k6 = k1 >> FDCT_SHIFT; /***********************/ k5 = k5 + k7; k5 = k5 + k6; /********/ out[8] = k5 ; /* row 1 */ out++; } while ((UInt)out < (UInt)dst) ; return ; } /**************************************************************************/ /* Function: Block1x1DCTwSub Date: 8/9/01 Input: block Output: y Purpose: Compute DC value only Modified: **************************************************************************/ void Block1x1DCTwSub(Short *out, UChar *cur, UChar *pred, Int width) { UChar *end; Int temp = 0; Int offset2; offset2 = width - 8; end = pred + (16 << 3); do { temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); temp += (*cur++ - *pred++); cur += offset2; pred += 8; } while (pred < end) ; out[1] = out[2] = out[3] = out[4] = out[5] = out[6] = out[7] = 0; out[0] = temp >> 3; return ; } /**************************************************************************/ /* Function: Block1x1DCTIntra Date: 8/9/01 Input: prev Output: out Purpose: Compute DC value only Modified: 
**************************************************************************/ void Block1x1DCTIntra(Short *out, UChar *cur, UChar *dummy2, Int width) { UChar *end; Int temp = 0; ULong word; OSCL_UNUSED_ARG(dummy2); end = cur + (width << 3); do { word = *((ULong*)cur); temp += (word >> 24); temp += ((word >> 16) & 0xFF); temp += ((word >> 8) & 0xFF); temp += (word & 0xFF); word = *((ULong*)(cur + 4)); temp += (word >> 24); temp += ((word >> 16) & 0xFF); temp += ((word >> 8) & 0xFF); temp += (word & 0xFF); cur += width; } while (cur < end) ; out[1] = out[2] = out[3] = out[4] = out[5] = out[6] = out[7] = 0; out[0] = temp >> 3; return ; } #ifdef __cplusplus } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/dct.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
 * -------------------------------------------------------------------
 */
#ifndef _DCT_H_
#define _DCT_H_

/* Per-QP column-activity thresholds (indexed by QP 0..31): a DCT column
   whose absolute sum falls below the threshold is skipped by the
   Block*DCT_AAN* routines.  Inter and intra use separate tables. */
const static Int ColThInter[32] = {0, 0x1C, 0x4C, 0x6C, 0x9C, 0xBC, 0xEC, 0x10C,
                                   0x13C, 0x15C, 0x18C, 0x1AC, 0x1DC, 0x1FC, 0x22C, 0x24C,
                                   0x27C, 0x29C, 0x2CC, 0x2EC, 0x31C, 0x33C, 0x36C, 0x38C,
                                   0x3BC, 0x3DC, 0x40C, 0x42C, 0x45C, 0x47C, 0x4AC, 0x4CC
                                  };

const static Int ColThIntra[32] = {0, 0x1C, 0x3C, 0x5C, 0x7C, 0x9C, 0xBC, 0xDC,
                                   0xFC, 0x11C, 0x13C, 0x15C, 0x17C, 0x19C, 0x1BC, 0x1DC,
                                   0x1FC, 0x21C, 0x23C, 0x25C, 0x27C, 0x29C, 0x2BC, 0x2DC,
                                   0x2FC, 0x31C, 0x33C, 0x35C, 0x37C, 0x39C, 0x3BC, 0x3DC
                                  };

/******************************************************/
/**********  IDCT part **************************/
/* single-bit set / single-bit clear masks, MSB first */
const static unsigned char imask[8] = {128, 64, 32, 16, 8, 4, 2, 1};
const static unsigned char mask[8] = {0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0xfe};

/* fixed-point IDCT constants */
#define W1 2841                 /* 2048*sqrt(2)*cos(1*pi/16) */
#define W2 2676                 /* 2048*sqrt(2)*cos(2*pi/16) */
#define W3 2408                 /* 2048*sqrt(2)*cos(3*pi/16) */
#define W5 1609                 /* 2048*sqrt(2)*cos(5*pi/16) */
#define W6 1108                 /* 2048*sqrt(2)*cos(6*pi/16) */
#define W7 565                  /* 2048*sqrt(2)*cos(7*pi/16) */

#ifdef __cplusplus
extern "C"
{
#endif

    /* Reduced input IDCT: one specialized column routine per sparsity class */
    void idct_col0(Short *blk);
    void idct_col1(Short *blk);
    void idct_col2(Short *blk);
    void idct_col3(Short *blk);
    void idct_col4(Short *blk);
    void idct_col0x40(Short *blk);
    void idct_col0x20(Short *blk);
    void idct_col0x10(Short *blk);

    /* Row IDCT + reconstruction into rec (inter: adds prediction residue) */
    void idct_rowInter(Short *srce, UChar *rec, Int lx);
    void idct_row0Inter(Short *blk, UChar *rec, Int lx);
    void idct_row1Inter(Short *blk, UChar *rec, Int lx);
    void idct_row2Inter(Short *blk, UChar *rec, Int lx);
    void idct_row3Inter(Short *blk, UChar *rec, Int lx);
    void idct_row4Inter(Short *blk, UChar *rec, Int lx);
    void idct_row0x40Inter(Short *blk, UChar *rec, Int lx);
    void idct_row0x20Inter(Short *blk, UChar *rec, Int lx);
    void idct_row0x10Inter(Short *blk, UChar *rec, Int lx);
    void idct_row0xCCInter(Short *blk, UChar *rec, Int lx);

    /* Row IDCT + reconstruction, intra blocks */
    void idct_rowIntra(Short *srce, UChar *rec, Int lx);
    void idct_row0Intra(Short *blk, UChar *rec, Int lx);
    void idct_row1Intra(Short *blk, UChar *rec, Int lx);
    void idct_row2Intra(Short *blk, UChar *rec, Int lx);
    void idct_row3Intra(Short *blk, UChar *rec, Int lx);
    void idct_row4Intra(Short *blk, UChar *rec, Int lx);
    void idct_row0x40Intra(Short *blk, UChar *rec, Int lx);
    void idct_row0x20Intra(Short *blk, UChar *rec, Int lx);
    void idct_row0x10Intra(Short *blk, UChar *rec, Int lx);
    void idct_row0xCCIntra(Short *blk, UChar *rec, Int lx);

    /* Row IDCT + reconstruction, zero-MV blocks (takes prev pointer) */
    void idct_rowzmv(Short *srce, UChar *rec, UChar *prev, Int lx);
    void idct_row0zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row1zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row2zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row3zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row4zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row0x40zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row0x20zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row0x10zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
    void idct_row0xCCzmv(Short *blk, UChar *rec, UChar *prev, Int lx);
#ifdef __cplusplus
}
#endif

/* Look-up table mapping to RIDCT from bitmap: the 4-bit index encodes
   which of the low-frequency coefficients are present. */
#ifdef SMALL_DCT

static void (*const idctcolVCA[16])(Short*) =
{
    &idct_col0, &idct_col4, &idct_col3, &idct_col4,
    &idct_col2, &idct_col4, &idct_col3, &idct_col4,
    &idct_col1, &idct_col4, &idct_col3, &idct_col4,
    &idct_col2, &idct_col4, &idct_col3, &idct_col4
};

static void (*const idctrowVCAInter[16])(Short*, UChar*, Int) =
{
    &idct_row0Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
    &idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
    &idct_row1Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
    &idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter
};

static void (*const idctrowVCAzmv[16])(Short*, UChar*, UChar*, Int) =
{
    &idct_row0zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
    &idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
    &idct_row1zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
    &idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv
};

static void (*const idctrowVCAIntra[16])(Short*, UChar*, Int) =
{
    &idct_row0Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
    &idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
    &idct_row1Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
    &idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra
};

#else /* SMALL_DCT */

static void (*const idctcolVCA[16])(Short*) =
{
    &idct_col0, &idct_col0x10, &idct_col0x20, &idct_col4,
    &idct_col0x40, &idct_col4, &idct_col3, &idct_col4,
    &idct_col1, &idct_col4, &idct_col3, &idct_col4,
    &idct_col2, &idct_col4, &idct_col3, &idct_col4
};

static void (*const idctrowVCAInter[16])(Short*, UChar*, Int) =
{
    &idct_row0Inter, &idct_row0x10Inter, &idct_row0x20Inter, &idct_row4Inter,
    &idct_row0x40Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
    &idct_row1Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
    &idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter
};

static void (*const idctrowVCAzmv[16])(Short*, UChar*, UChar*, Int) =
{
    &idct_row0zmv, &idct_row0x10zmv, &idct_row0x20zmv, &idct_row4zmv,
    &idct_row0x40zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
    &idct_row1zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
    &idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv
};

static void (*const idctrowVCAIntra[16])(Short*, UChar*, Int) =
{
    &idct_row0Intra, &idct_row0x10Intra, &idct_row0x20Intra, &idct_row4Intra,
    &idct_row0x40Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
    &idct_row1Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
    &idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra
};

#endif /* SMALL_DCT */

#ifdef __cplusplus
extern "C"
{
#endif
    /* part in AppVCA_dct.c */
//void Block1x1DCTzmv (Short *out,UChar *prev,UChar *cur,UChar *rec,Int lx,Int chroma);
    void
    Block1x1DCTwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
    void Block1x1DCTIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);

    /* This part is in dct_aan.c */
    Void BlockDCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
    Void Block4x4DCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
    Void Block2x2DCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
//Void BlockDCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);
//Void Block4x4DCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);
//Void Block2x2DCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);
    Void BlockDCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);
    Void Block4x4DCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);
    Void Block2x2DCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);

#ifdef __cplusplus
}
#endif

#endif //_DCT_H_



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/dct_inline.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*  Filename: dct_inline.h                                                      */
/*  Description: Implementation for in-line functions used in dct.cpp           */
/*               Three variants: portable C, ARM RVCT (__CC_ARM) inline asm,    */
/*               and ARM GCC inline asm.  The mla* helpers take the DCT         */
/*               constants packed two-per-word (low/high halfword) so the       */
/*               ARM builds can use SMLABB/SMLABT halfword multiplies; the      */
/*               portable build ignores the packed operand and multiplies       */
/*               by the literal constant instead.                               */
/*  Modified:                                                                   */
/*********************************************************************************/
#ifndef _DCT_INLINE_H_
#define _DCT_INLINE_H_

#if !defined(PV_ARM_GCC_V5)

#include "oscl_base_macros.h"

/* op2 * 724 + op3 (AAN constant sqrt(2)/2 in Q10 — TODO confirm scaling) */
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
    int32 out;

    OSCL_UNUSED_ARG(op1);

    out = op2 * 724 + op3;  /* op1 is not used here */

    return out;
}

/* k0 * 392 + round */
__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
    int32 k1;

    OSCL_UNUSED_ARG(k14);

    k1 = k0 * 392 + round;

    return k1;
}

/* k4 * 554 + k1 */
__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
    int32 k0;

    OSCL_UNUSED_ARG(k12);

    k0 = k4 * 554 + k1;

    return k0;
}

/* k6 * 1338 + k1 */
__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
    int32 out;

    OSCL_UNUSED_ARG(k14);

    out = k6 * 1338 + k1;

    return out;
}

/* k6 * 946 + k1 */
__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
    int32 out;

    OSCL_UNUSED_ARG(k14);

    out = k6 * 946 + k1;

    return out;
}

/* Sum of absolute values of 8 coefficients via sign-fold (x ^ (x>>31)) - (x>>31).
   NOTE(review): the k0 term omits the trailing "- carry", so a negative k0
   contributes |k0| - 1.  This mirrors the carry-chain trick in the asm
   variants and only feeds a threshold test, but it is not an exact abs-sum
   for k0 — confirm intentional. */
__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,
                       int32 k4, int32 k5, int32 k6, int32 k7)
{
    int32 carry, abs_sum;

    carry = k0 >> 31;
    abs_sum = (k0 ^ carry);
    carry = k1 >> 31;
    abs_sum += (k1 ^ carry) - carry;
    carry = k2 >> 31;
    abs_sum += (k2 ^ carry) - carry;
    carry = k3 >> 31;
    abs_sum += (k3 ^ carry) - carry;
    carry = k4 >> 31;
    abs_sum += (k4 ^ carry) - carry;
    carry = k5 >> 31;
    abs_sum += (k5 ^ carry) - carry;
    carry = k6 >> 31;
    abs_sum += (k6 ^ carry) - carry;
    carry = k7 >> 31;
    abs_sum += (k7 ^ carry) - carry;

    return abs_sum;
}

#elif defined(__CC_ARM)  /* only work with arm v5 */

#if defined(__TARGET_ARCH_5TE)

/* ARMv5TE: SMLABB = (low halfword * low halfword) + acc,
            SMLABT = (low halfword * high halfword) + acc */
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
    int32 out;

    __asm
    {
        smlabb out, op1, op2, op3
    }

    return out;
}

__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
    int32 k1;

    __asm
    {
        smlabt k1, k0, k14, round
    }

    return k1;
}

__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
    int32 k0;

    __asm
    {
        smlabt k0, k4, k12, k1
    }

    return k0;
}

__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
    int32 out;

    __asm
    {
        smlabb out, k6, k14, k1
    }

    return out;
}

__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
    int32 out;

    __asm
    {
        smlabb out, k6, k14, k1
    }

    return out;
}

#else // not ARM5TE

/* pre-v5TE: extract the wanted halfword manually, then MLA */
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
    int32 out;

    __asm
    {
        and out, op2, #0xFFFF
        mla out, op1, out, op3
    }

    return out;
}

__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
    int32 k1;

    __asm
    {
        mov k1, k14, asr #16
        mla k1, k0, k1, round
    }

    return k1;
}

__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
    int32 k0;

    __asm
    {
        mov k0, k12, asr #16
        mla k0, k4, k0, k1
    }

    return k0;
}

__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
    int32 out;

    __asm
    {
        and out, k14, 0xFFFF
        mla out, k6, out, k1
    }

    return out;
}

__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
    int32 out;

    __asm
    {
        and out, k14, 0xFFFF
        mla out, k6, out, k1
    }

    return out;
}

#endif

/* abs-sum via EOR with sign-extension + ADC carry trick */
__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,
                       int32 k4, int32 k5, int32 k6, int32 k7)
{
    int32 carry, abs_sum;

    __asm
    {
        eor carry, k0, k0, asr #31 ;
        eors abs_sum, k1, k1, asr #31 ;
        adc abs_sum, abs_sum, carry ;
        eors carry, k2, k2, asr #31 ;
        adc abs_sum, abs_sum, carry ;
        eors carry, k3, k3, asr #31 ;
        adc abs_sum, abs_sum, carry ;
        eors carry, k4, k4, asr #31 ;
        adc abs_sum, abs_sum, carry ;
        eors carry, k5, k5, asr #31 ;
        adc abs_sum, abs_sum, carry ;
        eors carry, k6, k6, asr #31 ;
        adc abs_sum, abs_sum, carry ;
        eors carry, k7, k7, asr #31 ;
        adc abs_sum, abs_sum, carry ;
    }

    return abs_sum;
}

#elif defined(PV_ARM_GCC_V5) /* ARM GNU COMPILER */

/* GCC extended-asm equivalents of the RVCT v5TE versions above */
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
    register int32 out;
    register int32 aa = (int32)op1;
    register int32 bb = (int32)op2;
    register int32 cc = (int32)op3;

    asm volatile("smlabb %0, %1, %2, %3"
                 : "=&r"(out)
                 : "r"(aa), "r"(bb), "r"(cc));
    return out;
}

__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
    register int32 out;
    register int32 aa = (int32)k0;
    register int32 bb = (int32)k14;
    register int32 cc = (int32)round;

    asm volatile("smlabt %0, %1, %2, %3"
                 : "=&r"(out)
                 : "r"(aa), "r"(bb), "r"(cc));
    return out;
}

__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
    register int32 out;
    register int32 aa = (int32)k4;
    register int32 bb = (int32)k12;
    register int32 cc = (int32)k1;

    asm volatile("smlabt %0, %1, %2, %3"
                 : "=&r"(out)
                 : "r"(aa), "r"(bb), "r"(cc));
    return out;
}

__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
    register int32 out;
    register int32 aa = (int32)k6;
    register int32 bb = (int32)k14;
    register int32 cc = (int32)k1;

    asm volatile("smlabb %0, %1, %2, %3"
                 : "=&r"(out)
                 : "r"(aa), "r"(bb), "r"(cc));
    return out;
}

__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
    register int32 out;
    register int32 aa = (int32)k6;
    register int32 bb = (int32)k14;
    register int32 cc = (int32)k1;

    asm volatile("smlabb %0, %1, %2, %3"
                 : "=&r"(out)
                 : "r"(aa), "r"(bb), "r"(cc));
    return out;
}

__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,
                       int32 k4, int32 k5, int32 k6, int32 k7)
{
    register int32 carry;
    register int32 abs_sum;
    register int32 aa = (int32)k0;
    register int32 bb = (int32)k1;
    register int32 cc = (int32)k2;
    register int32 dd = (int32)k3;
    register int32 ee = (int32)k4;
    register int32 ff = (int32)k5;
    register int32 gg = (int32)k6;
    register int32 hh = (int32)k7;

    asm volatile("eor %0, %2, %2, asr #31\n\t"
                 "eors %1, %3, %3, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 "eors %0, %4, %4, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 "eors %0, %5, %5, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 "eors %0, %6, %6, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 "eors %0, %7, %7, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 "eors %0, %8, %8, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 "eors %0, %9, %9, asr #31\n\t"
                 "adc %1, %1, %0\n\t"
                 : "=&r"(carry), "=&r"(abs_sum)
                 : "r"(aa), "r"(bb), "r"(cc), "r"(dd), "r"(ee), "r"(ff), "r"(gg), "r"(hh));

    return abs_sum;
}

#endif // Diff.
OS #endif //_DCT_INLINE_H_



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/fastcodemb.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "oscl_base_macros.h" // for OSCL_UNUSED_ARG
#include "mp4def.h"
#include "mp4lib_int.h"
#include "mp4enc_lib.h"
#include "dct.h"
#include "m4venc_oscl.h"

/* ======================================================================== */
/*  Function : CodeMB_H263( )                                               */
/*  Date     : 8/15/2001                                                   */
/*  Purpose  : Perform residue calc (only zero MV), DCT, H263 Quant/Dequant,*/
/*             IDCT and motion compensation. Modified from FastCodeMB()     */
/*  Input    :                                                              */
/*      video       Video encoder data structure                            */
/*      function    Approximate DCT function, scaling and threshold         */
/*      ncoefblck   Array for last nonzero coeff for speedup in VlcEncode   */
/*      QP          Combined offset from the origin to the current          */
/*                  macroblock and QP for current MB.                       */
/*  Output   :                                                              */
/*      video->outputMB             Quantized DCT coefficients.             */
/*      currVop->yChan,uChan,vChan  Reconstructed pixels                    */
/*                                                                          */
/*  Return   : PV_STATUS                                                    */
/*  Modified :                                                              */
/*          2/26/01 -modified threshold based on correlation coeff 0.75
                    only for mode H.263
                    -ncoefblck[] as input, to keep position of last non-zero
                    coeff                                                   */
/*          8/10/01 -modified threshold based on correlation coeff 0.5
                    -used column threshold to speedup column DCT.
                    -used bitmap zigzag to speedup RunLevel().              */
/* ======================================================================== */

PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[])
{
    Int sad, k, CBP, mbnum = video->mbnum;
    Short *output, *dataBlock;
    UChar Mode = video->headerInfo.Mode[mbnum];
    UChar *bitmapcol, *bitmaprow = video->bitmaprow;
    UInt *bitmapzz ;
    UChar shortHeader = video->vol[video->currLayer]->shortVideoHeader;
    Int dc_scaler = 8;
    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);
    struct QPstruct QuantParam;
    Int dctMode, DctTh1;
    Int ColTh;
    /* per-mode (intra/inter) implementations selected below */
    Int(*BlockQuantDequantH263)(Short *, Short *, struct QPstruct *,
                                UChar[], UChar *, UInt *, Int, Int, Int, UChar);
    Int(*BlockQuantDequantH263DC)(Short *, Short *, struct QPstruct *,
                                  UChar *, UInt *, Int, UChar);
    void (*BlockDCT1x1)(Short *, UChar *, UChar *, Int);
    void (*BlockDCT2x2)(Short *, UChar *, UChar *, Int);
    void (*BlockDCT4x4)(Short *, UChar *, UChar *, Int);
    void (*BlockDCT8x8)(Short *, UChar *, UChar *, Int);

    /* motion comp. related var. */
    Vop *currVop = video->currVop;
    VideoEncFrameIO *inputFrame = video->input;
    Int ind_x = video->outputMB->mb_x;
    Int ind_y = video->outputMB->mb_y;
    Int lx = currVop->pitch;
    Int width = currVop->width;
    UChar *rec, *input, *pred;
    Int offset = QP >> 5;  /* QP is combined offset and QP */
    Int offsetc = (offset >> 2) + (ind_x << 2); /* offset for chrom */
    /*****************************/

    OSCL_UNUSED_ARG(function);

    output = video->outputMB->block[0];
    CBP = 0;
    QP = QP & 0x1F;     /* strip the offset bits, keep the 5-bit QP */
//  M4VENC_MEMSET(output,0,(sizeof(Short)<<6)*6); /* reset quantized coeff. to zero ,  7/24/01*/

    QuantParam.QPx2 = QP << 1;
    QuantParam.QP = QP;
    QuantParam.QPdiv2 = QP >> 1;
    QuantParam.QPx2plus = QuantParam.QPx2 + QuantParam.QPdiv2;
    QuantParam.Addition = QP - 1 + (QP & 0x1);

    if (intra)
    {
        /* intra blocks read pixels straight from the input frame */
        BlockDCT1x1 = &Block1x1DCTIntra;
        BlockDCT2x2 = &Block2x2DCT_AANIntra;
        BlockDCT4x4 = &Block4x4DCT_AANIntra;
        BlockDCT8x8 = &BlockDCT_AANIntra;
        BlockQuantDequantH263 = &BlockQuantDequantH263Intra;
        BlockQuantDequantH263DC = &BlockQuantDequantH263DCIntra;
        if (shortHeader)
        {
            dc_scaler = 8;
        }
        else
        {
            dc_scaler = cal_dc_scalerENC(QP, 1); /* luminance blocks */
        }
        DctTh1 = (Int)(dc_scaler * 3);//*1.829
        ColTh = ColThIntra[QP];
    }
    else
    {
        /* inter blocks work on the residue input - pred */
        BlockDCT1x1 = &Block1x1DCTwSub;
        BlockDCT2x2 = &Block2x2DCT_AANwSub;
        BlockDCT4x4 = &Block4x4DCT_AANwSub;
        BlockDCT8x8 = &BlockDCT_AANwSub;
        BlockQuantDequantH263 = &BlockQuantDequantH263Inter;
        BlockQuantDequantH263DC = &BlockQuantDequantH263DCInter;
        ColTh = ColThInter[QP];
        DctTh1 = (Int)(16 * QP); //9*QP;
    }

    rec = currVop->yChan + offset;
    input = inputFrame->yChan + offset;
    if (lx != width) input -= (ind_y << 9); /* non-padded offset */

    dataBlock = video->dataBlock;
    pred = video->predictedMB;

    /* six 8x8 blocks: k=0..3 luma, k=4 Cb, k=5 Cr */
    for (k = 0; k < 6; k++)
    {
        CBP <<= 1;
        bitmapcol = video->bitmapcol[k];
        bitmapzz = video->bitmapzz[k];  /*  7/30/01 */
        if (k < 4)
        {
            sad = video->mot[mbnum][k+1].sad;
            if (k&1)
            {
                rec += 8;
                input += 8;
            }
            else if (k == 2)
            {
                /* step down 8 rows and back to the left half */
                dctMode = ((width << 3) - 8);
                input += dctMode;
                dctMode = ((lx << 3) - 8);
                rec += dctMode;
            }
        }
        else
        {
            if (k == 4)
            {
                rec = currVop->uChan + offsetc;
                input = inputFrame->uChan + offsetc;
                if (lx != width) input -= (ind_y << 7);
                lx >>= 1;       /* chroma pitch/width are half of luma */
                width >>= 1;
                if (intra)
                {
                    sad = getBlockSum(input, width);
                    if (shortHeader)
                        dc_scaler = 8;
                    else
                    {
                        dc_scaler = cal_dc_scalerENC(QP, 2); /* chrominance blocks */
                    }
                    DctTh1 = (Int)(dc_scaler * 3);//*1.829
                }
                else
                    sad = Sad8x8(input, pred, width);
            }
            else
            {
                rec = currVop->vChan + offsetc;
                input = inputFrame->vChan + offsetc;
                if (lx != width) input -= (ind_y << 7);
                if (intra)
                {
                    sad = getBlockSum(input, width);
                }
                else
                    sad = Sad8x8(input, pred, width);
            }
        }

        /* choose an approximate DCT size from the block activity (sad) */
        if (sad < DctTh1 && !(shortHeader && intra)) /* all-zero */
        {
            /* For shortHeader intra block, DC value cannot be zero */
            dctMode = 0;
            CBP |= 0;
            ncoefblck[k] = 0;
        }
        else if (sad < 18*QP/*(QP<<4)*/) /* DC-only */
        {
            dctMode = 1;
            BlockDCT1x1(dataBlock, input, pred, width);
            CBP |= (*BlockQuantDequantH263DC)(dataBlock, output, &QuantParam,
                                              bitmaprow + k, bitmapzz, dc_scaler, shortHeader);
            ncoefblck[k] = 1;
        }
        else
        {
            dataBlock[64] = ColTh;  /* column threshold passed in-band to Block*DCT */

            if (sad < 22*QP/*(QP<<4)+(QP<<1)*/)  /* 2x2 DCT */
            {
                dctMode = 2;
                BlockDCT2x2(dataBlock, input, pred, width);
                ncoefblck[k] = 6;
            }
            else if (sad < (QP << 5)) /* 4x4 DCT */
            {
                dctMode = 4;
                BlockDCT4x4(dataBlock, input, pred, width);
                ncoefblck[k] = 26;
            }
            else /* Full-DCT */
            {
                dctMode = 8;
                BlockDCT8x8(dataBlock, input, pred, width);
                ncoefblck[k] = 64;
            }

            CBP |= (*BlockQuantDequantH263)(dataBlock, output, &QuantParam,
                                            bitmapcol, bitmaprow + k, bitmapzz,
                                            dctMode, k, dc_scaler, shortHeader);
        }

        /* reconstruct this block into rec (IDCT + motion comp) */
        BlockIDCTMotionComp(dataBlock, bitmapcol, bitmaprow[k],
                            dctMode, rec, pred, (lx << 1) | intra);

        output += 64;
        if (!(k&1))
        {
            pred += 8;
        }
        else
        {
            pred += 120;    /* next row of 8x8 blocks in the 16-pitch predictedMB */
        }
    }

    video->headerInfo.CBP[mbnum] = CBP; /*  5/18/2001 */

    return PV_SUCCESS;
}

#ifndef NO_MPEG_QUANT
/* ======================================================================== */
/*  Function : CodeMB_MPEG( )                                               */
/*  Date     : 8/15/2001                                                   */
/*  Purpose  : Perform residue calc (only zero MV), DCT, MPEG Quant/Dequant,*/
/*             IDCT and motion compensation. Modified from FastCodeMB()     */
/*  Input    :                                                              */
/*      video       Video encoder data structure                            */
/*      function    Approximate DCT function, scaling and threshold         */
/*      ncoefblck   Array for last nonzero coeff for speedup in VlcEncode   */
/*      QP          Combined offset from the origin to the current          */
/*                  macroblock and QP for current MB.                       */
/*  Output   :                                                              */
/*      video->outputMB             Quantized DCT coefficients.
*/
/*      currVop->yChan,uChan,vChan  Reconstructed pixels                    */
/*                                                                          */
/*  Return   : PV_STATUS                                                    */
/*  Modified :                                                              */
/*          2/26/01 -modified threshold based on correlation coeff 0.75
                    only for mode H.263
                    -ncoefblck[] as input, keep position of last non-zero
                    coeff                                                   */
/*          8/10/01 -modified threshold based on correlation coeff 0.5
                    -used column threshold to speedup column DCT.
                    -used bitmap zigzag to speedup RunLevel().              */
/* ======================================================================== */

PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[])
{
    Int sad, k, CBP, mbnum = video->mbnum;
    Short *output, *dataBlock;
    UChar Mode = video->headerInfo.Mode[mbnum];
    UChar *bitmapcol, *bitmaprow = video->bitmaprow;
    UInt *bitmapzz ;
    Int dc_scaler = 8;
    Vol *currVol = video->vol[video->currLayer];
    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);
    Int *qmat;
    Int dctMode, DctTh1, DctTh2, DctTh3, DctTh4;
    Int ColTh;
    /* per-mode (intra/inter) implementations selected below */
    Int(*BlockQuantDequantMPEG)(Short *, Short *, Int, Int *,
                                UChar [], UChar *, UInt *, Int, Int, Int);
    Int(*BlockQuantDequantMPEGDC)(Short *, Short *, Int, Int *,
                                  UChar [], UChar *, UInt *, Int);
    void (*BlockDCT1x1)(Short *, UChar *, UChar *, Int);
    void (*BlockDCT2x2)(Short *, UChar *, UChar *, Int);
    void (*BlockDCT4x4)(Short *, UChar *, UChar *, Int);
    void (*BlockDCT8x8)(Short *, UChar *, UChar *, Int);

    /* motion comp. related var. */
    Vop *currVop = video->currVop;
    VideoEncFrameIO *inputFrame = video->input;
    Int ind_x = video->outputMB->mb_x;
    Int ind_y = video->outputMB->mb_y;
    Int lx = currVop->pitch;
    Int width = currVop->width;
    UChar *rec, *input, *pred;
    Int offset = QP >> 5;   /* QP is combined offset and QP */
    Int offsetc = (offset >> 2) + (ind_x << 2);   /* offset for chrom */
    /*****************************/

    OSCL_UNUSED_ARG(function);

    output = video->outputMB->block[0];
    CBP = 0;
    QP = QP & 0x1F;     /* strip the offset bits, keep the 5-bit QP */
//  M4VENC_MEMSET(output,0,(sizeof(Short)<<6)*6); /* reset quantized coeff. to zero ,  7/24/01*/

    if (intra)
    {
        BlockDCT1x1 = &Block1x1DCTIntra;
        BlockDCT2x2 = &Block2x2DCT_AANIntra;
        BlockDCT4x4 = &Block4x4DCT_AANIntra;
        BlockDCT8x8 = &BlockDCT_AANIntra;
        BlockQuantDequantMPEG = &BlockQuantDequantMPEGIntra;
        BlockQuantDequantMPEGDC = &BlockQuantDequantMPEGDCIntra;
        dc_scaler = cal_dc_scalerENC(QP, 1); /* luminance blocks */
        qmat = currVol->iqmat;
        /* thresholds scale with the quant matrix entries of the bands they gate */
        DctTh1 = (Int)(3 * dc_scaler);//2*dc_scaler);
        DctTh2 = (Int)((1.25 * QP - 1) * qmat[1] * 0.45);//0.567);//0.567);
        DctTh3 = (Int)((1.25 * QP - 1) * qmat[2] * 0.55);//1.162);  /*  8/2/2001 */
        DctTh4 = (Int)((1.25 * QP - 1) * qmat[32] * 0.8);//1.7583);//0.7942);
        ColTh = ColThIntra[QP];
    }
    else
    {
        BlockDCT1x1 = &Block1x1DCTwSub;
        BlockDCT2x2 = &Block2x2DCT_AANwSub;
        BlockDCT4x4 = &Block4x4DCT_AANwSub;
        BlockDCT8x8 = &BlockDCT_AANwSub;
        BlockQuantDequantMPEG = &BlockQuantDequantMPEGInter;
        BlockQuantDequantMPEGDC = &BlockQuantDequantMPEGDCInter;
        qmat = currVol->niqmat;
        DctTh1 = (Int)(((QP << 1) - 0.5) * qmat[0] * 0.4);//0.2286);//0.3062);
        DctTh2 = (Int)(((QP << 1) - 0.5) * qmat[1] * 0.45);//0.567);//0.4);
        DctTh3 = (Int)(((QP << 1) - 0.5) * qmat[2] * 0.55);//1.162);  /*  8/2/2001 */
        DctTh4 = (Int)(((QP << 1) - 0.5) * qmat[32] * 0.8);//1.7583);//0.7942);
        ColTh = ColThInter[QP];
    }// get qmat, DctTh1, DctTh2, DctTh3

    rec = currVop->yChan + offset;
    input = inputFrame->yChan + offset;
    if (lx != width) input -= (ind_y << 9); /* non-padded offset */

    dataBlock = video->dataBlock;
    pred = video->predictedMB;

    /* six 8x8 blocks: k=0..3 luma, k=4 Cb, k=5 Cr */
    for (k = 0; k < 6; k++)
    {
        CBP <<= 1;
        bitmapcol = video->bitmapcol[k];
        bitmapzz = video->bitmapzz[k]; /*  8/2/01 */
        if (k < 4)
        {//Y block
            sad = video->mot[mbnum][k+1].sad;
            if (k&1)
            {
                rec += 8;
                input += 8;
            }
            else if (k == 2)
            {
                /* step down 8 rows and back to the left half */
                dctMode = ((width << 3) - 8);
                input += dctMode;
                dctMode = ((lx << 3) - 8);
                rec += dctMode;
            }
        }
        else
        {// U, V block
            if (k == 4)
            {
                rec = currVop->uChan + offsetc;
                input = inputFrame->uChan + offsetc;
                if (lx != width) input -= (ind_y << 7);
                lx >>= 1;       /* chroma pitch/width are half of luma */
                width >>= 1;
                if (intra)
                {
                    dc_scaler = cal_dc_scalerENC(QP, 2); /* chrominance blocks */
                    DctTh1 = dc_scaler * 3;
                    sad = getBlockSum(input, width);
                }
                else
                    sad = Sad8x8(input, pred, width);
            }
            else
            {
                rec = currVop->vChan + offsetc;
                input = inputFrame->vChan + offsetc;
                if (lx != width) input -= (ind_y << 7);
                if (intra)
                    sad = getBlockSum(input, width);
                else
                    sad = Sad8x8(input, pred, width);
            }
        }

        /* choose an approximate DCT size from the block activity (sad) */
        if (sad < DctTh1) /* all-zero */
        {
            dctMode = 0;
            CBP |= 0;
            ncoefblck[k] = 0;
        }
        else if (sad < DctTh2) /* DC-only */
        {
            dctMode = 1;
            BlockDCT1x1(dataBlock, input, pred, width);
            CBP |= (*BlockQuantDequantMPEGDC)(dataBlock, output, QP, qmat,
                                              bitmapcol, bitmaprow + k, bitmapzz, dc_scaler);
            ncoefblck[k] = 1;
        }
        else
        {
            dataBlock[64] = ColTh;  /* column threshold passed in-band to Block*DCT */

            if (sad < DctTh3) /* 2x2-DCT */
            {
                dctMode = 2;
                BlockDCT2x2(dataBlock, input, pred, width);
                ncoefblck[k] = 6;
            }
            else if (sad < DctTh4) /* 4x4 DCT */
            {
                dctMode = 4;
                BlockDCT4x4(dataBlock, input, pred, width);
                ncoefblck[k] = 26;
            }
            else /* full-DCT */
            {
                dctMode = 8;
                BlockDCT8x8(dataBlock, input, pred, width);
                ncoefblck[k] = 64;
            }

            CBP |= (*BlockQuantDequantMPEG)(dataBlock, output, QP, qmat,
                                            bitmapcol, bitmaprow + k, bitmapzz, dctMode, k, dc_scaler); //
        }

        dctMode = 8; /* for mismatch handle */
        /* reconstruct this block into rec (IDCT + motion comp) */
        BlockIDCTMotionComp(dataBlock, bitmapcol, bitmaprow[k], dctMode,
                            rec, pred, (lx << 1) | (intra));

        output += 64;
        if (!(k&1))
        {
            pred += 8;
        }
        else
        {
            pred += 120;    /* next row of 8x8 blocks in the 16-pitch predictedMB */
        }
    }

    video->headerInfo.CBP[mbnum] = CBP; /*  5/18/2001 */

    return PV_SUCCESS;
}
#endif

/* ======================================================================== */
/*  Function : getBlockSAV( )                                               */
/*  Date     : 8/10/2000                                                    */
/*  Purpose  : Get SAV (sum of absolute values) for one 8x8 block           */
/*  In/out   : block[64] contain one block data                             */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
/* can be written in MMX or SSE,  2/22/2001 */
Int getBlockSAV(Short block[])
{
    Int i, val, sav = 0;

    i = 8;
    while (i--)     /* 8 coefficients per iteration, 8 iterations */
    {
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
        val = *block++;
        if (val > 0) sav += val;
        else sav -= val;
    }
    return sav;
}

/* ======================================================================== */
/*  Function : Sad8x8( )                                                    */
/*  Date     : 8/10/2000                                                    */
/*  Purpose  : Find SAD between prev block and current block                */
/*  In/out   : Previous and current frame block pointers, and frame width   */
/*  Return   :                                                              */
/*  Modified :                                                              */
/*           8/15/01,  - do 4 pixel at a time    assuming 32 bit register   */
/* ======================================================================== */
Int Sad8x8(UChar *cur, UChar *prev, Int width)
{
    UChar *end = cur + (width << 3);
    Int sad = 0;
    Int *curInt = (Int*) cur;
    Int *prevInt = (Int*) prev;
    Int cur1, cur2, prev1, prev2;
    UInt mask, sgn_msk = 0x80808080;
    Int sum2 = 0, sum4 = 0;
    Int tmp;
    do
    {
        /* SWAR: four packed byte differences per 32-bit subtraction */
        mask    = ~(0xFF00);
        cur1    = curInt[1];        /* load cur[4..7] */
        cur2    = curInt[0];
        curInt += (width >> 2);     /* load cur[0..3] and +=lx */
        prev1   = prevInt[1];
        prev2   = prevInt[0];
        prevInt += 4;
        tmp     = prev2 ^ cur2;
        cur2    = prev2 - cur2;
        tmp     = tmp ^ cur2;       /* (^)^(-) last bit is one if carry */
        tmp     = sgn_msk & ((UInt)tmp >> 1); /* check the sign of each byte */
        if (cur2 < 0) tmp = tmp | 0x80000000; /* corcurt sign of first byte */
        tmp     = (tmp << 8) - tmp; /* carry borrowed bytes are marked with 0x1FE */
        cur2    = cur2 + (tmp >> 7); /* negative bytes is added with 0xFF, -1 */
        cur2    = cur2 ^(tmp >> 7); /* take absolute by inverting bits (EOR) */
        tmp     = prev1 ^ cur1;
        cur1    = prev1 - cur1;
        tmp     = tmp ^ cur1;       /* (^)^(-) last bit is one if carry */
        tmp     = sgn_msk & ((UInt)tmp >> 1); /* check the sign of each byte */
        if (cur1 < 0) tmp = tmp | 0x80000000; /* corcurt sign of first byte */
        tmp     = (tmp << 8) - tmp; /* carry borrowed bytes are marked with 0x1FE */
        cur1    = cur1 + (tmp >> 7); /* negative
bytes is added with 0xFF, -1 */ cur1 = cur1 ^(tmp >> 7); /* take absolute by inverting bits (EOR) */ sum4 = sum4 + cur1; cur1 = cur1 & (mask << 8); /* mask first and third bytes */ sum2 = sum2 + ((UInt)cur1 >> 8); sum4 = sum4 + cur2; cur2 = cur2 & (mask << 8); /* mask first and third bytes */ sum2 = sum2 + ((UInt)cur2 >> 8); } while ((UInt)curInt < (UInt)end); cur1 = sum4 - (sum2 << 8); /* get even-sum */ cur1 = cur1 + sum2; /* add 16 bit even-sum and odd-sum*/ cur1 = cur1 + (cur1 << 16); /* add upper and lower 16 bit sum */ sad = ((UInt)cur1 >> 16); /* take upper 16 bit */ return sad; } /* ======================================================================== */ /* Function : getBlockSum( ) */ /* Date : 8/10/2000 */ /* Purpose : Find summation of value within a block. */ /* In/out : Pointer to current block in a frame and frame width */ /* Return : */ /* Modified : */ /* 8/15/01, - SIMD 4 pixels at a time */ /* ======================================================================== */ Int getBlockSum(UChar *cur, Int width) { Int sad = 0, sum4 = 0, sum2 = 0; UChar *end = cur + (width << 3); Int *curInt = (Int*)cur; UInt mask = ~(0xFF00); Int load1, load2; do { load1 = curInt[1]; load2 = curInt[0]; curInt += (width >> 2); sum4 += load1; load1 = load1 & (mask << 8); /* even bytes */ sum2 += ((UInt)load1 >> 8); /* sum even bytes, 16 bit */ sum4 += load2; load2 = load2 & (mask << 8); /* even bytes */ sum2 += ((UInt)load2 >> 8); /* sum even bytes, 16 bit */ } while ((UInt)curInt < (UInt)end); load1 = sum4 - (sum2 << 8); /* get even-sum */ load1 = load1 + sum2; /* add 16 bit even-sum and odd-sum*/ load1 = load1 + (load1 << 16); /* add upper and lower 16 bit sum */ sad = ((UInt)load1 >> 16); /* take upper 16 bit */ return sad; } ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/fastcodemb.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 
PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* ===================================================================== */ /* File: FastCodeMB.h */ /* Description: This file contains structure and function prototypes used in FastCodeMB() function. When it is decided to use FastCodeMB instead of CodeMB, all of this prototypes should be migrated to mp4enc_lib.h. */ /* Rev: */ /* Created: 8/14/01 */ /* //////////////////////////////////////////////////////////////////////// */ typedef struct struct_approxDCT approxDCT; struct struct_approxDCT { const Int *scale; Int(*DCT)(Int block[ ], Int coeff[ ], approxDCT *); // Threshold value for H.263 Quantizer Int th_app_all[8]; Int th_app_odd[8]; Int th_app_even[8]; Int th_app_even1[8]; Int th_app_even2[8]; }; struct QPstruct { Int QPx2 ; Int QP; Int QPdiv2; Int QPx2plus; Int Addition; }; /*---- FastCodeMB.c -----*/ void initCodeMB(approxDCT *function, Int QP); PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[], Int offset); PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[], Int offset); Int getBlockSAV(Int block[]); Int Sad8x8(UChar *rec, UChar *prev, Int lx); Int getBlockSum(UChar *rec, Int lx); /*---- AppVCA_dct.c -----*/ Int AppVCA1_dct(Int block[], Int out[ ], approxDCT *function); Int AppVCA2_dct(Int block[], Int out[ ], approxDCT *function); Int AppVCA3_dct(Int block[], Int 
out[ ], approxDCT *function); Int AppVCA4_dct(Int block[], Int out[ ], approxDCT *function); Int AppVCA5_dct(Int block[], Int out[ ], approxDCT *function); /*---- FastQuant.c -----*/ Int cal_dc_scalerENC(Int QP, Int type) ; Int BlockQuantDequantH263Inter(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam, UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dctMode, Int comp, Int dummy); Int BlockQuantDequantH263Intra(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam, UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dctMode, Int comp, Int dc_scaler); Int BlockQuantDequantH263DCInter(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam, UChar *bitmaprow, UInt *bitmapzz, Int dummy); Int BlockQuantDequantH263DCIntra(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam, UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler); Int BlockQuantDequantMPEGInter(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat, UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int DctMode, Int comp, Int dc_scaler); Int BlockQuantDequantMPEGIntra(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat, UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int DctMode, Int comp, Int dc_scaler); Int BlockQuantDequantMPEGDCInter(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat, UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy); Int BlockQuantDequantMPEGDCIntra(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat, UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler); /*---- FastIDCT.c -----*/ void BlockIDCTMotionComp(Int *block, UChar *bitmapcol, UChar bitmaprow, Int dctMode, UChar *rec, Int lx, Int intra); /*---- motion_comp.c -----*/ void PutSkippedBlock(UChar *rec, UChar *prev, Int lx); ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/fastidct.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache 
License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#include "oscl_base_macros.h" // for OSCL_UNUSED_ARG
#include "mp4def.h"
#include "mp4enc_lib.h"
#include "mp4lib_int.h"
#include "dct.h"

/* Add reconstruction value to *rec, clip to [0, mask] and advance rec. */
#define ADD_CLIP { \
    tmp = *rec + tmp; \
    if((UInt)tmp > mask) tmp = mask&(~(tmp>>31)); \
    *rec++ = tmp; \
}

/* Clip tmp to [0, mask], store to *rec and advance rec (no prediction add). */
#define INTRA_CLIP { \
    if((UInt)tmp > mask) tmp = mask&(~(tmp>>31)); \
    *rec++ = tmp; \
}

/* Clamp x into the 8-bit pixel range [0, 255] without branching on sign. */
#define CLIP_RESULT(x) if((UInt)x > 0xFF){x = 0xFF & (~(x>>31));}
/* Add byte 0..3 of the packed prediction word to x, then clip to 8 bits. */
#define ADD_AND_CLIP1(x) x += (pred_word&0xFF); CLIP_RESULT(x);
#define ADD_AND_CLIP2(x) x += ((pred_word>>8)&0xFF); CLIP_RESULT(x);
#define ADD_AND_CLIP3(x) x += ((pred_word>>16)&0xFF); CLIP_RESULT(x);
#define ADD_AND_CLIP4(x) x += ((pred_word>>24)&0xFF); CLIP_RESULT(x);

/* 1-D column IDCT, no non-zero coefficients: nothing to do. */
void idct_col0(Short *blk)
{
    OSCL_UNUSED_ARG(blk);
    return;
}

/* Column IDCT when only the DC coefficient blk[0] is non-zero:
   every output sample of the column equals blk[0] << 3. */
void idct_col1(Short *blk)
{
    blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56] =
        blk[0] << 3;
    return ;
}

/* Column IDCT with only blk[0] (DC) and blk[8] (first AC) non-zero. */
void idct_col2(Short *blk)
{
    int32 x0, x1, x3, x5, x7;//, x8;

    x1 = blk[8];
    x0 = ((int32)blk[0] << 11) + 128;
    /* both upper and lower*/
    x7 = W7 * x1;
    x1 = W1 * x1;
    x3 = x7;
    x5 = (181 * (x1 - x7) + 128) >> 8;
    x7 = (181 * (x1 + x7) + 128) >> 8;
    blk[0] = (x0 + x1) >> 8;
    blk[8] = (x0 + x7) >> 8;
    blk[16] = (x0 + x5) >> 8;
    blk[24] = (x0 + x3) >> 8;
    blk[56] = (x0 - x1) >> 8;
    blk[48] = (x0 - x7) >> 8;
    blk[40] = (x0 - x5) >> 8;
    blk[32] = (x0 - x3) >> 8;
    return ;
}

/* Column IDCT with only blk[0], blk[8] and blk[16] non-zero. */
void idct_col3(Short *blk)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

    x2 = blk[16];
    x1 = blk[8];
    x0 = ((int32)blk[0] << 11) + 128;

    x4 = x0;
    x6 = W6 * x2;
    x2 = W2 * x2;
    x8 = x0 - x2;
    x0 += x2;
    x2 = x8;
    x8 = x4 - x6;
    x4 += x6;
    x6 = x8;

    x7 = W7 * x1;
    x1 = W1 * x1;
    x3 = x7;
    x5 = (181 * (x1 - x7) + 128) >> 8;
    x7 = (181 * (x1 + x7) + 128) >> 8;

    blk[0] = (x0 + x1) >> 8;
    blk[8] = (x4 + x7) >> 8;
    blk[16] = (x6 + x5) >> 8;
    blk[24] = (x2 + x3) >> 8;
    blk[56] = (x0 - x1) >> 8;
    blk[48] = (x4 - x7) >> 8;
    blk[40] = (x6 - x5) >> 8;
    blk[32] = (x2 - x3) >> 8;
    return ;
}

/* Column IDCT with only the first four coefficients (blk[0..24]) non-zero. */
void idct_col4(Short *blk)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

    x2 = blk[16];
    x1 = blk[8];
    x3 = blk[24];
    x0 = ((int32)blk[0] << 11) + 128;

    x4 = x0;
    x6 = W6 * x2;
    x2 = W2 * x2;
    x8 = x0 - x2;
    x0 += x2;
    x2 = x8;
    x8 = x4 - x6;
    x4 += x6;
    x6 = x8;

    x7 = W7 * x1;
    x1 = W1 * x1;
    x5 = W3 * x3;
    x3 = -W5 * x3;
    x8 = x1 - x5;
    x1 += x5;
    x5 = x8;
    x8 = x7 - x3;
    x3 += x7;
    x7 = (181 * (x5 + x8) + 128) >> 8;
    x5 = (181 * (x5 - x8) + 128) >> 8;

    blk[0] = (x0 + x1) >> 8;
    blk[8] = (x4 + x7) >> 8;
    blk[16] = (x6 + x5) >> 8;
    blk[24] = (x2 + x3) >> 8;
    blk[56] = (x0 - x1) >> 8;
    blk[48] = (x4 - x7) >> 8;
    blk[40] = (x6 - x5) >> 8;
    blk[32] = (x2 - x3) >> 8;
    return ;
}

#ifndef SMALL_DCT

/* Column IDCT when only blk[8] (the first AC coefficient) is non-zero. */
void idct_col0x40(Short *blk)
{
    int32 x1, x3, x5, x7;//, x8;

    x1 = blk[8];
    /* both upper and lower*/
    x7 = W7 * x1;
    x1 = W1 * x1;
    x3 = x7;
    x5 = (181 * (x1 - x7) + 128) >> 8;
    x7 = (181 * (x1 + x7) + 128) >> 8;
    blk[0] = (128 + x1) >> 8;
    blk[8] = (128 + x7) >> 8;
    blk[16] = (128 + x5) >> 8;
    blk[24] = (128 + x3) >> 8;
    blk[56] = (128 - x1) >> 8;
    blk[48] = (128 - x7) >> 8;
    blk[40] = (128 - x5) >> 8;
    blk[32] = (128 - x3) >> 8;
    return ;
}

/* Column IDCT when only blk[16] is non-zero. */
void idct_col0x20(Short *blk)
{
    int32 x0, x2, x4, x6;

    x2 = blk[16];
    x6 = W6 * x2;
    x2 = W2 * x2;
    x0 = 128 + x2;
    x2 = 128 - x2;
    x4 = 128 + x6;
    x6 = 128 - x6;
    blk[0] = (x0) >> 8;
    blk[56] = (x0) >> 8;
    blk[8] = (x4) >> 8;
    blk[48] = (x4) >> 8;
    blk[16] = (x6) >> 8;
    blk[40] = (x6) >> 8;
    blk[24] = (x2) >> 8;
    blk[32] = (x2) >> 8;
    return ;
}

/* Column IDCT when only blk[24] is non-zero. */
void idct_col0x10(Short *blk)
{
    int32 x1, x3, x5, x7;

    x3 = blk[24];
    x1 = W3 * x3;
    x3 = W5 * x3;
    x7 = (181 * (x3 - x1) + 128) >> 8;
    x5 = (-181 * (x1 + x3) + 128) >> 8;
    blk[0] = (128 + x1) >> 8;
    blk[8] = (128 + x7) >> 8;
    blk[16] = (128 + x5) >> 8;
    blk[24] = (128 - x3) >> 8;
    blk[56] = (128 - x1) >> 8;
    blk[48] = (128 - x7) >> 8;
    blk[40] = (128 - x5) >> 8;
    blk[32] = (128 + x3) >> 8;
    return ;
}
#endif /* SMALL_DCT */

/* Full 8-point 1-D column IDCT (all coefficients may be non-zero);
   fixed-point even/odd butterfly decomposition with >>8 output scaling. */
void idct_col(Short *blk)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

    x1 = (int32)blk[32] << 11;
    x2 = blk[48];
    x3 = blk[16];
    x4 = blk[8];
    x5 = blk[56];
    x6 = blk[40];
    x7 = blk[24];
    x0 = ((int32)blk[0] << 11) + 128;

    /* first stage */
    x8 = W7 * (x4 + x5);
    x4 = x8 + (W1 - W7) * x4;
    x5 = x8 - (W1 + W7) * x5;
    x8 = W3 * (x6 + x7);
    x6 = x8 - (W3 - W5) * x6;
    x7 = x8 - (W3 + W5) * x7;

    /* second stage */
    x8 = x0 + x1;
    x0 -= x1;
    x1 = W6 * (x3 + x2);
    x2 = x1 - (W2 + W6) * x2;
    x3 = x1 + (W2 - W6) * x3;
    x1 = x4 + x6;
    x4 -= x6;
    x6 = x5 + x7;
    x5 -= x7;

    /* third stage */
    x7 = x8 + x3;
    x8 -= x3;
    x3 = x0 + x2;
    x0 -= x2;
    x2 = (181 * (x4 + x5) + 128) >> 8;
    x4 = (181 * (x4 - x5) + 128) >> 8;

    /* fourth stage */
    blk[0] = (x7 + x1) >> 8;
    blk[8] = (x3 + x2) >> 8;
    blk[16] = (x0 + x4) >> 8;
    blk[24] = (x8 + x6) >> 8;
    blk[32] = (x8 - x6) >> 8;
    blk[40] = (x0 - x4) >> 8;
    blk[48] = (x3 - x2) >> 8;
    blk[56] = (x7 - x1) >> 8;
    return ;
}

/* This function should not be called at all ****/
void idct_row0Inter(Short *srce, UChar *rec, Int lx)
{
    OSCL_UNUSED_ARG(srce);
    OSCL_UNUSED_ARG(rec);
    OSCL_UNUSED_ARG(lx);
    return;
}

/* Row IDCT + prediction add when only the DC coefficient of each row is
   non-zero: the whole 8-pel row gets the same DC offset added to the
   prediction already stored at rec, 4 packed bytes at a time. */
void idct_row1Inter(Short *blk, UChar *rec, Int lx)
{
    int tmp;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    blk -= 8;

    while (i--)
    {
        tmp = (*(blk += 8) + 32) >> 6;
        *blk = 0;

        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = tmp + (pred_word & 0xFF);
        CLIP_RESULT(res);
        res2 = tmp + ((pred_word >> 8) & 0xFF);
        CLIP_RESULT(res2);
        dst_word = (res2 << 8) | res;
        res = tmp + ((pred_word >> 16) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 16);
        res = tmp +
              ((pred_word >> 24) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = tmp + (pred_word & 0xFF);
        CLIP_RESULT(res);
        res2 = tmp + ((pred_word >> 8) & 0xFF);
        CLIP_RESULT(res2);
        dst_word = (res2 << 8) | res;
        res = tmp + ((pred_word >> 16) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 16);
        res = tmp + ((pred_word >> 24) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return;
}

/* Row IDCT + prediction add when only row coefficients 0 and 1 are
   non-zero ("shortcut" path); results are packed 4 bytes per store. */
void idct_row2Inter(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x4, x5;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    blk -= 8;

    while (i--)
    {
        /* shortcut */
        x4 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;

        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage */
        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (x0 + x4) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 + x5) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (x0 - x5) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 - x1) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x4) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row IDCT + prediction add when only row coefficients 0..2 are non-zero. */
void idct_row3Inter(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    blk -= 8;

    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;
        x7 = (181 * (x1 + x7) + 128) >> 8;

        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (x2 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row IDCT + prediction add when only row coefficients 0..3 are non-zero. */
void idct_row4Inter(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    blk -= 8;

    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x3 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x5 = (W3
              * x3 + 4) >> 3;
        x3 = (- W5 * x3 + 4) >> 3;
        x8 = x1 - x5;
        x1 += x5;
        x5 = x8;
        x8 = x7 - x3;
        x3 += x7;
        x7 = (181 * (x5 + x8) + 128) >> 8;
        x5 = (181 * (x5 - x8) + 128) >> 8;

        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (x2 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

#ifndef SMALL_DCT

/* Row IDCT + prediction add when only row coefficient 1 is non-zero
   (the DC term is replaced by the constant rounding bias 8192). */
void idct_row0x40Inter(Short *blk, UChar *rec, Int lx)
{
    int32 x1, x2, x4, x5;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;

    while (i--)
    {
        /* shortcut */
        x4 = blk[1];
        blk[1] = 0;
        blk += 8;   /* for proper rounding in the fourth stage */

        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;

        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage */
        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (8192 + x4) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 + x1) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 + x5) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (8192 - x5) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 - x1) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 - x4) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row IDCT + prediction add when only row coefficient 2 is non-zero. */
void idct_row0x20Inter(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x2, x4, x6;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;

    while (i--)
    {
        x2 = blk[2];
        blk[2] = 0;
        blk += 8;   /* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x0 = 8192 + x2;
        x2 = 8192 - x2;
        x4 = 8192 + x6;
        x6 = 8192 - x6;

        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (x0) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (x2) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row IDCT + prediction add when only row coefficient 3 is non-zero. */
void idct_row0x10Inter(Short *blk, UChar *rec, Int lx)
{
    int32 x1, x3, x5, x7;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;

    while (i--)
    {
        x3 = blk[3];
        blk[3] = 0;
        blk += 8;

        x1 = (W3 * x3 + 4) >> 3;
        x3 = (-W5 * x3 + 4) >> 3;

        x7 = (-181 * (x3 + x1) + 128) >> 8;
        x5 = (181 * (x3 - x1) + 128) >> 8;

        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (8192 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (8192 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}
#endif /* SMALL_DCT */

/* Full 8-point row IDCT + prediction add (all row coefficients may be
   non-zero); the Short rows are interleaved (stride 8), hence blk[9..15]. */
void idct_rowInter(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    blk -= 8;

    while (i--)
    {
        x1 = (int32)blk[12] << 8;
        blk[12] = 0;
        x2 = blk[14];
        blk[14] = 0;
        x3 = blk[10];
        blk[10] = 0;
        x4 = blk[9];
        blk[9] = 0;
        x5 = blk[15];
        blk[15] = 0;
        x6 = blk[13];
        blk[13] = 0;
        x7 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        /* first stage */
        x8 = W7 * (x4 + x5) + 4;
        x4 = (x8 + (W1 - W7) * x4) >> 3;
        x5 = (x8 - (W1 + W7) * x5) >> 3;
        x8 = W3 * (x6 + x7) + 4;
        x6 = (x8 - (W3 - W5) * x6) >> 3;
        x7 = (x8 - (W3 + W5) * x7) >> 3;

        /* second stage */
        x8 = x0 + x1;
        x0 -= x1;
        x1 = W6 * (x3 + x2) + 4;
        x2 = (x1 - (W2 + W6) * x2) >> 3;
        x3 = (x1 + (W2 - W6) * x3) >> 3;
        x1 = x4 + x6;
        x4 -= x6;
        x6 = x5 + x7;
        x5 -= x7;

        /* third stage */
        x7 = x8 + x3;
        x8 -= x3;
        x3 = x0 + x2;
        x0 -= x2;
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x4 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage */
        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */
        res = (x7 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x3 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 + x4) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x8 + x6) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */

        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */
        res = (x8 - x6) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 - x4) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x3 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x7 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return;
}

/* This function should not be called at all ****/
void idct_row0Intra(Short *srce, UChar *rec, Int lx)
{
    OSCL_UNUSED_ARG(srce);
    OSCL_UNUSED_ARG(rec);
    OSCL_UNUSED_ARG(lx);
    return;
}

/* Intra row IDCT when only the DC coefficient is non-zero: the clipped
   DC value is replicated into all 4 bytes of a word and stored twice. */
void idct_row1Intra(Short *blk, UChar *rec, Int lx)
{
    int32 tmp;
    int i = 8;

    rec -= lx;
    blk -= 8;
    while (i--)
    {
        tmp = ((*(blk += 8) + 32) >> 6);
        *blk = 0;
        CLIP_RESULT(tmp)
        tmp |= (tmp << 8);
        tmp |= (tmp << 16);
        *((uint32*)(rec += lx)) = tmp;
        *((uint32*)(rec + 4)) = tmp;
    }
    return;
}

/* Intra row IDCT when only row coefficients 0 and 1 are non-zero;
   clipped results are written directly (no prediction add). */
void idct_row2Intra(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x4, x5;
    int res, res2;
    uint32 dst_word;
    int i = 8;

    rec -= lx;
    blk -= 8;
    while (i--)
    {
        /* shortcut */
        x4 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;

        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage */
        res = ((x0 + x4) >> 14);
        CLIP_RESULT(res)
        res2 = ((x0 + x2) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x0 + x1) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x0 + x5) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;

        res = ((x0 - x5) >> 14);
        CLIP_RESULT(res)
        res2 = ((x0 - x1) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x0 - x2) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x0 - x4) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word;
    }
    return ;
}

/* Intra row IDCT when only row coefficients 0..2 are non-zero. */
void idct_row3Intra(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int res, res2;
    uint32 dst_word;
    int i = 8;

    rec -= lx;
    blk -= 8;
    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;/* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;
        x7 = (181 * (x1 + x7) + 128) >> 8;

        res = ((x0 + x1) >> 14);
        CLIP_RESULT(res)
        res2 = ((x4 + x7) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x6 + x5) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x2 + x3) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;

        res = ((x2 - x3) >> 14);
        CLIP_RESULT(res)
        res2 = ((x6 - x5) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x4 - x7) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x0 - x1) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word;
    }
    return ;
}

/* Intra row IDCT when only row coefficients 0..3 are non-zero. */
void idct_row4Intra(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int res, res2;
    uint32 dst_word;
    int i = 8;

    rec -= lx;
    blk -= 8;
    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x3 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;

        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x5 = (W3 * x3 + 4) >> 3;
        x3 = (- W5 * x3 + 4) >> 3;
        x8 = x1 - x5;
        x1 += x5;
        x5 = x8;
        x8 = x7 - x3;
        x3 += x7;
        x7 = (181 * (x5 + x8) + 128) >> 8;
        x5 = (181 * (x5 - x8) + 128) >> 8;

        res = ((x0 + x1) >> 14);
        CLIP_RESULT(res)
        res2 = ((x4 + x7) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x6 + x5) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x2 + x3) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;

        res = ((x2 - x3) >> 14);
        CLIP_RESULT(res)
        res2 = ((x6 - x5) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x4 - x7) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x0 - x1) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word;
    }
    return ;
}

#ifndef SMALL_DCT

/* Intra row IDCT when only row coefficient 1 is non-zero. */
void idct_row0x40Intra(Short *blk, UChar *rec, Int lx)
{
    int32  x1, x2, x4, x5;
    int res, res2;
    uint32 dst_word;
    int i = 8;

    rec -= lx;
    while (i--)
    {
        /* shortcut */
        x4 = blk[1];
        blk[1] = 0;
        blk += 8;

        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;

        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage */
        res = ((8192 + x4) >> 14);
        CLIP_RESULT(res)
        res2 = ((8192 + x2) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((8192 + x1) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((8192 + x5) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;

        res = ((8192 - x5) >> 14);
        CLIP_RESULT(res)
        res2 = ((8192 - x1) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((8192 - x2) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((8192 - x4) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word;
    }
    return ;
}

/* Intra row IDCT when only row coefficient 2 is non-zero. */
void idct_row0x20Intra(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x2, x4, x6;
    int res, res2;
    uint32 dst_word;
    int i = 8;

    rec -= lx;
    while (i--)
    {
        x2 = blk[2];
        blk[2] = 0;
        blk += 8;
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x0 = 8192 + x2;
        x2 = 8192 - x2;
        x4 = 8192 + x6;
        x6 = 8192 - x6;

        res = ((x0) >> 14);
        CLIP_RESULT(res)
        res2 = ((x4) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x6) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x2) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;

        res = ((x2) >> 14);
        CLIP_RESULT(res)
        res2 = ((x6) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((x4) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((x0) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word;
    }
    return ;
}

/* Intra row IDCT when only row coefficient 3 is non-zero. */
void idct_row0x10Intra(Short *blk, UChar *rec, Int lx)
{
    int32 x1, x3, x5, x7;
    int res, res2;
    uint32 dst_word;
    int i = 8;

    rec -= lx;
    while (i--)
    {
        x3 = blk[3];
        blk[3] = 0 ;
        blk += 8;

        x1 = (W3 * x3 + 4) >> 3;
        x3 = (W5 * x3 + 4) >> 3;
        x7 = (181 * (x3 - x1) + 128) >> 8;
        x5 = (-181 * (x1 + x3) + 128) >> 8;

        res = ((8192 + x1) >> 14);
        CLIP_RESULT(res)
        res2 = ((8192 + x7) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((8192 + x5) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((8192 - x3) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;

        res = ((8192 + x3) >> 14);
        CLIP_RESULT(res)
        res2 = ((8192 - x5) >> 14);
        CLIP_RESULT(res2)
        dst_word = (res2 << 8) | res;
        res = ((8192 - x7) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 16);
        res = ((8192 - x1) >> 14);
        CLIP_RESULT(res)
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word;
    }
    return ;
}
#endif /* SMALL_DCT */

/* Full 8-point intra row IDCT (all coefficients may be non-zero);
   clipped results are stored directly, 4 packed bytes per write. */
void idct_rowIntra(Short *blk, UChar *rec, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    int res, res2;
    uint32 dst_word;

    blk -= 8;
    rec -= lx;

    while (i--)
    {
        x1 = (int32)blk[12] << 8;
        blk[12] = 0;
        x2 = blk[14];
        blk[14] = 0;
        x3 = blk[10];
        blk[10] = 0;
        x4 = blk[9];
        blk[9] = 0;
        x5 = blk[15];
        blk[15] = 0;
        x6 = blk[13];
        blk[13] = 0;
        x7 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;   /* for proper rounding in the fourth stage */

        /* first stage */
        x8 = W7 * (x4 + x5) + 4;
        x4 = (x8 + (W1 - W7) * x4) >> 3;
        x5 = (x8 - (W1 + W7) * x5) >> 3;
        x8 = W3 * (x6 + x7) + 4;
        x6 = (x8 - (W3 - W5) * x6) >> 3;
        x7 = (x8 - (W3 + W5) * x7) >> 3;

        /* second
stage */ x8 = x0 + x1; x0 -= x1; x1 = W6 * (x3 + x2) + 4; x2 = (x1 - (W2 + W6) * x2) >> 3; x3 = (x1 + (W2 - W6) * x3) >> 3; x1 = x4 + x6; x4 -= x6; x6 = x5 + x7; x5 -= x7; /* third stage */ x7 = x8 + x3; x8 -= x3; x3 = x0 + x2; x0 -= x2; x2 = (181 * (x4 + x5) + 128) >> 8; x4 = (181 * (x4 - x5) + 128) >> 8; /* fourth stage */ res = ((x7 + x1) >> 14); CLIP_RESULT(res) res2 = ((x3 + x2) >> 14); CLIP_RESULT(res2) dst_word = res | (res2 << 8); res = ((x0 + x4) >> 14); CLIP_RESULT(res) dst_word |= (res << 16); res = ((x8 + x6) >> 14); CLIP_RESULT(res) dst_word |= (res << 24); *((uint32*)(rec += lx)) = dst_word; res = ((x8 - x6) >> 14); CLIP_RESULT(res) res2 = ((x0 - x4) >> 14); CLIP_RESULT(res2) dst_word = res | (res2 << 8); res = ((x3 - x2) >> 14); CLIP_RESULT(res) dst_word |= (res << 16); res = ((x7 - x1) >> 14); CLIP_RESULT(res) dst_word |= (res << 24); *((uint32*)(rec + 4)) = dst_word; } return; } /* This function should not be called at all ****/ void idct_row0zmv(Short *srce, UChar *rec, UChar *pred, Int lx) { OSCL_UNUSED_ARG(srce); OSCL_UNUSED_ARG(rec); OSCL_UNUSED_ARG(pred); OSCL_UNUSED_ARG(lx); return; } void idct_row1zmv(Short *blk, UChar *rec, UChar *pred, Int lx) { int tmp; int i = 8; uint32 pred_word, dst_word; int res, res2; /* preset the offset, such that we can take advantage pre-offset addressing mode */ pred -= 16; rec -= lx; blk -= 8; while (i--) { tmp = (*(blk += 8) + 32) >> 6; *blk = 0; pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */ res = tmp + (pred_word & 0xFF); CLIP_RESULT(res); res2 = tmp + ((pred_word >> 8) & 0xFF); CLIP_RESULT(res2); dst_word = (res2 << 8) | res; res = tmp + ((pred_word >> 16) & 0xFF); CLIP_RESULT(res); dst_word |= (res << 16); res = tmp + ((pred_word >> 24) & 0xFF); CLIP_RESULT(res); dst_word |= (res << 24); *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */ pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */ res = tmp + (pred_word & 0xFF); CLIP_RESULT(res); res2 = tmp + ((pred_word 
>> 8) & 0xFF);
        CLIP_RESULT(res2);
        dst_word = (res2 << 8) | res;
        res = tmp + ((pred_word >> 16) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 16);
        res = tmp + ((pred_word >> 24) & 0xFF);
        CLIP_RESULT(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return;
}

/* Row pass for an INTER block with DC + first AC coefficient per row (dispatch
 * slot 2): partial Chen IDCT, then the residual is added to the prediction
 * bytes via ADD_AND_CLIP1..4 (macros defined elsewhere -- presumably each
 * extracts byte 0..3 of `pred_word`, adds and saturates to [0,255]; TODO
 * confirm against the macro definitions).  Coefficients are cleared as read. */
void idct_row2zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x0, x1, x2, x4, x5;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    blk -= 8;
    while (i--)
    {
        /* shortcut */
        x4 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;   /* DC + rounding bias */
        *blk = 0;  /* for proper rounding in the fourth stage */
        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;
        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;
        /* fourth stage */
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (x0 + x4) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 + x5) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (x0 - x5) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 - x1) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x4) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row pass for an INTER block with the first three coefficients per row
 * (dispatch slot 3): DC + blk[1] + blk[2] path of the Chen IDCT, then
 * residual added to prediction with saturation. */
void idct_row3zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    blk -= 8;
    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;  /* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */
        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;
        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;
        x7 = (181 * (x1 + x7) + 128) >> 8;
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (x2 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row pass for an INTER block with the first four coefficients per row
 * (dispatch slot 4): DC + blk[1..3] path of the Chen IDCT, residual added to
 * prediction with saturation. */
void idct_row4zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    blk -= 8;
    while (i--)
    {
        x2 = blk[10];
        blk[10] = 0;
        x1 = blk[9];
        blk[9] = 0;
        x3 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;  /* for proper rounding in the fourth stage */
        x4 = x0;
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x8 = x0 - x2;
        x0 += x2;
        x2 = x8;
        x8 = x4 - x6;
        x4 += x6;
        x6 = x8;
        x7 = (W7 * x1 + 4) >> 3;
        x1 = (W1 * x1 + 4) >> 3;
        x5 = (W3 * x3 + 4) >> 3;
        x3 = (- W5 * x3 + 4) >> 3;
        x8 = x1 - x5;
        x1 += x5;
        x5 = x8;
        x8 = x7 - x3;
        x3 += x7;
        x7 = (181 * (x5 + x8) + 128) >> 8;
        x5 = (181 * (x5 - x8) + 128) >> 8;
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (x0 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (x2 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

#ifndef SMALL_DCT

/* Row pass for an INTER block with only blk[1] per row nonzero (bitmap 0x40):
 * same basis shortcut as idct_row0x40Intra, but adding the residual to the
 * motion-compensated prediction instead of storing directly. */
void idct_row0x40zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x1, x2, x4, x5;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    while (i--)
    {
        /* shortcut */
        x4 = blk[1];
        blk[1] = 0;
        blk += 8;
        /* for proper rounding in the fourth stage */
        /* first stage */
        x5 = (W7 * x4 + 4) >> 3;
        x4 = (W1 * x4 + 4) >> 3;
        /* third stage */
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x1 = (181 * (x4 - x5) + 128) >> 8;
        /* fourth stage */
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (8192 + x4) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 + x1) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 + x5) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (8192 - x5) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 - x1) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 - x4) >>
14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row pass for an INTER block with only blk[2] per row nonzero (bitmap 0x20):
 * even-symmetric shortcut, residual added to prediction with saturation. */
void idct_row0x20zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x0, x2, x4, x6;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    while (i--)
    {
        x2 = blk[2];
        blk[2] = 0;
        blk += 8;
        /* for proper rounding in the fourth stage */
        /* both upper and lower*/
        /* both x2orx6 and x0orx4 */
        x6 = (W6 * x2 + 4) >> 3;
        x2 = (W2 * x2 + 4) >> 3;
        x0 = 8192 + x2;
        x2 = 8192 - x2;
        x4 = 8192 + x6;
        x6 = 8192 - x6;
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (x0) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x4) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x6) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x2) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (x2) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x6) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x4) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x0) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

/* Row pass for an INTER block with only blk[3] per row nonzero (bitmap 0x10):
 * odd-symmetric shortcut, residual added to prediction with saturation.
 * NOTE(review): uses -W5 here while idct_row0x10Intra uses +W5 with a
 * different sign layout downstream -- both appear in the upstream codebase;
 * verify against reference output before changing either. */
void idct_row0x10zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x1, x3, x5, x7;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    while (i--)
    {
        x3 = blk[3];
        blk[3] = 0;
        blk += 8;
        x1 = (W3 * x3 + 4) >> 3;
        x3 = (-W5 * x3 + 4) >> 3;
        x7 = (-181 * (x3 + x1) + 128) >> 8;
        x5 = (181 * (x3 - x1) + 128) >> 8;
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (8192 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 + x7) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 + x5) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 + x3) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (8192 - x3) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (8192 - x5) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (8192 - x7) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (8192 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return ;
}

#endif /* SMALL_DCT */

/* General row IDCT for an INTER block: full Chen butterfly over all eight
 * coefficients per row (coefficients read from the next-block offsets and
 * cleared as consumed), then the residual is added to the prediction bytes
 * (pred, stride 16) and the saturated result stored to rec (stride lx). */
void idct_rowzmv(Short *blk, UChar *rec, UChar *pred, Int lx)
{
    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
    int i = 8;
    uint32 pred_word, dst_word;
    int res, res2;

    /* preset the offset, such that we can take advantage pre-offset addressing mode */
    rec -= lx;
    pred -= 16;
    blk -= 8;
    while (i--)
    {
        x1 = (int32)blk[12] << 8;
        blk[12] = 0;
        x2 = blk[14];
        blk[14] = 0;
        x3 = blk[10];
        blk[10] = 0;
        x4 = blk[9];
        blk[9] = 0;
        x5 = blk[15];
        blk[15] = 0;
        x6 = blk[13];
        blk[13] = 0;
        x7 = blk[11];
        blk[11] = 0;
        x0 = ((*(blk += 8)) << 8) + 8192;
        *blk = 0;  /* for proper rounding in the fourth stage */
        /* first stage */
        x8 = W7 * (x4 + x5) + 4;
        x4 = (x8 + (W1 - W7) * x4) >> 3;
        x5 = (x8 - (W1 + W7) * x5) >> 3;
        x8 = W3 * (x6 + x7) + 4;
        x6 = (x8 - (W3 - W5) * x6) >> 3;
        x7 = (x8 - (W3 + W5) * x7) >> 3;
        /* second stage */
        x8 = x0 + x1;
        x0 -= x1;
        x1 = W6 * (x3 + x2) + 4;
        x2 = (x1 - (W2 + W6) * x2) >> 3;
        x3 = (x1 + (W2 - W6) * x3) >> 3;
        x1 = x4 + x6;
        x4 -= x6;
        x6 = x5 + x7;
        x5 -= x7;
        /* third stage */
        x7 = x8 + x3;
        x8 -= x3;
        x3 = x0 + x2;
        x0 -= x2;
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x4 = (181 * (x4 - x5) + 128) >> 8;
        /* fourth stage */
        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */
        res = (x7 + x1) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x3 + x2) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x0 + x4) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x8 + x6) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec += lx)) = dst_word;   /* save 4 bytes to dst */
        pred_word = *((uint32*)(pred + 4));   /* read 4 bytes from pred */
        res = (x8 - x6) >> 14;
        ADD_AND_CLIP1(res);
        res2 = (x0 - x4) >> 14;
        ADD_AND_CLIP2(res2);
        dst_word = (res2 << 8) | res;
        res = (x3 - x2) >> 14;
        ADD_AND_CLIP3(res);
        dst_word |= (res << 16);
        res = (x7 - x1) >> 14;
        ADD_AND_CLIP4(res);
        dst_word |= (res << 24);
        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */
    }
    return;
}

/*----------------------------------------------------------------------------
; End Function: idctcol
----------------------------------------------------------------------------*/


/* ======================================================================== */
/*  Function : BlockIDCTMotionComp */
/*  Date     : 10/16/2000 */
/*  Purpose  : fast IDCT routine */
/*  In/out   : */
/*      Int* coeff_in   Dequantized coefficient
        Int block_out   output IDCT coefficient
        Int maxval      clip value */
/*  Modified :  7/31/01, add checking for all-zero and DC-only block. */
/*              do 8 columns at a time */
/*              8/2/01, do column first then row-IDCT. */
/*              8/2/01, remove clipping (included in motion comp). */
/*              8/7/01, combine with motion comp.
*/
/*              8/8/01, use AAN IDCT */
/*              9/4/05, use Chen's IDCT and 16 bit block */
/* ======================================================================== */
/* Dispatcher for the combined inverse DCT + motion compensation of one 8x8
 * block.  `lx_intra` packs two values: bit 0 is the intra flag and the rest
 * is (stride << 1), decoded below.  Fast paths handle the all-zero block
 * (fill with 0 for intra / copy prediction for inter) and the DC-only block
 * (flat fill / DC added to prediction); otherwise the column IDCT is run per
 * nonzero column (via the idctcolVCA table or idct_col) and then one of the
 * row-IDCT variants documented above, chosen from bitmaprow.
 * NOTE(review): ULong stores assume 4-byte aligned rec/pred -- confirm. */
void BlockIDCTMotionComp(Short *block, UChar *bitmapcol, UChar bitmaprow,
                         Int dctMode, UChar *rec, UChar *pred, Int lx_intra)
{
    Int i;
    Int tmp, tmp2;
    ULong tmp4;
    Int bmap;
    Short *ptr = block;
    UChar *endcol;
    UInt mask = 0xFF;
    Int lx = lx_intra >> 1;        /* destination stride */
    Int intra = (lx_intra & 1);    /* 1 = intra block, 0 = inter (add to pred) */

    /* all-zero block */
    if (dctMode == 0 || bitmaprow == 0)
    {
        if (intra)
        {
            /* zero-fill all 8 rows of the 8x8 destination */
            *((ULong*)rec) = *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            *((ULong*)(rec += lx)) = 0;
            *((ULong*)(rec + 4)) = 0;
            return ;
        }
        else /* copy from previous frame */
        {
            /* prediction buffer has a fixed stride of 16 bytes */
            *((ULong*)rec) = *((ULong*)pred);
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));
            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));
            return ;
        }
    }

    /* Test for DC only block */
    if (dctMode == 1 || (bitmaprow == 0x80 && bitmapcol[0] == 0x80))
    {
        i = ((block[0] << 3) + 32) >> 6;   /* rounded DC value */
        block[0] = 0;
        if (intra)
        {
            /* clamp to [0,255] then replicate across all 64 pixels */
            if ((UInt)i > mask) i = mask & (~(i >> 31));
            tmp = i | (i << 8);
            tmp |= (tmp << 16);
            *((ULong*)rec) = *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            *((ULong*)(rec += lx)) = tmp;
            *((ULong*)(rec + 4)) = tmp;
            return ;
        }
        else
        {
            /* add the DC residual to every prediction byte, with clamping */
            endcol = rec + (lx << 3);
            do
            {
                tmp4 = *((ULong*)pred);
                tmp2 = tmp4 & 0xFF;
                tmp2 += i;
                if ((UInt)tmp2 > mask) tmp2 = mask & (~(tmp2 >> 31));
                tmp = (tmp4 >> 8) & 0xFF;
                tmp += i;
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                tmp2 |= (tmp << 8);
                tmp = (tmp4 >> 16) & 0xFF;
                tmp += i;
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                tmp2 |= (tmp << 16);
                tmp = (tmp4 >> 24) & 0xFF;
                tmp += i;
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                tmp2 |= (tmp << 24);
                *((ULong*)rec) = tmp2;
                tmp4 = *((ULong*)(pred + 4));
                tmp2 = tmp4 & 0xFF;
                tmp2 += i;
                if ((UInt)tmp2 > mask) tmp2 = mask & (~(tmp2 >> 31));
                tmp = (tmp4 >> 8) & 0xFF;
                tmp += i;
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                tmp2 |= (tmp << 8);
                tmp = (tmp4 >> 16) & 0xFF;
                tmp += i;
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                tmp2 |= (tmp << 16);
                tmp = (tmp4 >> 24) & 0xFF;
                tmp += i;
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                tmp2 |= (tmp << 24);
                *((ULong*)(rec + 4)) = tmp2;
                rec += lx;
                pred += 16;
            }
            while (rec < endcol);
            return ;
        }
    }

    /* column pass: run per-column IDCT only where the column bitmap says the
     * column has nonzero coefficients; sparse columns use the VCA table */
    for (i = 0; i < dctMode; i++)
    {
        bmap = (Int)bitmapcol[i];
        if (bmap)
        {
            if ((bmap&0xf) == 0)
                (*(idctcolVCA[bmap>>4]))(ptr);
            else
                idct_col(ptr);
        }
        ptr++;
    }

    /* row pass: sparse row pattern dispatches through the VCA tables,
     * otherwise the full row IDCT is used */
    if ((bitmaprow&0xf) == 0)
    {
        if (intra)
            (*(idctrowVCAIntra[(Int)(bitmaprow>>4)]))(block, rec, lx);
        else
            (*(idctrowVCAzmv[(Int)(bitmaprow>>4)]))(block, rec, pred, lx);
    }
    else
    {
        if (intra)
            idct_rowIntra(block, rec, lx);
        else
            idct_rowzmv(block, rec, pred, lx);
    }
}



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/fastquant.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "mp4enc_lib.h"
#include "fastquant_inline.h"

#define siz 63
#define LSL 18

/* imask[k] = single-bit mask for column/row k (MSB first) used to build the
 * nonzero-coefficient bitmaps */
const static UChar imask[8] = {128, 64, 32, 16, 8, 4, 2, 1};
#define SIGN0(a)  ( ((a)<0) ? -1 : (((a)>0) ? 1 : 0) )

/* variable bit precision quantization scale */
/* used to avoid using 32-bit multiplication */
/* scaleArrayV[QP] is a fixed-point reciprocal of 2*QP; the matching shift is
 * derived from QP at the call sites (15 + extra bits per the /15/16/17/18
 * group comments) */
const static Short scaleArrayV[32] = {0, 16384, 8192, 5462,  /* 15 */
                                      4096, 3277, 2731, 2341,
                                      4096, 3641, 3277, 2979, /* 16 */
                                      2731, 2521, 2341, 2185,
                                      4096, 3856, 3641, 3450, /* 17 */
                                      3277, 3121, 2979, 2850,
                                      5462, 5243, 5042, 4855, /* 18 */
                                      4682, 4520, 4370, 4229
                                     };

/* scale for dc_scaler and qmat, note, no value smaller than 8 */
/* scaleArrayV2[d] is a fixed-point reciprocal of d (d >= 8) */
const static Short scaleArrayV2[47] = {0, 0, 0, 0, 0, 0, 0, 0, /* 15 */
                                       4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185,
                                       4096, 3856, 3641, 3450, 3277, 3121, 2979, 2850, /* 16 */
                                       2731, 2622, 2521, 2428, 2341, 2260, 2185, 2115,
                                       4096, 3972, 3856, 3745, 3641, 3543, 3450, 3361, /* 17 */
                                       3277, 3197, 3121, 3049, 2979, 2913, 2850
                                      };

/* AAN scale and zigzag */
/* AANScale[i]: per-coefficient post-scale factors compensating the AAN
 * forward DCT (Q12) */
const static Short AANScale[64] =
{
    /* 0 */ 0x1000, 0x0B89, 0x0C3E, 0x0D9B, 0x1000, 0x0A2E, 0x0EC8, 0x0E7F,
    /* 1 */ 0x0B89, 0x0851, 0x08D4, 0x09CF, 0x0B89, 0x0757, 0x0AA8, 0x0A73,
    /* 2 */ 0x0C3E, 0x08D4, 0x095F, 0x0A6A, 0x0C3E, 0x07CB, 0x0B50, 0x0B18,
    /* 3 */ 0x0D9B, 0x09CF, 0x0A6A, 0x0B92, 0x0D9B, 0x08A8, 0x0C92, 0x0C54,
    /* 4 */ 0x1000, 0x0B89, 0x0C3E, 0x0D9B, 0x1000, 0x0A2E, 0x0EC8, 0x0E7F,
    /* 5 */ 0x0A2E, 0x0757, 0x07CB, 0x08A8, 0x0A2E, 0x067A, 0x0968, 0x0939,
    /* 6 */ 0x0EC8, 0x0AA8, 0x0B50, 0x0C92, 0x0EC8, 0x0968, 0x0DA8, 0x0D64,
    /* 7 */ 0x0E7F, 0x0A73, 0x0B18, 0x0C54, 0x0E7F, 0x0939, 0x0D64, 0x0D23
};

/* ZZTab[i]: zigzag position of coefficient i, pre-doubled (value = 2 * zigzag
 * index), so callers use zz>>1 as the scan position */
const static UShort ZZTab[64] =
{
    /* 0 */ 0x0, 0x2, 0xA, 0xC, 0x1C, 0x1E, 0x36, 0x38,
    /* 1 */ 0x4, 0x8, 0xE, 0x1A, 0x20, 0x34, 0x3A, 0x54,
    /* 2 */ 0x6, 0x10, 0x18, 0x22, 0x32, 0x3C, 0x52, 0x56,
    /* 3 */ 0x12, 0x16, 0x24, 0x30, 0x3E, 0x50, 0x58, 0x6A,
    /* 4 */ 0x14, 0x26, 0x2E, 0x40, 0x4E, 0x5A, 0x68, 0x6C,
    /* 5 */ 0x28, 0x2C, 0x42, 0x4C, 0x5C, 0x66, 0x6E, 0x78,
    /* 6 */ 0x2A, 0x44, 0x4A, 0x5E, 0x64, 0x70, 0x76, 0x7A,
    /* 7 */ 0x46, 0x48, 0x60, 0x62, 0x72, 0x74, 0x7C, 0x7E
};

//Tao need to remove, write another version of abs
//#include

/* ======================================================================== */
/*  Function : cal_dc_scalerENC                                             */
/*  Date     : 01/25/2000                                                   */
/*  Purpose  : calculation of DC quantization scale according to the
               incoming Q and type;                                         */
/*  In/out   :                                                              */
/*      Int Qp      Quantizer                                               */
/*      Int type    1 = luminance block, otherwise chrominance              */
/*  Return   :                                                              */
/*      DC Scaler (piecewise-linear function of QP per the MPEG-4 spec)     */
/*  Modified :                                                              */
/* ======================================================================== */
/* ======================================================================== */
Int cal_dc_scalerENC(Int QP, Int type)
{
    Int dc_scaler;
    if (type == 1)      /* luminance */
    {
        if (QP > 0 && QP < 5)
            dc_scaler = 8;
        else if (QP > 4 && QP < 9)
            dc_scaler = 2 * QP;
        else if (QP > 8 && QP < 25)
            dc_scaler = QP + 8;
        else
            dc_scaler = 2 * QP - 16;
    }
    else                /* chrominance */
    {
        if (QP > 0 && QP < 5)
            dc_scaler = 8;
        else if (QP > 4 && QP < 25)
            dc_scaler = (QP + 13) / 2;
        else
            dc_scaler = QP - 6;
    }
    return dc_scaler;
}

/***********************************************************************
 Function: BlckQuantDequantH263
 Date:  June 15, 1999
 Purpose: Combine BlockQuantH263 and BlockDequantH263ENC
 Input:   coeff=> DCT coefficient
 Output:  qcoeff=> quantized coefficient
          rcoeff=> reconstructed coefficient
          return CBP for this block
          4/2/01,  correct dc_scaler for short_header mode.
          5/14/01, changed the division into LUT multiplication/shift and other
                   modifications to speed up fastQuant/DeQuant (check for zero 1st, rowq LUT,
                   fast bitmaprow mask and borrowed Addition method instead of ifs from , ).
          6/25/01, Further optimization (~100K/QCIF), need more testing/comment before integration.
          7/4/01,  break up Inter / Intra function and merge for different cases.
          7/22/01, combine AAN scaling here and reordering.
          7/24/01, , reorder already done in FDCT, the input here is in the next block
                   and it's the transpose of the raster scan. Output the same order
                   (for proof of concenpt).
          8/1/01, , change FDCT to do row/column FDCT without reordering, input is
                  still in the next block. The reconstructed DCT output is current
                  block in normal order. The quantized output is in zigzag scan order
                  for INTER,  row/column for INTRA. Use bitmapzz for zigzag RunLevel
                  for INTER.  The quantization is done in column/row scanning order.
          8/2/01, , change IDCT to do column/row, change bitmaprow/col to the opposite.
          8/3/01, , add clipping to the reconstructed coefficient [-2047,2047]
          9/4/05, , removed scaling for AAN IDCT, use Chen IDCT instead.
 ********************/

/* Combined H.263-mode quantization + dequantization for an INTER block.
 * Walks the (transposed, next-block-offset) coefficients column by column,
 * quantizing with the reciprocal LUT (scaleArrayV) instead of division,
 * writing quantized levels in zigzag order into qcoeff and the clipped
 * reconstructed coefficients back into rcoeff[-64..].  Also fills:
 *   bitmapcol[8]  - per-column nonzero masks (via imask)
 *   *bitmaprow    - per-column summary bits
 *   bitmapzz[2]   - 64-bit zigzag-position bitmap (for RunLevel coding)
 * A column whose first entry is the sentinel 0x7fff is treated as all-zero.
 * Helpers aan_scale/coeff_quant/coeff_clip/coeff_dequant are inline
 * assembly/LUT routines from fastquant_inline.h (not visible here).
 * Returns 1 if any coefficient survives (CBP bit), else 0. */
Int BlockQuantDequantH263Inter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int dctMode, Int comp, Int dummy, UChar shortHeader)
{
    Int i, zz;
    Int tmp, coeff, q_value;
    Int QPdiv2 = QuantParam->QPdiv2;
    Int QPx2 = QuantParam->QPx2;
    Int Addition = QuantParam->Addition;
    Int QPx2plus = QuantParam->QPx2plus;
    Int round = 1 << 15;
    Int q_scale = scaleArrayV[QuantParam->QP];
    Int shift = 15 + (QPx2 >> 4);
    Int *temp;
    UChar *bcolptr = bitmapcol;
    Int ac_clip;    /* quantized coeff bound */

    OSCL_UNUSED_ARG(comp);
    OSCL_UNUSED_ARG(dummy);

    if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) */
    else ac_clip = 2047; /* clip between [-2048,2047] */

    /* reset all bitmap to zero */
    temp = (Int*) bitmapcol;
    temp[0] = temp[1] = 0;
    bitmapzz[0] = bitmapzz[1] = 0;
    *bitmaprow = 0;
    QPx2plus <<= 4;     /* pre-scale dead-zone threshold to the Q-format of the
                           unscaled coefficients */
    QPx2plus -= 8;
    rcoeff += 64; /* actual data is 64 item ahead */
    //end = rcoeff + dctMode - 1;
    //rcoeff--;
    bcolptr--;
    i = 0;
    do
    {
        bcolptr++;
        //rcoeff++;
        //i=0;
        coeff = rcoeff[i];
        if (coeff == 0x7fff) /* all zero column */
        {
            i++;
            continue;
        }
        do
        {
            if (coeff >= -QPx2plus && coeff < QPx2plus) /* quantize to zero */
            {
                i += 8;
                if (i < (dctMode << 3))
                {
                    coeff = rcoeff[i];
                    if (coeff > -QPx2plus && coeff < QPx2plus) /* quantize to zero */
                    {
                        i += 8;
                        coeff = rcoeff[i];
                        continue;
                    }
                    else
                        goto NONZERO1;
                }
            }
            else
            {
NONZERO1:
                /* scaling */
                q_value = AANScale[i];  /* load scale AAN */
                zz = ZZTab[i];  /* zigzag order */

                coeff = aan_scale(q_value, coeff, round, QPdiv2);
                q_value = coeff_quant(coeff, q_scale, shift);

                /* dequantization  */
                if (q_value)
                {
                    //coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, q_value));
                    q_value = coeff_clip(q_value, ac_clip);
                    qcoeff[zz>>1] = q_value;

                    // dequant and clip
                    //coeff = PV_MIN(2047,PV_MAX(-2048, q_value));
                    tmp = 2047;
                    coeff = coeff_dequant(q_value, QPx2, Addition, tmp);
                    rcoeff[i-64] = coeff;

                    (*bcolptr) |= imask[i>>3];
                    if ((zz >> 1) > 31) bitmapzz[1] |= (1 << (63 - (zz >> 1)));
                    else bitmapzz[0] |= (1 << (31 - (zz >> 1)));
                }
                i += 8;
                coeff = rcoeff[i];
            }
        }
        while (i < (dctMode << 3));
        i += (1 - (dctMode << 3));  /* back to the top of the next column */
    }
    while (i < dctMode) ;

    /* fold the column bitmaps into the row summary byte */
    i = dctMode;
    tmp = 1 << (8 - i);
    while (i--)
    {
        if (bitmapcol[i])(*bitmaprow) |= tmp;
        tmp <<= 1;
    }

    if (*bitmaprow)
        return 1;
    else
        return 0;
}

/* Combined H.263-mode quantization + dequantization for an INTRA block.
 * The DC term is quantized with dc_scaler (rounded division via the
 * scaleArrayV2 reciprocal LUT); with shortHeader the quantized DC is forced
 * into [1,254] per the short-header syntax.  AC terms follow the same
 * dead-zone + LUT path as the Inter version, but quantized output stays in
 * column/row order (qcoeff[i]) rather than zigzag, and bitmapzz is unused.
 * Returns 1 if any AC coefficient (DC excluded) survives, else 0. */
Int BlockQuantDequantH263Intra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int dctMode, Int comp, Int dc_scaler, UChar shortHeader)
{
    Int i;
    Int tmp, coeff, q_value;
    Int QPx2 = QuantParam->QPx2;
    Int Addition = QuantParam->Addition;
    Int QPx2plus = QuantParam->QPx2plus;
    Int round = 1 << 15;
    Int q_scale = scaleArrayV[QuantParam->QP];
    Int shift = 15 + (QPx2 >> 4);
    UChar *bmcolptr = bitmapcol;
    Int ac_clip;    /* quantized coeff bound */

    OSCL_UNUSED_ARG(bitmapzz);
    OSCL_UNUSED_ARG(comp);

    if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) */
    else ac_clip = 2047; /* clip between [-2048,2047] */

    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
    QPx2plus = QPx2 << 4;
    QPx2plus -= 8;
    rcoeff += 64; /* actual data is 64 element ahead */
    i = 0;

    /* DC value */
    coeff = *rcoeff;
    /* scaling */
    if (coeff == 0x7fff && !shortHeader) /* all zero column */
    {
        bmcolptr++;
        i++;
    }
    else
    {
        if (coeff == 0x7fff) /* shortHeader on */
        {
            coeff = 1; /* can't be zero */
            qcoeff[0] = coeff;
            coeff = coeff * dc_scaler;
            coeff = PV_MAX(-2048, PV_MIN(2047, coeff));
            rcoeff[-64] = coeff;
            bitmapcol[0] |= 128;
            bmcolptr++;
            //qcoeff++;
            //rcoeff++;
            //i=0;
            i++;
        }
        else
        {
            /* rounded divide-by-dc_scaler via reciprocal LUT */
            q_value = round + (coeff << 12);
            coeff = q_value >> 16;
            if (coeff >= 0) coeff += (dc_scaler >> 1) ;
            else coeff -= (dc_scaler >> 1) ;
            q_value = scaleArrayV2[dc_scaler];
            coeff = coeff * q_value;
            coeff >>= (15 + (dc_scaler >> 4));
            coeff += ((UInt)coeff >> 31);   /* add 1 if negative (round toward 0) */
            if (shortHeader) coeff = PV_MAX(1, PV_MIN(254, coeff));
            if (coeff)
            {
                qcoeff[0] = coeff;
                coeff = coeff * dc_scaler;
                coeff = PV_MAX(-2048, PV_MIN(2047, coeff));
                rcoeff[-64] = coeff;
                bitmapcol[0] |= 128;
            }
            i += 8;
        }
    }

    /* AC values */
    do
    {
        coeff = rcoeff[i];
        if (coeff == 0x7fff) /* all zero row */
        {
            bmcolptr++;
            i++;
            continue;
        }
        do
        {
            if (coeff >= -QPx2plus && coeff < QPx2plus) /* quantize to zero */
            {
                i += 8;
                if (i < dctMode << 3)
                {
                    coeff = rcoeff[i];
                    if (coeff > -QPx2plus && coeff < QPx2plus) /* quantize to zero */
                    {
                        i += 8;
                        coeff = rcoeff[i];
                        continue;
                    }
                    else
                        goto NONZERO2;
                }
            }
            else
            {
NONZERO2:
                /* scaling */
                q_value = AANScale[i]; /*  09/02/05 */
                /* scale aan */
                q_value = smlabb(q_value, coeff, round);
                coeff = q_value >> 16;
                /* quant */
                q_value = smulbb(q_scale, coeff);    /*mov        q_value, coeff, lsl #14 */
                /*smull tmp, coeff, q_value, q_scale*/
                q_value >>= shift;
                q_value += ((UInt)q_value >> 31); /* add 1 if negative */

                if (q_value)
                {
                    //coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, q_value));
                    q_value = coeff_clip(q_value, ac_clip);
                    qcoeff[i] = q_value;

                    // dequant and clip
                    //coeff = PV_MIN(2047,PV_MAX(-2048, q_value));
                    tmp = 2047;
                    coeff = coeff_dequant(q_value, QPx2, Addition, tmp);
                    rcoeff[i-64] = coeff;

                    (*bmcolptr) |= imask[i>>3];
                }
                i += 8;
                coeff = rcoeff[i];
            }
        }
        while (i < (dctMode << 3)) ;
        //qcoeff++; /* next column */
        bmcolptr++;
        //rcoeff++;
        i += (1 - (dctMode << 3)); //i = 0;
    }
    while (i < dctMode);//while(rcoeff < end) ;

    i = dctMode;
    tmp = 1 << (8 - i);
    while (i--)
    {
        if (bitmapcol[i])(*bitmaprow) |= tmp;
        tmp <<= 1;
    }

    if (((*bitmaprow)&127) || (bitmapcol[0]&127)) /* exclude DC */
        return 1;
    else
        return 0;
}

/***********************************************************************
 Function: BlckQuantDequantH263DC
 Date:  5/3/2001
 Purpose: H.263 quantization mode, only for DC component
 6/25/01, Further optimization (~100K/QCIF), need more testing/comment before integration.
 ********************/

/* H.263-mode quant/dequant for the DC coefficient only (INTER block with
 * dctMode==1).  Applies the same dead-zone test as the full Inter routine;
 * if DC survives, writes the clipped level into qcoeff[0], the reconstructed
 * value into rcoeff[0], and flags position 0 in bitmaprow/bitmapzz.
 * Returns the CBP bit (0 or 1). */
Int BlockQuantDequantH263DCInter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
                                 UChar *bitmaprow, UInt *bitmapzz, Int dummy, UChar shortHeader)
{
    Int coeff, scale_q;
    Int CBP = 0;
    Int QP = QuantParam->QP;
    Int QPx2plus = QuantParam->QPx2plus;
    Int Addition = QuantParam->Addition;
    Int shift = 15 + (QP >> 3);
    Int ac_clip;    /* quantized coeff bound */
    Int tmp;

    OSCL_UNUSED_ARG(dummy);

    if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) */
    else ac_clip = 2047; /* clip between [-2048,2047] */

    *bitmaprow = 0;
    bitmapzz[0] = bitmapzz[1] = 0;
    coeff = rcoeff[0];

    if (coeff >= -QPx2plus && coeff < QPx2plus)
    {
        rcoeff[0] = 0;
        return CBP;//rcoeff[0] = 0;  not needed since CBP will be zero
    }
    else
    {
        scale_q = scaleArrayV[QP];
        coeff = aan_dc_scale(coeff, QP);
        scale_q = coeff_quant(coeff, scale_q, shift);
        //coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, tmp));
        scale_q = coeff_clip(scale_q, ac_clip);
        qcoeff[0] = scale_q;
        QP <<= 1;
        //coeff = PV_MIN(2047,PV_MAX(-2048, tmp));
        tmp = 2047;
        coeff = coeff_dequant(scale_q, QP, Addition, tmp);
        rcoeff[0] = coeff;
        (*bitmaprow) = 128;
        bitmapzz[0] = (ULong)1 << 31;
        CBP = 1;
    }
    return CBP;
}

/* H.263-mode quant/dequant for the DC coefficient only (INTRA block with
 * dctMode==1).  Rounded division by dc_scaler via the scaleArrayV2
 * reciprocal LUT; shortHeader clamps the level to [1,254].  Always returns 0
 * because intra DC does not contribute to CBP. */
Int BlockQuantDequantH263DCIntra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
                                 UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler, UChar shortHeader)
{
    Int tmp, coeff;

    OSCL_UNUSED_ARG(QuantParam);

    *bitmaprow = 0;
    coeff = rcoeff[0];
    if (coeff >= 0) coeff += (dc_scaler >> 1) ;
    else coeff -= (dc_scaler >> 1) ;
    tmp = scaleArrayV2[dc_scaler];
    tmp = coeff * tmp;
    tmp >>= (15 + (dc_scaler >> 4));
    tmp += ((UInt)tmp >> 31);   /* add 1 if negative (round toward 0) */
    if (shortHeader) tmp = PV_MAX(1, PV_MIN(254, tmp));
    if (tmp)
    {
        qcoeff[0] = tmp;
        coeff = tmp * dc_scaler;
        coeff = PV_MAX(-2048, PV_MIN(2047, coeff));
        rcoeff[0] = coeff;
        *bitmaprow = 128;
        bitmapzz[0] = (ULong)1 << 31;
    }
    return 0;
}

#ifndef NO_MPEG_QUANT
/***********************************************************************
 Function:
 BlckQuantDequantMPEG
 Date:  June 15, 1999
 Purpose: Combine BlockQuantMPEG and BlockDequantMPEGENC
 Input:   coeff=> DCT coefficient
 Output:  qcoeff=> quantized coefficient
          rcoeff=> reconstructed coefficient
 Modified:  7/5/01, break up function for Intra/Inter
            8/3/01, update with changes from H263 quant mode.
            8/3/01, add clipping to the reconstructed coefficient [-2048,2047]
            8/6/01, optimize using multiplicative lookup-table.
                    can be further optimized using ARM assembly, e.g.,
                    clipping, 16-bit mult., etc !!!!!!!!!!!!!
 ********************/

/* MPEG-mode quantization + dequantization for an INTER block.
 * Each coefficient is AAN-scaled, divided by the per-position matrix entry
 * qmat[i] (rounded division via the scaleArrayV2 reciprocal LUT), then
 * quantized by 2*QP using scaleArrayV.  Quantized levels go to qcoeff in
 * zigzag order; reconstructed, clipped values go back to rcoeff[-64..].
 * The running `sum` of reconstructed values feeds the MPEG mismatch control:
 * if CBP is set and the sum is even, the LSB of rcoeff[63] is toggled.
 * smlabb/smulbb and coeff_quant/clip_2047/coeff_dequant_mpeg come from
 * fastquant_inline.h.  Returns the CBP bit. */
Int BlockQuantDequantMPEGInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int dctMode, Int comp, Int dc_scaler)
{
    Int i, zz;
    Int tmp, coeff, q_value = 0;
    Int sum = 0;
    Int stepsize, QPx2 = QP << 1;
    Int CBP = 0;
    Int round = 1 << 15;
    Int q_scale = scaleArrayV[QP];
    Int shift = 15 + (QP >> 3);
    UChar *bcolptr = bitmapcol;

    OSCL_UNUSED_ARG(dc_scaler);
    OSCL_UNUSED_ARG(comp);

    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
    bitmapzz[0] = bitmapzz[1] = 0;
    *bitmaprow = 0;
    rcoeff += 64;   /* actual data is 64 items ahead */
    i = 0;
    bcolptr--;
    do
    {
        bcolptr++;
        coeff = rcoeff[i];
        if (coeff == 0x7fff) /* all zero column */
        {
            i++;
            continue;
        }
        do
        {
            q_value = AANScale[i];   /*  09/02/05 scaling for AAN*/
            /* aan scaling */
            q_value = smlabb(q_value, coeff, round);
            coeff = q_value >> 16;

            stepsize = qmat[i];
//          if(coeff>0)     coeff = (16*coeff + (stepsize/2)) / stepsize;
//          else            coeff = (16*coeff - (stepsize/2)) / stepsize;
            coeff <<= 4;
            if (coeff >= 0) coeff += (stepsize >> 1) ;
            else coeff -= (stepsize >> 1) ;
            q_value = scaleArrayV2[stepsize];
            /* mpeg quant table scale */
            coeff = smulbb(coeff, q_value);
            coeff >>= (15 + (stepsize >> 4));
            coeff += ((UInt)coeff >> 31);   /* add 1 if negative */

            /* QP scale */
            if (coeff >= -QPx2 && coeff < QPx2)  /* quantized to zero*/
            {
                i += 8;
            }
            else
            {
//              q_value = coeff/(QPx2);
                q_value = coeff_quant(coeff, q_scale, shift);

                if (q_value)                /* dequant */
                {
                    zz = ZZTab[i];  /* zigzag order */
                    tmp = 2047;
                    q_value = clip_2047(q_value, tmp);
                    qcoeff[zz>>1] = q_value;

                    //q_value=(((coeff*2)+SIGN0(coeff))*stepsize*QP)/16;
                    /* no need for SIGN0, no zero coming in this {} */
                    q_value = coeff_dequant_mpeg(q_value, stepsize, QP, tmp);
                    rcoeff[i-64] = q_value;

                    sum += q_value;
                    (*bcolptr) |= imask[i>>3];
                    if ((zz >> 1) > 31) bitmapzz[1] |= (1 << (63 - (zz >> 1)));
                    else bitmapzz[0] |= (1 << (31 - (zz >> 1)));
                }
                i += 8;
            }
            coeff = rcoeff[i];
        }
        while (i < (dctMode << 3)) ;
        i += (1 - (dctMode << 3));
    }
    while (i < dctMode) ;

    i = dctMode;
    tmp = 1 << (8 - i);
    while (i--)
    {
        if (bitmapcol[i])(*bitmaprow) |= tmp;
        tmp <<= 1;
    }

    if (*bitmaprow)
        CBP = 1;   /* check CBP before mismatch control,  7/5/01 */

    /* Mismatch control,  5/3/01 */
    if (CBP)
    {
        if ((sum&0x1) == 0)
        {
            rcoeff--;  /* rcoeff[63] */
            coeff = *rcoeff;
            coeff ^= 0x1;   /* toggle LSB so the reconstructed sum is odd */
            *rcoeff = coeff;
            if (coeff)
            {
                bitmapcol[7] |= 1;
                (*bitmaprow) |= 1;
            }
        }
    }

    return CBP;
}

/* MPEG-mode quantization + dequantization for an INTRA block.
 * DC: rounded division by dc_scaler (reciprocal LUT), level clamped to
 * [1,254].  AC: AAN scale, matrix division by qmat[i], dead-zone against
 * (3*QP+2)/4, then quantization by 2*QP; quantized output stays in
 * column/row order (qcoeff[i]).  Mismatch control toggles the LSB of
 * rcoeff[63] when the reconstructed sum is even (also triggered by a
 * DC-only block, hence `CBP || bitmapcol[0]`).  Returns the CBP bit
 * (intra DC excluded from CBP). */
Int BlockQuantDequantMPEGIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int dctMode, Int comp, Int dc_scaler)
{
    Int i;
    Int tmp, coeff, q_value = 0;
    Int sum = 0;
    Int stepsize;
    Int CBP = 0;
    Int round = 1 << 15;
    Int q_scale = scaleArrayV[QP];
    Int shift = 15 + (QP >> 3);
    Int round2 = (3 * QP + 2) >> 2;     /* intra dead-zone rounding term */
    Int QPx2plus = (QP << 1) - round2;
    UChar *bmcolptr = bitmapcol;

    OSCL_UNUSED_ARG(bitmapzz);
    OSCL_UNUSED_ARG(comp);

    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
    rcoeff += 64;
    i = 0;

    /* DC value */
    coeff = *rcoeff;
    if (coeff == 0x7fff) /* all zero column */
    {
        bmcolptr++;
        i++;
    }
    else
    {
        q_value = round + (coeff << 12);
        coeff = q_value >> 16;
        /*if (coeff >= 0)     coeff = (coeff + (dc_scaler/2)) / dc_scaler;
        else            coeff = (coeff - (dc_scaler/2)) / dc_scaler;*/
        if (coeff >= 0) coeff += (dc_scaler >> 1) ;
        else coeff -= (dc_scaler >> 1) ;
        q_value = scaleArrayV2[dc_scaler];
        /* mpeg quant table scale */
        coeff = smulbb(coeff, q_value);
        coeff >>= (15 + (dc_scaler >> 4));
        coeff += ((UInt)coeff >> 31);

        if (coeff)
        {
            coeff = PV_MAX(1, PV_MIN(254, coeff));
            qcoeff[0] = coeff;
            coeff = smulbb(coeff, dc_scaler);
            q_value = clip_2047(coeff, 2047);
            sum = q_value;
            rcoeff[-64] = q_value;
            bitmapcol[0] |= 128;
        }
        i += 8;
    }

    /* AC values */
    do
    {
        coeff = rcoeff[i];
        if (coeff == 0x7fff) /* all zero row */
        {
            bmcolptr++;
            i++;
            continue;
        }
        do
        {
            /* scaling */
            q_value = AANScale[i];  /*  09/02/05 */
            /* q_value = coeff*q_value + round */
            q_value = smlabb(coeff, q_value, round);
            coeff = q_value >> 16;

            stepsize = qmat[i];
            /*if(coeff>0)       coeff = (16*coeff + (stepsize/2)) / stepsize;
            else            coeff = (16*coeff - (stepsize/2)) / stepsize;*/
            coeff <<= 4;
            if (coeff >= 0) coeff += (stepsize >> 1) ;
            else coeff -= (stepsize >> 1) ;
            q_value = scaleArrayV2[stepsize];
            /* scale mpeg quant */
            coeff = smulbb(coeff, q_value);
            coeff >>= (15 + (stepsize >> 4));
            coeff += ((UInt)coeff >> 31);

            if (coeff >= -QPx2plus && coeff < QPx2plus)
            {
                i += 8;
            }
            else
            {
                //q_value = ( coeff + SIGN0(coeff)*((3*QP+2)/4))/(2*QP);
                if (coeff > 0) coeff += round2;
                else if (coeff < 0) coeff -= round2;
                q_value = smulbb(coeff, q_scale);
                q_value >>= shift;
                q_value += ((UInt)q_value >> 31);

                if (q_value)
                {
                    tmp = 2047;
                    q_value = clip_2047(q_value, tmp);
                    qcoeff[i] = q_value;

                    stepsize = smulbb(stepsize, QP);
                    q_value = smulbb(q_value, stepsize);
                    q_value = coeff_dequant_mpeg_intra(q_value, tmp);
                    //q_value = (coeff*stepsize*QP*2)/16;

                    rcoeff[i-64] = q_value;
                    sum += q_value;
                    (*bmcolptr) |= imask[i>>3];
                }
                i += 8;
            }
            coeff = rcoeff[i];
        }
        while (i < (dctMode << 3)) ;
        bmcolptr++;
        i += (1 - (dctMode << 3));
    }
    while (i < dctMode) ;

    i = dctMode;
    tmp = 1 << (8 - i);
    while (i--)
    {
        if (bitmapcol[i])(*bitmaprow) |= tmp;
        tmp <<= 1;
    }

    if (((*bitmaprow) &127) || (bitmapcol[0]&127))
        CBP = 1;  /* check CBP before mismatch control,  7/5/01 */

    /* Mismatch control,  5/3/01 */
    if (CBP || bitmapcol[0])
    {
        if ((sum&0x1) == 0)
        {
            rcoeff--;  /* rcoeff[63] */
            coeff = *rcoeff;
            coeff ^= 0x1;
            *rcoeff = coeff;
            if (coeff)
            {
                bitmapcol[7] |= 1;
                (*bitmaprow) |= 1;
            }
        }
    }

    return CBP;
}
/***********************************************************************
    Function:   BlckQuantDequantMPEGDC
    Date:       5/3/2001
    Purpose:    MPEG Quant/Dequant for DC only block (only coefficient 0
                is nonzero).  Same arithmetic as the full-block versions
                but without the column/zigzag scan.
********************************************************************/
Int BlockQuantDequantMPEGDCInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy)
{
    Int q_value, coeff, stepsize;
    Int CBP = 0;
    Int q_scale = scaleArrayV[QP];  /* reciprocal table for divide by 2*QP */
    Int shift = 15 + (QP >> 3);
    Int QPx2 = QP << 1;

    OSCL_UNUSED_ARG(dummy);

    /* clear occupancy bitmaps (assumes bitmapcol is word-aligned -- TODO confirm) */
    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
    bitmapzz[0] = bitmapzz[1] = 0;
    coeff = rcoeff[0];
    stepsize = qmat[0];
    /* coeff = (16*coeff +/- stepsize/2) / stepsize, via reciprocal multiply */
    coeff <<= 4;
    if (coeff >= 0) coeff += (stepsize >> 1) ;
    else coeff -= (stepsize >> 1) ;
    q_value = scaleArrayV2[stepsize];
    coeff = smulbb(coeff, q_value);
    coeff >>= (15 + (stepsize >> 4));
    coeff += ((UInt)coeff >> 31);   /* round toward zero for negatives */

    if (coeff >= -QPx2 && coeff < QPx2)  /* dead zone: whole block is zero */
    {
        rcoeff[0] = 0;
        return CBP;
    }
    else
    {
        /* q_value = coeff/(QPx2) */
        q_value = coeff_quant(coeff, q_scale, shift);
        if (q_value)
        {
            /* clamp to [-2048,2047] */
            q_value = clip_2047(q_value, 2047);
            qcoeff[0] = q_value;
            q_value = coeff_dequant_mpeg(q_value, stepsize, QP, 2047);
            /* q_value=(((coeff*2)+SIGN0(coeff))*stepsize*QP)/16 */
            rcoeff[0] = q_value;
            bitmapcol[0] = 128;
            (*bitmaprow) = 128;
            bitmapzz[0] = (UInt)1 << 31;
            CBP = 1;
            /* Mismatch control, 5/3/01: keep the reconstructed sum odd */
            if ((q_value&0x1) == 0)
            {
                rcoeff[63] = 1; /* after scaling it remains the same */
                bitmapcol[7] |= 1;
                (*bitmaprow) |= 1;
            }
        }
    }
    return CBP;
}

/*********************************************************************
    BlockQuantDequantMPEGDCIntra
    DC-only intra path: quantize with dc_scaler (clamped to [1,254]),
    reconstruct, and apply mismatch control.  Always returns 0 because
    intra CBP counts AC coefficients only.
********************************************************************/
Int BlockQuantDequantMPEGDCIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                                 Int dc_scaler)
{
    Int tmp, coeff, q_value;

    OSCL_UNUSED_ARG(QP);
    OSCL_UNUSED_ARG(qmat);

    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
    *bitmaprow = 0;
    coeff = rcoeff[0];
    /* tmp = (coeff +/- dc_scaler/2) / dc_scaler, via reciprocal multiply */
    if (coeff >= 0) coeff += (dc_scaler >> 1) ;
    else coeff -= (dc_scaler >> 1) ;
    tmp = scaleArrayV2[dc_scaler];
    tmp = smulbb(tmp, coeff);
    tmp >>= (15 + (dc_scaler >> 4));
    tmp += ((UInt)tmp >> 31);

    if (tmp)
    {
        coeff = PV_MAX(1, PV_MIN(254, tmp));    /* legal intra-DC range */
        qcoeff[0] = coeff;
        q_value = smulbb(coeff, dc_scaler);
        q_value = clip_2047(q_value, 2047);
        rcoeff[0] = q_value;
        bitmapcol[0] = 128;
        *bitmaprow = 128;
        bitmapzz[0] = (UInt)1 << 31;
        /* Mismatch control, 5/3/01 */
        if ((q_value&0x1) == 0)
        {
            rcoeff[63] = 1; /* after scaling it remains the same */
            bitmapcol[7] |= 1;
            (*bitmaprow) |= 1;
        }
    }
    return 0;
}

#endif

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/fastquant_inline.h
================================================

/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
/*********************************************************************************/
/*  Filename: fastquant_inline.h                                                 */
/*  Description: Implementation for in-line functions used in dct.cpp            */
/*               Three equivalent variants of each primitive are provided:       */
/*               plain C, RVCT (__CC_ARM) inline asm, and GCC ARMv5 inline asm.  */
/*  Modified:                                                                    */
/*********************************************************************************/

#ifndef _FASTQUANT_INLINE_H_
#define _FASTQUANT_INLINE_H_

#include "mp4def.h"
#include "oscl_base_macros.h"

#if !defined(PV_ARM_GCC_V5) /* ARM GNU COMPILER */

/* AAN scale: q_value is a Q16 scale factor; returns high word of the
   product with a half-QP bias toward zero. */
__inline int32 aan_scale(int32 q_value, int32 coeff,
                         int32 round, int32 QPdiv2)
{
    q_value = coeff * q_value + round;
    coeff = q_value >> 16;
    if (coeff < 0) coeff += QPdiv2;
    else coeff -= QPdiv2;

    return coeff;
}

/* Quantize via reciprocal multiply; equivalent to coeff / (2*QP). */
__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
    int32 q_value;

    q_value = coeff * q_scale;
    q_value >>= shift;
    q_value += ((UInt)q_value >> 31); /* add one if negative */

    return q_value;
}

/* Symmetric clip to [-ac_clip, ac_clip] using one unsigned compare. */
__inline int32 coeff_clip(int32 q_value, int32 ac_clip)
{
    int32 coeff = q_value + ac_clip;

    if ((UInt)coeff > (UInt)(ac_clip << 1))
        q_value = ac_clip ^(q_value >> 31);

    return q_value;
}

/* H.263-style dequant: q_value*2*QP +/- Addition, clipped to [-2048,2047]. */
__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
    int32 coeff;

    OSCL_UNUSED_ARG(tmp);

    if (q_value < 0)
    {
        coeff = q_value * QPx2 - Addition;
        if (coeff < -2048)
            coeff = -2048;
    }
    else
    {
        coeff = q_value * QPx2 + Addition;
        if (coeff > 2047)
            coeff = 2047;
    }
    return coeff;
}

/* multiply-accumulate: coeff*q_value + round (mirrors ARM SMLABB) */
__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
    q_value = coeff * q_value + round;
    return q_value;
}

/* 16x16 multiply (mirrors ARM SMULBB) */
__inline int32 smulbb(int32 q_scale, int32 coeff)
{
    int32 q_value;
    q_value = coeff * q_scale;
    return q_value;
}

/* bias DC coefficient toward zero by QP/2 */
__inline int32 aan_dc_scale(int32 coeff, int32 QP)
{
    if (coeff < 0) coeff += (QP >> 1);
    else coeff -= (QP >> 1);

    return coeff;
}

/* hard clip to the 12-bit coefficient range */
__inline int32 clip_2047(int32 q_value, int32 tmp)
{
    OSCL_UNUSED_ARG(tmp);

    if (q_value < -2048)
    {
        q_value = -2048;
    }
    else if (q_value > 2047)
    {
        q_value = 2047;
    }

    return q_value;
}

/* MPEG inter dequant: ((2*q +/- 1)*stepsize*QP)/16, clipped. The +15
   on the negative side truncates the >>4 toward zero. */
__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
    int32 coeff;

    OSCL_UNUSED_ARG(tmp);

    coeff = q_value << 1;
    stepsize *= QP;
    if (coeff > 0)
    {
        q_value = (coeff + 1) * stepsize;
        q_value >>= 4;
        if (q_value > 2047) q_value = 2047;
    }
    else
    {
        q_value = (coeff - 1) * stepsize;
        q_value += 15;
        q_value >>= 4;
        if (q_value < -2048) q_value = -2048;
    }

    return q_value;
}

/* MPEG intra dequant: caller pre-multiplies q*stepsize*QP; this does
   the final *2, /16 and clip. */
__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)
{
    OSCL_UNUSED_ARG(tmp);

    q_value <<= 1;
    if (q_value > 0)
    {
        q_value >>= 4;
        if (q_value > 2047) q_value = 2047;
    }
    else
    {
        q_value += 15;
        q_value >>= 4;
        if (q_value < -2048) q_value = -2048;
    }

    return q_value;
}

#elif defined(__CC_ARM)  /* only work with arm v5 */

/* RVCT inline-asm versions of the primitives above.  Semantics must
   track the C fallback exactly; see the C branch for explanations. */
#if defined(__TARGET_ARCH_5TE)

__inline int32 aan_scale(int32 q_value, int32 coeff,
                         int32 round, int32 QPdiv2)
{
    __asm
    {
        smlabb q_value, coeff, q_value, round
        movs   coeff, q_value, asr #16
        addle  coeff, coeff, QPdiv2
        subgt  coeff, coeff, QPdiv2
    }

    return coeff;
}

__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
    int32 q_value;

    __asm
    {
        smulbb q_value, q_scale, coeff
        /*mov coeff, coeff, lsl #14*/
        mov coeff, q_value, asr shift
        /*smull tmp, coeff, q_scale, coeff*/
        add q_value, coeff, coeff, lsr #31
    }

    return q_value;
}

__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
    int32 coeff;

    __asm
    {
        cmp    q_value, #0
        smulbb coeff, q_value, QPx2
        sublt  coeff, coeff, Addition
        addge  coeff, coeff, Addition
        add    q_value, coeff, tmp
        subs   q_value, q_value, #3840
        subcss q_value, q_value, #254
        eorhi  coeff, tmp, coeff, asr #31
    }

    return coeff;
}

__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
    __asm
    {
        smlabb q_value, coeff, q_value, round
    }
    return q_value;
}

__inline int32 smulbb(int32 q_scale, int32 coeff)
{
    int32 q_value;

    __asm
    {
        smulbb q_value, q_scale, coeff
    }
    return q_value;
}

__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
    /* tmp must have value of 2047 */
    int32 coeff;

    __asm
    {
        movs   coeff, q_value, lsl #1
        smulbb stepsize, stepsize, QP
        addgt  coeff, coeff, #1
        sublt  coeff, coeff, #1
        smulbb q_value, coeff, stepsize
        addlt  q_value, q_value, #15
        mov    q_value, q_value, asr #4
        add    coeff, q_value, tmp
        subs   coeff, coeff, #0xf00
        subcss coeff, coeff, #0xfe
        eorhi  q_value, tmp, q_value, asr #31
    }

    return q_value;
}

#else // not ARMV5TE: same sequences with mul/mla instead of smulbb/smlabb

__inline int32 aan_scale(int32 q_value, int32 coeff,
                         int32 round, int32 QPdiv2)
{
    __asm
    {
        mla   q_value, coeff, q_value, round
        movs  coeff, q_value, asr #16
        addle coeff, coeff, QPdiv2
        subgt coeff, coeff, QPdiv2
    }

    return coeff;
}

__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
    int32 q_value;

    __asm
    {
        mul q_value, q_scale, coeff
        /*mov coeff, coeff, lsl #14*/
        mov coeff, q_value, asr shift
        /*smull tmp, coeff, q_scale, coeff*/
        add q_value, coeff, coeff, lsr #31
    }

    return q_value;
}

__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
    int32 coeff;

    __asm
    {
        cmp    q_value, #0
        mul    coeff, q_value, QPx2
        sublt  coeff, coeff, Addition
        addge  coeff, coeff, Addition
        add    q_value, coeff, tmp
        subs   q_value, q_value, #3840
        subcss q_value, q_value, #254
        eorhi  coeff, tmp, coeff, asr #31
    }

    return coeff;
}

__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
    __asm
    {
        mla q_value, coeff, q_value, round
    }
    return q_value;
}

__inline int32 smulbb(int32 q_scale, int32 coeff)
{
    int32 q_value;

    __asm
    {
        mul q_value, q_scale, coeff
    }
    return q_value;
}

__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
    /* tmp must have value of 2047 */
    int32 coeff;

    __asm
    {
        movs   coeff, q_value, lsl #1
        mul    stepsize, stepsize, QP
        addgt  coeff, coeff, #1
        sublt  coeff, coeff, #1
        mul    q_value, coeff, stepsize
        addlt  q_value, q_value, #15
        mov    q_value, q_value, asr #4
        add    coeff, q_value, tmp
        subs   coeff, coeff, #0xf00
        subcss coeff, coeff, #0xfe
        eorhi  q_value, tmp, q_value, asr #31
    }

    return q_value;
}

#endif

/* shared between both __CC_ARM architecture variants */

__inline int32 coeff_clip(int32 q_value, int32 ac_clip)
{
    int32 coeff;

    __asm
    {
        add    coeff, q_value, ac_clip
        subs   coeff, coeff, ac_clip, lsl #1
        eorhi  q_value, ac_clip, q_value, asr #31
    }

    return q_value;
}

__inline int32 aan_dc_scale(int32 coeff, int32 QP)
{
    __asm
    {
        cmp   coeff, #0
        addle coeff, coeff, QP, asr #1
        subgt coeff, coeff, QP, asr #1
    }

    return coeff;
}

__inline int32 clip_2047(int32 q_value, int32 tmp)
{
    /* tmp must have value of 2047 */
    int32 coeff;

    __asm
    {
        add    coeff, q_value, tmp
        subs   coeff, coeff, #0xf00
        subcss coeff, coeff, #0xfe
        eorhi  q_value, tmp, q_value, asr #31
    }

    return q_value;
}

__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)
{
    int32 coeff;

    __asm
    {
        movs   q_value, q_value, lsl #1
        addlt  q_value, q_value, #15
        mov    q_value, q_value, asr #4
        add    coeff, q_value, tmp
        subs   coeff, coeff, #0xf00
        subcss coeff, coeff, #0xfe
        eorhi  q_value, tmp, q_value, asr #31
    }

    return q_value;
}

#elif (defined(PV_ARM_GCC_V5)) /* ARM GNU COMPILER */

/* GCC extended-asm versions; same semantics as the C fallback. */

__inline int32 aan_scale(int32 q_value, int32 coeff, int32 round, int32 QPdiv2)
{
    register int32 out;
    register int32 qv = q_value;
    register int32 cf = coeff;
    register int32 rr = round;
    register int32 qp = QPdiv2;

    asm volatile("smlabb %0, %2, %1, %3\n\t"
                 "movs %0, %0, asr #16\n\t"
                 "addle %0, %0, %4\n\t"
                 "subgt %0, %0, %4"
             : "=&r"(out)
                         : "r"(qv),
                         "r"(cf),
                         "r"(rr),
                         "r"(qp));

    return out;
}

__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
    register int32 out;
    register int32 temp1;
    register int32 cc = coeff;
    register int32 qs = q_scale;
    register int32 ss = shift;

    asm volatile("smulbb %0, %3, %2\n\t"
                 "mov %1, %0, asr %4\n\t"
                 "add %0, %1, %1, lsr #31"
             : "=&r"(out),
                 "=&r"(temp1)
                         : "r"(cc),
                         "r"(qs),
                         "r"(ss));

    return out;
}

__inline int32 coeff_clip(int32 q_value, int32 ac_clip)
{
    register int32 coeff;

    asm volatile("add %1, %0, %2\n\t"
                 "subs %1, %1, %2, lsl #1\n\t"
                 "eorhi %0, %2, %0, asr #31"
             : "+r"(q_value),
                 "=&r"(coeff)
                         : "r"(ac_clip));

    return q_value;
}

__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
    register int32 out;
    register int32 temp1;
    register int32 qv = q_value;
    register int32 qp = QPx2;
    register int32 aa = Addition;
    register int32 tt = tmp;

    asm volatile("cmp %2, #0\n\t"
                 "mul %0, %2, %3\n\t"
                 "sublt %0, %0, %4\n\t"
                 "addge %0, %0, %4\n\t"
                 "add %1, %0, %5\n\t"
                 "subs %1, %1, #3840\n\t"
                 "subcss %1, %1, #254\n\t"
                 "eorhi %0, %5, %0, asr #31"
             : "=&r"(out),
                 "=&r"(temp1)
                         : "r"(qv),
                         "r"(qp),
                         "r"(aa),
                         "r"(tt));

    return out;
}

__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
    register int32 out;
    register int32 aa = (int32)q_value;
    register int32 bb = (int32)coeff;
    register int32 cc = (int32)round;

    asm volatile("smlabb %0, %1, %2, %3"
             : "=&r"(out)
                         : "r"(aa),
                         "r"(bb),
                         "r"(cc));
    return out;
}

__inline int32 smulbb(int32 q_scale, int32 coeff)
{
    register int32 out;
    register int32 aa = (int32)q_scale;
    register int32 bb = (int32)coeff;

    asm volatile("smulbb %0, %1, %2"
             : "=&r"(out)
                         : "r"(aa),
                         "r"(bb));
    return out;
}

__inline int32 aan_dc_scale(int32 coeff, int32 QP)
{
    register int32 out;
    register int32 cc = coeff;
    register int32 qp = QP;

    asm volatile("cmp %1, #0\n\t"
                 "addle %0, %1, %2, asr #1\n\t"
                 "subgt %0, %1, %2, asr #1"
             : "=&r"(out)
                         : "r"(cc),
                         "r"(qp));
    return out;
}

__inline int32 clip_2047(int32 q_value, int32 tmp)
{
    register int32 coeff;

    asm volatile("add %1, %0, %2\n\t"
                 "subs %1, %1, #0xF00\n\t"
                 "subcss %1, %1, #0xFE\n\t"
                 "eorhi %0, %2, %0, asr #31"
             : "+r"(q_value),
                 "=&r"(coeff)
                         : "r"(tmp));

    return q_value;
}

__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
    register int32 out;
    register int32 temp1;
    register int32 qv = q_value;
    register int32 ss = stepsize;
    register int32 qp = QP;
    register int32 tt = tmp;

    asm volatile("movs %1, %2, lsl #1\n\t"
                 "mul %0, %3, %4\n\t"
                 "addgt %1, %1, #1\n\t"
                 "sublt %1, %1, #1\n\t"
                 "mul %0, %1, %0\n\t"
                 "addlt %0, %0, #15\n\t"
                 "mov %0, %0, asr #4\n\t"
                 "add %1, %0, %5\n\t"
                 "subs %1, %1, #0xF00\n\t"
                 "subcss %1, %1, #0xFE\n\t"
                 "eorhi %0, %5, %0, asr #31"
             : "=&r"(out),
                 "=&r"(temp1)
                         : "r"(qv),
                         "r"(ss),
                         "r"(qp),
                         "r"(tt));

    return out;
}

__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)
{
    register int32 out;
    register int32 temp1;
    register int32 qv = q_value;
    register int32 tt = tmp;

    asm volatile("movs %1, %2, lsl #1\n\t"
                 "addlt %1, %1, #15\n\t"
                 "mov %0, %1, asr #4\n\t"
                 "add %1, %0, %3\n\t"
                 "subs %1, %1, #0xF00\n\t"
                 "subcss %1, %1, #0xFE\n\t"
                 "eorhi %0, %3, %0, asr #31"
             : "=&r"(out),
                 "=&r"(temp1)
                         : "r"(qv),
                         "r"(tt));

    return out;
}

#endif // Platform

#endif //_FASTQUANT_INLINE_H_

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/findhalfpel.cpp
================================================

/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "mp4def.h"
#include "mp4enc_lib.h"
#include "mp4lib_int.h"
#include "m4venc_oscl.h"

/* 3/29/01 fast half-pel search based on neighboring guess */
/* value ranging from 0 to 4, high complexity (more accurate) to
   low complexity (less accurate) */
#define HP_DISTANCE_TH      2  /* half-pel distance threshold */

#define PREF_16_VEC 129    /* 1MV bias versus 4MVs */

#ifdef __cplusplus
extern "C"
{
#endif
    void GenerateSearchRegion(UChar *searchPadding, UChar *ref, Int width, Int height,
                              Int ilow, Int ihigh, Int jlow, Int jhigh);

    void InterpDiag(UChar *prev, Int lx, UChar *pred_block);
    void InterpHorz(UChar *prev, Int lx, UChar *pred_block);
    void InterpVert(UChar *prev, Int lx, UChar *pred_block);
#ifdef __cplusplus
}
#endif

/* distance_tab[hp_guess][k]: priority of visiting half-pel position k
   given the neighbors' guessed best direction; positions with distance
   below HP_DISTANCE_TH are evaluated. */
const static Int distance_tab[9][9] =   /* [hp_guess][k] */
{
    {0, 1, 1, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 2, 3, 4, 3, 2, 1},
    {1, 0, 0, 0, 1, 2, 3, 2, 1},
    {1, 2, 1, 0, 1, 2, 3, 4, 3},
    {1, 2, 1, 0, 0, 0, 1, 2, 3},
    {1, 4, 3, 2, 1, 0, 1, 2, 3},
    {1, 2, 3, 2, 1, 0, 0, 0, 1},
    {1, 2, 3, 4, 3, 2, 1, 0, 1},
    {1, 0, 1, 2, 3, 2, 1, 0, 0}
};

/*=====================================================================
    Function:   FindHalfPelMB
    Date:       10/7/2000
    Purpose:    Find half pel resolution MV surrounding the full-pel MV.
                Walks the 8 half-pel neighbors in a spiral (even positions
                first, restarting once if an improvement was found) and
                updates mot[0] (x, y, sad) in place.  xhmin/yhmin receive
                the winning half-pel offsets in {-1,0,1}.
=====================================================================*/
void FindHalfPelMB(VideoEncData *video, UChar *cur, MOT *mot, UChar *ncand,
                   Int xpos, Int ypos, Int *xhmin, Int *yhmin, Int hp_guess)
{
//  hp_mem = ULong *vertArray; /* 20x17 */
//           ULong *horzArray; /* 20x16 */
//           ULong *diagArray; /* 20x17 */
    Int dmin, d;

    Int xh, yh;
    Int k, kmin = 0;
    Int imin, jmin, ilow, jlow;
    Int h263_mode = video->encParams->H263_Enabled; /*  3/29/01 */
    Int in_range[9] = {0, 1, 1, 1, 1, 1, 1, 1, 1}; /*  3/29/01 */
    Int range = video->encParams->SearchRange;
    Int lx = video->currVop->pitch;
    Int width = video->currVop->width; /* padding */
    Int height = video->vol[video->currLayer]->height;
    Int(**SAD_MB_HalfPel)(UChar*, UChar*, Int, void*) =
        video->functionPointer->SAD_MB_HalfPel;
    void *extra_info = video->sad_extra_info;

    /* relative steps of the spiral walk and the matching pointer deltas */
    Int next_hp_pos[9][2] = {{0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}, {0, -1}};
    Int next_ncand[9] = {0, 1 , lx, lx, 0, -1, -1, -lx, -lx};

    cur = video->currYMB;   /* cached copy of the current MB, overrides arg */

    /**************** check range ***************************/
    /*  3/29/01: disable candidates that would fall outside the
        search window or the (padded) picture */
    imin = xpos + (mot[0].x >> 1);
    jmin = ypos + (mot[0].y >> 1);
    ilow = xpos - range;
    jlow = ypos - range;

    if (!h263_mode)
    {
        if (imin <= -15 || imin == ilow) in_range[1] = in_range[7] = in_range[8] = 0;
        else if (imin >= width - 1) in_range[3] = in_range[4] = in_range[5] = 0;

        if (jmin <= -15 || jmin == jlow) in_range[1] = in_range[2] = in_range[3] = 0;
        else if (jmin >= height - 1) in_range[5] = in_range[6] = in_range[7] = 0;
    }
    else
    {
        if (imin <= 0 || imin == ilow) in_range[1] = in_range[7] = in_range[8] = 0;
        else if (imin >= width - 16) in_range[3] = in_range[4] = in_range[5] = 0;

        if (jmin <= 0 || jmin == jlow) in_range[1] = in_range[2] = in_range[3] = 0;
        else if (jmin >= height - 16) in_range[5] = in_range[6] = in_range[7] = 0;
    }

    xhmin[0] = 0;
    yhmin[0] = 0;
    dmin = mot[0].sad;

    xh = 0;
    yh = -1;
    ncand -= lx; /* initial position: straight above the full-pel candidate */

    /* even positions first; after k==8 the loop may restart once (k = -1)
       to also try the odd positions around the best found so far */
    for (k = 2; k <= 8; k += 2)
    {
        if (distance_tab[hp_guess][k] < HP_DISTANCE_TH)
        {
            if (in_range[k])
            {
                /* dispatch on (yh,xh) parity: full/horz/vert/diag SAD */
                d = (*(SAD_MB_HalfPel[((yh&1)<<1)+(xh&1)]))(ncand, cur, (dmin << 16) | lx, extra_info);

                if (d < dmin)
                {
                    dmin = d;
                    xhmin[0] = xh;
                    yhmin[0] = yh;
                    kmin = k;
                }
                else if (d == dmin &&
                         PV_ABS(mot[0].x + xh) + PV_ABS(mot[0].y + yh) < PV_ABS(mot[0].x + xhmin[0]) + PV_ABS(mot[0].y + yhmin[0]))
                {
                    /* tie-break: prefer the shorter motion vector */
                    xhmin[0] = xh;
                    yhmin[0] = yh;
                    kmin = k;
                }
            }
        }
        xh += next_hp_pos[k][0];
        yh += next_hp_pos[k][1];
        ncand += next_ncand[k];

        if (k == 8)
        {
            if (xhmin[0] != 0 || yhmin[0] != 0) /* found a better half-pel pos. */
            {
                k = -1;             /* restart loop once over odd positions */
                hp_guess = kmin;
            }
        }
    }

    mot[0].sad = dmin;
    mot[0].x += xhmin[0];
    mot[0].y += yhmin[0];

    return ;
}

#ifndef NO_INTER4V
/*=====================================================================
    Function:
                FindHalfPelBlk
    Date:       10/7/2000
    Purpose:    Find half pel resolution MV surrounding the full-pel MV
                And decide between 1MV or 4MV mode
=====================================================================*/
///// THIS FUNCTION IS NOT WORKING!!! NEED TO BE RIVISITED
/* NOTE(review): kept byte-identical; upstream marks it broken.  It runs
   a half-pel refinement per 8x8 block (mot[1..4]) and falls back to
   MODE_INTER with the 16x16 vector when the 4MV cost is not clearly
   better (PREF_16_VEC bias). */

Int FindHalfPelBlk(VideoEncData *video, UChar *cur, MOT *mot, Int sad16, UChar *ncand8[],
                   UChar *mode, Int xpos, Int ypos, Int *xhmin, Int *yhmin, UChar *hp_mem)
{
    Int k, comp;
    Int xh, yh; //, xhmin, yhmin;
    Int imin, jmin, ilow, jlow;
    Int height;
    UChar *cand, *cur8;
    UChar *hmem;//[17*17]; /* half-pel memory */
    Int d, dmin, sad8;
    Int lx = video->currVop->pitch;
    Int width = video->currVop->width; /* , padding */
    Int(*SAD_Blk_HalfPel)(UChar*, UChar*, Int, Int, Int, Int, Int, void*) =
        video->functionPointer->SAD_Blk_HalfPel;
    void *extra_info = video->sad_extra_info;
    Int in_range[8]; /*  3/29/01 */
    Int range = video->encParams->SearchRange;
    Int swidth;
    Int next_hp_pos[8][2] = {{1, 0}, {1, 0}, {0, 1}, {0, 1}, { -1, 0}, { -1, 0}, {0, -1}, {0, -1}};

    height = video->vol[video->currLayer]->height;

    hmem = hp_mem;
    sad8 = 0;
    for (comp = 0; comp < 4; comp++)    /* four 8x8 luma blocks */
    {
#ifdef _SAD_STAT
        num_HP_Blk++;
#endif
        /**************** check range ***************************/
        /*  3/29/01 */
        M4VENC_MEMSET(in_range, 1, sizeof(Int) << 3);
        imin = xpos + ((comp & 1) << 3) + (mot[comp+1].x >> 1);
        jmin = ypos + ((comp & 2) << 2) + (mot[comp+1].y >> 1);
        ilow = xpos + ((comp & 1) << 3) - range;
        jlow = ypos + ((comp & 2) << 2) - range;

        if (imin <= -15 || imin == ilow) in_range[0] = in_range[6] = in_range[7] = 0;
        else if (imin >= width - 1) in_range[2] = in_range[3] = in_range[4] = 0;

        if (jmin <= -15 || jmin == jlow) in_range[0] = in_range[1] = in_range[2] = 0;
        else if (jmin >= height - 1) in_range[4] = in_range[5] = in_range[6] = 0;

        /**************** half-pel search ***********************/
        cur8 = cur + ((comp & 1) << 3) + ((comp & 2) << 2) * width ;

        /* generate half-pel search region */
        {
            cand = ncand8[comp+1];
            swidth = lx;
        }

        xhmin[comp+1] = 0;
        yhmin[comp+1] = 0;
        dmin = mot[comp+1].sad;

        /* clockwise walk over the 8 half-pel neighbors, starting top-left */
        xh = -1;
        yh = -1;
        for (k = 0; k < 8; k++)
        {
            if (in_range[k])
            {
                d = (*SAD_Blk_HalfPel)(cand, cur8, dmin, lx, swidth, xh, yh, extra_info);

                if (d < dmin)
                {
                    dmin = d;
                    xhmin[comp+1] = xh;
                    yhmin[comp+1] = yh;
                }
            }
            xh += next_hp_pos[k][0];
            yh += next_hp_pos[k][1];
        }
        /********************************************/
        mot[comp+1].x += xhmin[comp+1];
        mot[comp+1].y += yhmin[comp+1];
        mot[comp+1].sad = dmin;
        sad8 += dmin;

        /* early out: 4MV already worse than 1MV (with bias) */
        if (sad8 >= sad16 - PREF_16_VEC)
        {
            *mode = MODE_INTER;
            for (k = 1; k <= 4; k++)
            {
                mot[k].sad = (mot[0].sad + 2) >> 2;
                mot[k].x = mot[0].x;
                mot[k].y = mot[0].y;
            }
            return sad8;
        }

        hmem += (10 * 10);
    }

    *mode = MODE_INTER4V;
    return sad8;
}
#endif /* NO_INTER4V */

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/m4venc_oscl.h
================================================

/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
/*********************************************************************************/
/* Description: Created for abstracting out OSCL such that the code can be used  */
/*              by both V3 and V4 OSCL library. This file is for V4.
*/ /*********************************************************************************/ #ifndef _M4VENC_OSCL_H_ #define _M4VENC_OSCL_H_ #define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA #include "osclconfig_compiler_warnings.h" #include "oscl_mem.h" #define M4VENC_MALLOC(size) oscl_malloc(size) #define M4VENC_FREE(ptr) oscl_free(ptr) #define M4VENC_MEMSET(ptr,val,size) oscl_memset(ptr,val,size) #define M4VENC_MEMCPY(dst,src,size) oscl_memcpy(dst,src,size) #include "oscl_math.h" #define M4VENC_LOG(x) oscl_log(x) #define M4VENC_SQRT(x) oscl_sqrt(x) #define M4VENC_POW(x,y) oscl_pow(x,y) #define M4VENC_HAS_SYMBIAN_SUPPORT OSCL_HAS_SYMBIAN_SUPPORT #endif //_M4VENC_OSCL_H_ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/me_utils.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #include "mp4def.h" #include "mp4enc_lib.h" #include "mp4lib_int.h" #include "m4venc_oscl.h" #define VOP_OFFSET ((lx<<4)+16) /* for offset to image area */ #define CVOP_OFFSET ((lx<<2)+8) #define PREF_INTRA 512 /* bias for INTRA coding */ /*=============================================================== Function: ChooseMode Date: 09/21/2000 Purpose: Choosing between INTRA or INTER Input/Output: Pointer to the starting point of the macroblock. Note: ===============================================================*/ void ChooseMode_C(UChar *Mode, UChar *cur, Int lx, Int min_SAD) { Int i, j; Int MB_mean, A, tmp, Th; Int offset = (lx >> 2) - 4; UChar *p = cur; Int *pint = (Int *) cur, temp = 0; MB_mean = 0; A = 0; Th = (min_SAD - PREF_INTRA) >> 1; for (j = 0; j < 8; j++) { /* Odd Rows */ temp += (*pint++) & 0x00FF00FF; temp += (*pint++) & 0x00FF00FF; temp += (*pint++) & 0x00FF00FF; temp += (*pint++) & 0x00FF00FF; pint += offset; /* Even Rows */ temp += (*pint++ >> 8) & 0x00FF00FF; temp += (*pint++ >> 8) & 0x00FF00FF; temp += (*pint++ >> 8) & 0x00FF00FF; temp += (*pint++ >> 8) & 0x00FF00FF; pint += offset; } MB_mean = (((temp & 0x0000FFFF)) + ((temp & 0xFFFF0000) >> 16)) >> 7; p = cur; offset = lx - 16; for (j = 0; j < 16; j++) { temp = (j & 1); p += temp; i = 8; while (i--) { tmp = *p - MB_mean; p += 2; if (tmp > 0) A += tmp; else A -= tmp; } if (A >= Th) { *Mode = MODE_INTER; return ; } p += (offset - temp); } if (A < Th) *Mode = MODE_INTRA; else *Mode = MODE_INTER; return ; } /*=============================================================== Function: GetHalfPelMBRegion Date: 09/17/2000 Purpose: Interpolate the search region for half-pel search Input/Output: Center of the search, Half-pel memory, width Note: rounding type should be parameterized. Now fixed it to zero!!!!!! 
===============================================================*/ void GetHalfPelMBRegion_C(UChar *cand, UChar *hmem, Int lx) { Int i, j; UChar *p1, *p2, *p3, *p4; UChar *hmem1 = hmem; UChar *hmem2 = hmem1 + 33; Int offset = lx - 17; p1 = cand - lx - 1; p2 = cand - lx; p3 = cand - 1; p4 = cand; for (j = 0; j < 16; j++) { for (i = 0; i < 16; i++) { *hmem1++ = ((*p1++) + *p2 + *p3 + *p4 + 2) >> 2; *hmem1++ = ((*p2++) + *p4 + 1) >> 1; *hmem2++ = ((*p3++) + *p4 + 1) >> 1; *hmem2++ = *p4++; } /* last pixel */ *hmem1++ = ((*p1++) + (*p2++) + *p3 + *p4 + 2) >> 2; *hmem2++ = ((*p3++) + (*p4++) + 1) >> 1; hmem1 += 33; hmem2 += 33; p1 += offset; p2 += offset; p3 += offset; p4 += offset; } /* last row */ for (i = 0; i < 16; i++) { *hmem1++ = ((*p1++) + *p2 + (*p3++) + *p4 + 2) >> 2; *hmem1++ = ((*p2++) + (*p4++) + 1) >> 1; } *hmem1 = (*p1 + *p2 + *p3 + *p4 + 2) >> 2; return ; } /*=============================================================== Function: GetHalfPelBlkRegion Date: 09/20/2000 Purpose: Interpolate the search region for half-pel search in 4MV mode. Input/Output: Center of the search, Half-pel memory, width Note: rounding type should be parameterized. Now fixed it to zero!!!!!! 
===============================================================*/ void GetHalfPelBlkRegion(UChar *cand, UChar *hmem, Int lx) { Int i, j; UChar *p1, *p2, *p3, *p4; UChar *hmem1 = hmem; UChar *hmem2 = hmem1 + 17; Int offset = lx - 9; p1 = cand - lx - 1; p2 = cand - lx; p3 = cand - 1; p4 = cand; for (j = 0; j < 8; j++) { for (i = 0; i < 8; i++) { *hmem1++ = ((*p1++) + *p2 + *p3 + *p4 + 2) >> 2; *hmem1++ = ((*p2++) + *p4 + 1) >> 1; *hmem2++ = ((*p3++) + *p4 + 1) >> 1; *hmem2++ = *p4++; } /* last pixel */ *hmem1++ = ((*p1++) + (*p2++) + *p3 + *p4 + 2) >> 2; *hmem2++ = ((*p3++) + (*p4++) + 1) >> 1; hmem1 += 17; hmem2 += 17; p1 += offset; p2 += offset; p3 += offset; p4 += offset; } /* last row */ for (i = 0; i < 8; i++) { *hmem1++ = ((*p1++) + *p2 + (*p3++) + *p4 + 2) >> 2; *hmem1++ = ((*p2++) + (*p4++) + 1) >> 1; } *hmem1 = (*p1 + *p2 + *p3 + *p4 + 2) >> 2; return ; } /*===================================================================== Function: PaddingEdge Date: 09/16/2000 Purpose: Pad edge of a Vop Modification: 09/20/05. 
=====================================================================*/
/* Pad the luma plane of refVop by 16 pixels on every side by replicating
   the edge pixels, so motion search/compensation can read outside the
   picture without per-access bounds checks.
   NOTE(review): the packed 32-bit corner stores and the pitch-wide row
   copies assume pitch == width + 32 with 4-byte-aligned pad columns —
   confirm against the Vop buffer allocation. */
void PaddingEdge(Vop *refVop)
{
    UChar *src, *dst;
    Int i;
    Int pitch, width, height;
    ULong temp1, temp2;

    width = refVop->width;
    height = refVop->height;
    pitch = refVop->pitch;

    /* pad top */
    src = refVop->yChan;
    temp1 = *src;           /* top-left corner */
    temp2 = src[width-1];   /* top-right corner */
    temp1 |= (temp1 << 8);  /* replicate corner byte into all 4 bytes of the word */
    temp1 |= (temp1 << 16);
    temp2 |= (temp2 << 8);
    temp2 |= (temp2 << 16);
    dst = src - (pitch << 4);            /* 16 rows above the first picture row */
    *((ULong*)(dst - 16)) = temp1;       /* 16-byte left pad of the top pad row */
    *((ULong*)(dst - 12)) = temp1;
    *((ULong*)(dst - 8)) = temp1;
    *((ULong*)(dst - 4)) = temp1;
    M4VENC_MEMCPY(dst, src, width);      /* replicate the first picture row */
    *((ULong*)(dst += width)) = temp2;   /* 16-byte right pad */
    *((ULong*)(dst + 4)) = temp2;
    *((ULong*)(dst + 8)) = temp2;
    *((ULong*)(dst + 12)) = temp2;
    dst = dst - width - 16;              /* back to the start of the padded row */
    i = 15;
    while (i--)     /* copy the completed padded row into the other 15 pad rows */
    {
        M4VENC_MEMCPY(dst + pitch, dst, pitch);
        dst += pitch;
    }
    /* pad sides */
    dst += (pitch + 16);                 /* first picture row */
    src = dst;
    i = height;
    while (i--)     /* replicate left/right edge pixels into the side pads */
    {
        temp1 = *src;
        temp2 = src[width-1];
        temp1 |= (temp1 << 8);
        temp1 |= (temp1 << 16);
        temp2 |= (temp2 << 8);
        temp2 |= (temp2 << 16);
        *((ULong*)(dst - 16)) = temp1;
        *((ULong*)(dst - 12)) = temp1;
        *((ULong*)(dst - 8)) = temp1;
        *((ULong*)(dst - 4)) = temp1;
        *((ULong*)(dst += width)) = temp2;
        *((ULong*)(dst + 4)) = temp2;
        *((ULong*)(dst + 8)) = temp2;
        *((ULong*)(dst + 12)) = temp2;
        src += pitch;
        dst = src;
    }
    /* pad bottom */
    dst -= 16;
    i = 16;
    while (i--)     /* replicate the last padded row into the 16 rows below */
    {
        M4VENC_MEMCPY(dst, dst - pitch, pitch);
        dst += pitch;
    }

    return ;
}

/*===================================================================
    Function:   ComputeMBSum
    Date:       10/28/2000
    Purpose:    Compute sum of absolute value (SAV) of blocks in a macroblock
                in INTRA mode needed for rate control. Thus, instead of
                computing the SAV, we can compute first order moment or
                variance .
    11/28/00:    add MMX
    9/3/01:     do parallel comp for C function.
===================================================================*/
/* C version: store in mot_mb[1..4].sad the sum of pixel values of the four
   8x8 luma blocks of the MB at cur (stride lx, order: upper-left,
   upper-right, lower-left, lower-right) and their total in mot_mb[0].sad.
   Pixels are read four at a time; even and odd bytes are accumulated in
   the two 16-bit halves of an Int and folded together at the end with the
   (x + (x << 16)) >> 16 trick.
   NOTE(review): assumes cur is 4-byte aligned (see comment below) and that
   each packed 16-bit partial sum stays below 65536 — true for 8x8 unsigned
   pixels (max 32 * 255 per half). */
void ComputeMBSum_C(UChar *cur, Int lx, MOT *mot_mb)
{
    Int j;
    Int *cInt, *cInt2;
    Int sad1 = 0, sad2 = 0, sad3 = 0, sad4 = 0;
    Int tmp, tmp2, mask = 0x00FF00FF;

    cInt = (Int*)cur;   /* make sure this is word-align */
    cInt2 = (Int*)(cur + (lx << 3));    /* lower half of the MB, 8 rows down */
    j = 8;
    while (j--)     /* one row of the upper half + one row of the lower half */
    {
        tmp = cInt[3];      /* load 4 pixels at a time */
        tmp2 = tmp & mask;              /* even bytes into low 16-bit lanes */
        tmp = (tmp >> 8) & mask;        /* odd bytes */
        tmp += tmp2;
        sad2 += tmp;                    /* upper-right 8x8 block */
        tmp = cInt[2];
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad2 += tmp;
        tmp = cInt[1];
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad1 += tmp;                    /* upper-left 8x8 block */
        tmp = *cInt;
        cInt += (lx >> 2);              /* next row (lx bytes = lx>>2 words) */
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad1 += tmp;
        tmp = cInt2[3];
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad4 += tmp;                    /* lower-right 8x8 block */
        tmp = cInt2[2];
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad4 += tmp;
        tmp = cInt2[1];
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad3 += tmp;                    /* lower-left 8x8 block */
        tmp = *cInt2;
        cInt2 += (lx >> 2);
        tmp2 = tmp & mask;
        tmp = (tmp >> 8) & mask;
        tmp += tmp2;
        sad3 += tmp;
    }
    /* fold each accumulator's two packed 16-bit partial sums into one value */
    sad1 += (sad1 << 16);
    sad2 += (sad2 << 16);
    sad3 += (sad3 << 16);
    sad4 += (sad4 << 16);
    sad1 >>= 16;
    sad2 >>= 16;
    sad3 >>= 16;
    sad4 >>= 16;

    mot_mb[1].sad = sad1;
    mot_mb[2].sad = sad2;
    mot_mb[3].sad = sad3;
    mot_mb[4].sad = sad4;
    mot_mb[0].sad = sad1 + sad2 + sad3 + sad4;

    return ;
}

================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/motion_comp.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "oscl_base_macros.h" // for OSCL_UNUSED_ARG #include "mp4lib_int.h" #include "mp4enc_lib.h" //const static Int roundtab4[] = {0,1,1,1}; //const static Int roundtab8[] = {0,0,1,1,1,1,1,2}; //const static Int roundtab12[] = {0,0,0,1,1,1,1,1,1,1,2,2}; const static Int roundtab16[] = {0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2}; #define FORWARD_MODE 1 #define BACKWARD_MODE 2 #define BIDIRECTION_MODE 3 #define DIRECT_MODE 4 #ifdef __cplusplus extern "C" { #endif /*Function Prototype */ /* no-edge padding */ Int EncGetPredOutside(Int xpos, Int ypos, UChar *c_prev, UChar *rec, Int width, Int height, Int rnd1); void Copy_MB_from_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int width); void Copy_B_from_Vop(UChar *comp, Int cChan[], Int width); void Copy_MB_into_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int width); void Copy_B_into_Vop(UChar *comp, Int cChan[], Int width); void get_MB(UChar *c_prev, UChar *c_prev_u , UChar *c_prev_v, Short mb[6][64], Int lx, Int lx_uv); Int GetPredAdvBy0x0( UChar *c_prev, /* i */ UChar *pred_block, /* i */ Int lx, /* i */ Int rnd1 /* i */ ); Int GetPredAdvBy0x1( UChar *c_prev, /* i */ UChar *pred_block, /* i */ Int lx, /* i */ Int rnd1 /* i */ ); Int GetPredAdvBy1x0( UChar *c_prev, /* i */ UChar *pred_block, /* i */ Int lx, /* i */ Int rnd1 /* i */ ); Int GetPredAdvBy1x1( UChar *c_prev, /* i */ UChar *pred_block, /* i */ Int lx, /* i */ Int rnd1 /* i */ ); static Int(*const GetPredAdvBTable[2][2])(UChar*, UChar*, Int, Int) = { {&GetPredAdvBy0x0, 
&GetPredAdvBy0x1},
    {&GetPredAdvBy1x0,
     &GetPredAdvBy1x1}
};

#ifdef __cplusplus
}
#endif

/* ======================================================================== */
/*  Function : getMotionCompensatedMB( )                                    */
/*  Date     : 4/17/2001                                                    */
/*  Purpose  : Get the motion compensate block into video->predictionMB     */
/*             and generate video->predictionErrorMB                        */
/*             modified from MBMotionComp() function in the decoder         */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
/* Builds the 16x16 luma + 8x8 chroma prediction for macroblock
   (ind_x, ind_y) from the forward reference frame, according to the MB's
   coding mode (1-MV inter, inter-Q, or 4-MV inter).  The 8x8 predictors
   write rows with a stride of 16, so the Cb prediction (offset 256 into
   predictedMB) and the Cr prediction (offset 264) share rows. */
void getMotionCompensatedMB(VideoEncData *video, Int ind_x, Int ind_y, Int offset)
{
    Vop *prevVop = video->forwardRefVop; //reference frame
    Vop *currVop = video->currVop;
    Int mbnum = video->mbnum; //mb index
    MOT *mot = video->mot[mbnum];   /* mot[0] = MB-level MV, mot[1..4] = block MVs */
    Int ypos, xpos;
    UChar *c_prev, *cu_prev, *cv_prev;
    UChar *c_rec, *cu_rec, *cv_rec;
    Int height, pitch, pitch_uv, height_uv;
    Int mode = video->headerInfo.Mode[mbnum]; /* get mode */
    Int dx, dy;
    Int xpred, ypred;
    Int xsum, ysum;
    Int round1;

    OSCL_UNUSED_ARG(offset);

    /* rounding control: 1 - rounding_type per MPEG-4 half-pel averaging */
    round1 = (Int)(1 - video->currVop->roundingType);

    pitch = currVop->pitch;
    height = currVop->height;
    pitch_uv = pitch >> 1;
    height_uv = height >> 1;

    /* top-left pel of this MB */
    ypos = ind_y << 4 ;
    xpos = ind_x << 4 ;

    c_rec = video->predictedMB;          /* 16x16 luma prediction, stride 16 */
    cu_rec = video->predictedMB + 256;   /* 8x8 Cb, interleaved with Cr, stride 16 */
    cv_rec = video->predictedMB + 264;   /* 8x8 Cr */

    if (mode == MODE_INTER || mode == MODE_INTER_Q)
    {
        /* Motion vector in x direction */
        dx = mot[0].x;
        dy = mot[0].y;

        c_prev = prevVop->yChan;

        /* MB position in half-pel units plus the half-pel MV */
        xpred = (xpos << 1) + dx ;
        ypred = (ypos << 1) + dy ;

        /* Call function that performs luminance prediction */
        EncPrediction_INTER(xpred, ypred, c_prev, c_rec, pitch, round1);

        /* derive the chroma MV: luma MV halved, non-multiples of 4
           rounded to the nearest half-pel position (set low bit) */
        if ((dx & 3) == 0)
            dx = dx >> 1;
        else
            dx = (dx >> 1) | 1;
        if ((dy & 3) == 0)
            dy = dy >> 1;
        else
            dy = (dy >> 1) | 1;

        xpred = xpos + dx;
        ypred = ypos + dy;

        cu_prev = prevVop->uChan;
        cv_prev = prevVop->vChan;

        EncPrediction_Chrom(xpred, ypred, cu_prev, cv_prev, cu_rec, cv_rec,
                            pitch_uv, (currVop->width) >> 1, height_uv, round1);
    }
#ifndef NO_INTER4V
    else if (mode == MODE_INTER4V)
    {
        c_prev = prevVop->yChan;
        cu_prev = prevVop->uChan;
        cv_prev = prevVop->vChan;

        EncPrediction_INTER4V(xpos, ypos, mot, c_prev, c_rec, pitch, round1);

        /* chroma MV from the sum of the four block MVs, divided with the
           roundtab16 rounding table (MPEG-4 4MV chroma MV derivation) */
        xsum = mot[1].x + mot[2].x + mot[3].x + mot[4].x;
        ysum = mot[1].y + mot[2].y + mot[3].y + mot[4].y;

        dx = PV_SIGN(xsum) * (roundtab16[(PV_ABS(xsum)) & 0xF] +
                              (((PV_ABS(xsum)) >> 4) << 1));
        dy = PV_SIGN(ysum) * (roundtab16[(PV_ABS(ysum)) & 0xF] +
                              (((PV_ABS(ysum)) >> 4) << 1));

        ypred = ypos + dy;
        xpred = xpos + dx;

        EncPrediction_Chrom(xpred, ypred, cu_prev, cv_prev, cu_rec, cv_rec,
                            pitch_uv, (currVop->width) >> 1, height_uv, round1);
    }
#endif
    else
    {
        ;//printf("Error, MODE_SKIPPED is not decided yet!\n");
    }

    return ;
}

/***************************************************************************
    Function:   EncPrediction_INTER
    Date:       04/17/2001
    Purpose:    Get predicted area for luminance and compensate with the
                residue.  Modified from luminance_pred_mode_inter() in
                decoder.
***************************************************************************/
/* Predicts the four 8x8 quadrants of the 16x16 luma MB with a single MV.
   (xpred, ypred) is the MB position in half-pel units; the half-pel phase
   (ypred&1, xpred&1) selects the interpolator from GetPredAdvBTable.
   The destination MB uses a row stride of 16. */
void EncPrediction_INTER(
    Int xpred,          /* i */
    Int ypred,          /* i */
    UChar *c_prev,      /* i */
    UChar *c_rec,       /* i */
    Int lx,             /* i */
    Int round1          /* i */
)
{
    /* integer-pel top-left of the prediction source */
    c_prev += (xpred >> 1) + ((ypred >> 1) * lx);

    /* top-left 8x8 */
    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);

    /* top-right 8x8 */
    c_prev += B_SIZE;
    c_rec += B_SIZE;
    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);

    /* bottom-left 8x8 */
    c_prev += (lx << 3) - B_SIZE;
    c_rec += (16 << 3) - B_SIZE; /* padding */
    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);

    /* bottom-right 8x8 */
    c_prev += B_SIZE;
    c_rec += B_SIZE;
    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);

    return;
}

#ifndef NO_INTER4V
/***************************************************************************
    Function:   EncPrediction_INTER4V
    Date:       04/17/2001
    Purpose:    Get predicted area for luminance and compensate with the
                residue.  Modified from luminance_pred_mode_inter4v() in
                decoder.
***************************************************************************/ void EncPrediction_INTER4V( Int xpos, /* i */ Int ypos, /* i */ MOT *mot, /* i */ UChar *c_prev, /* i */ UChar *c_rec, /* i */ Int lx, /* i */ Int round1 /* i */ ) { Int ypred, xpred; xpred = (Int)((xpos << 1) + mot[1].x); ypred = (Int)((ypos << 1) + mot[1].y); GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx), c_rec, lx, round1); c_rec += B_SIZE; xpred = (Int)(((xpos + B_SIZE) << 1) + mot[2].x); ypred = (Int)((ypos << 1) + mot[2].y); GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx), c_rec, lx, round1); c_rec += (16 << 3) - B_SIZE; /* padding */ xpred = (Int)((xpos << 1) + mot[3].x); ypred = (Int)(((ypos + B_SIZE) << 1) + mot[3].y); GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx), c_rec, lx, round1); c_rec += B_SIZE; xpred = (Int)(((xpos + B_SIZE) << 1) + mot[4].x); ypred = (Int)(((ypos + B_SIZE) << 1) + mot[4].y); GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx), c_rec, lx, round1); return; } #endif /* NO_INTER4V */ /*************************************************************************** Function: EncPrediction_Chrom Date: 04/17/2001 Purpose: Get predicted area for chrominance and compensate with the residue. Modified from chrominance_pred() in decoder. 
***************************************************************************/
/* Predict the 8x8 Cb and Cr blocks from (xpred, ypred), the chroma MV
   position in half-pel units.  When the whole 8x8 interpolation window
   lies inside the frame the fast aligned predictors are used; otherwise
   the no-edge-padding fallback EncGetPredOutside handles the overlap. */
void EncPrediction_Chrom(
    Int xpred,          /* i */
    Int ypred,          /* i */
    UChar *cu_prev,     /* i */
    UChar *cv_prev,     /* i */
    UChar *cu_rec,
    UChar *cv_rec,
    Int lx,
    Int width_uv,       /* i */
    Int height_uv,      /* i */
    Int round1          /* i */
)
{
    /* largest half-pel coordinate whose 8x8 window is fully inside the frame */
    Int xmax = (width_uv << 1) - (2 * B_SIZE);
    Int ymax = (height_uv << 1) - (2 * B_SIZE);

    if (xpred < 0 || xpred > xmax || ypred < 0 || ypred > ymax)
    {
        /******************************/
        /* (x,y) is outside the frame */
        /******************************/
        /* Chrominance b (block[4]) */
        EncGetPredOutside(xpred, ypred, cu_prev, cu_rec, width_uv, height_uv, round1);
        /* Chrominance r (block[5]) */
        EncGetPredOutside(xpred, ypred, cv_prev, cv_rec, width_uv, height_uv, round1);
        return;
    }

    /*****************************/
    /* (x,y) is inside the frame */
    /*****************************/
    {
        Int pos = (xpred >> 1) + ((ypred >> 1) * lx);
        Int (*pred)(UChar*, UChar*, Int, Int) = GetPredAdvBTable[ypred&1][xpred&1];

        pred(cu_prev + pos, cu_rec, lx, round1);    /* Chrominance b (block[4]) */
        pred(cv_prev + pos, cv_rec, lx, round1);    /* Chrominance r (block[5]) */
    }

    return;
}

/***************************************************************************
    Function:   GetPredAdvancedB
    Date:       04/17/2001
    Purpose:    Get predicted area (block) and compensate with the residue.
                - modified from GetPredAdvancedBAdd in decoder.
    Input/Output:
    Modified:
***************************************************************************/
/* Full-pel (y=0, x=0 half-pel phase) 8x8 prediction: a straight copy of
   8 rows of 8 bytes from prev (stride lx) to rec (stride 16), using only
   aligned 32-bit loads.  The (ULong)prev & 3 branch selects the shift/merge
   pattern for each of the four possible source byte alignments. */
Int GetPredAdvBy0x0(
    UChar *prev,        /* i */
    UChar *rec,         /* i */
    Int lx,             /* i */
    Int rnd             /* i */
)
{
    Int i;      /* loop variable */
    ULong pred_word, word1, word2;
    Int tmp;

    OSCL_UNUSED_ARG(rnd);   /* no averaging in the full-pel case */

    /* initialize offset to adjust pixel counter */
    /*    the next row; full-pel resolution      */
    tmp = (ULong)prev & 0x3;

    if (tmp == 0)   /* word-aligned */
    {
        rec -= 16;  /* preset */
        prev -= lx;
        for (i = 8; i > 0; i--)
        {
            *((ULong*)(rec += 16)) = *((ULong*)(prev += lx));
            *((ULong*)(rec + 4)) = *((ULong*)(prev + 4));
        }
        return 1;
    }
    else if (tmp == 1)  /* first position */
    {
        prev--;     /* word-aligned */
        rec -= 16;  /* preset */
        prev -= lx;
        for (i = 8; i > 0; i--)
        {
            word1 = *((ULong*)(prev += lx));    /* read 4 bytes, b4 b3 b2 b1 */
            word2 = *((ULong*)(prev + 4));      /* read 4 bytes, b8 b7 b6 b5 */
            word1 >>= 8;                        /* 0 b4 b3 b2 */
            pred_word = word1 | (word2 << 24);  /* b5 b4 b3 b2 */
            *((ULong*)(rec += 16)) = pred_word;

            word1 = *((ULong*)(prev + 8));      /* b12 b11 b10 b9 */
            word2 >>= 8;                        /* 0 b8 b7 b6 */
            pred_word = word2 | (word1 << 24);  /* b9 b8 b7 b6 */
            *((ULong*)(rec + 4)) = pred_word;
        }
        return 1;
    }
    else if (tmp == 2)  /* second position */
    {
        prev -= 2;  /* word1-aligned */
        rec -= 16;  /* preset */
        prev -= lx;
        for (i = 8; i > 0; i--)
        {
            word1 = *((ULong*)(prev += lx));    /* read 4 bytes, b4 b3 b2 b1 */
            word2 = *((ULong*)(prev + 4));      /* read 4 bytes, b8 b7 b6 b5 */
            word1 >>= 16;                       /* 0 0 b4 b3 */
            pred_word = word1 | (word2 << 16);  /* b6 b5 b4 b3 */
            *((ULong*)(rec += 16)) = pred_word;

            word1 = *((ULong*)(prev + 8));      /* b12 b11 b10 b9 */
            word2 >>= 16;                       /* 0 0 b8 b7 */
            pred_word = word2 | (word1 << 16);  /* b10 b9 b8 b7 */
            *((ULong*)(rec + 4)) = pred_word;
        }
        return 1;
    }
    else    /* third position */
    {
        prev -= 3;  /* word1-aligned */
        rec -= 16;  /* preset */
        prev -= lx;
        for (i = 8; i > 0; i--)
        {
            word1 = *((ULong*)(prev += lx));    /* read 4 bytes, b4 b3 b2 b1 */
            word2 = *((ULong*)(prev + 4));      /* read 4 bytes, b8 b7 b6 b5 */
            word1 >>= 24;                       /* 0 0 0 b4 */
            pred_word = word1 | (word2 << 8);   /* b7 b6 b5 b4 */
            *((ULong*)(rec += 16)) = pred_word;

            word1 = *((ULong*)(prev + 8));      /* b12 b11 b10 b9 */
            word2 >>= 24;                       /* 0 0 0 b8 */
            pred_word = word2 | (word1 << 8);   /* b11 b10 b9 b8 */
            *((ULong*)(rec + 4)) = pred_word;
        }
        return 1;
    }
}

/**************************************************************************/
/* Horizontal half-pel (y=0, x=1) 8x8 prediction: each output pixel is the
   average of a source pixel and its right neighbour, computed four at a
   time with the packed trick avg(a,b) = (a&0xFE..)/2 + (b&0xFE..)/2 +
   carry, where the carry is (a|b)&1 when rnd1 == 1 (round up) and (a&b)&1
   when rnd1 == 0.  Aligned loads only; rec row stride is 16 (12 + 4). */
Int GetPredAdvBy0x1(
    UChar *prev,        /* i */
    UChar *rec,         /* i */
    Int lx,             /* i */
    Int rnd1            /* i */
)
{
    Int i;      /* loop variable */
    Int offset;
    ULong word1, word2, word3, word12;
    Int tmp;
    ULong mask;

    /* initialize offset to adjust pixel counter */
    /*    the next row; full-pel resolution      */
    offset = lx - B_SIZE;   /* offset for prev */

    /* Branch based on pixel location (half-pel or full-pel) for x and y */
    rec -= 12;  /* preset */
    tmp = (ULong)prev & 3;
    mask = 254;
    mask |= (mask << 8);
    mask |= (mask << 16);   /* 0xFEFEFEFE */

    if (tmp == 0)   /* word-aligned */
    {
        if (rnd1 == 1)
        {
            for (i = B_SIZE; i > 0; i--)
            {
                word1 = *((ULong*)prev);        /* b4 b3 b2 b1 */
                word2 = *((ULong*)(prev += 4)); /* b8 b7 b6 b5 */
                word12 = (word1 >> 8);          /* 0 b4 b3 b2 */
                word12 |= (word2 << 24);        /* b5 b4 b3 b2 */
                word3 = word1 | word12;         // rnd1 = 1; otherwise word3 = word1&word12
                word1 &= mask;
                word3 &= (~mask);               /* 0x1010101, check last bit */
                word12 &= mask;
                word1 >>= 1;
                word1 = word1 + (word12 >> 1);
                word1 += word3;
                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */

                word1 = *((ULong*)(prev += 4)); /* b12 b11 b10 b9 */
                word12 = (word2 >> 8);          /* 0 b8 b7 b6 */
                word12 |= (word1 << 24);        /* b9 b8 b7 b6 */
                word3 = word2 | word12;
                word2 &= mask;
                word3 &= (~mask);               /* 0x1010101, check last bit */
                word12 &= mask;
                word2 >>= 1;
                word2 = word2 + (word12 >> 1);
                word2 += word3;
                *((ULong*)(rec += 4)) = word2;  /* write 4 pixels */
                prev += offset;
            }
            return 1;
        }
        else    /* rnd1 == 0 */
        {
            for (i = B_SIZE; i > 0; i--)
            {
                word1 = *((ULong*)prev);        /* b4 b3 b2 b1 */
                word2 = *((ULong*)(prev += 4)); /* b8 b7 b6 b5 */
                word12 = (word1 >> 8);          /* 0 b4 b3 b2 */
                word12 |= (word2 << 24);
/* b5 b4 b3 b2 */ word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b12 b11 b10 b9 */ word12 = (word2 >> 8); /* 0 b8 b7 b6 */ word12 |= (word1 << 24); /* b9 b8 b7 b6 */ word3 = word2 & word12; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } /* rnd1 */ } else if (tmp == 1) { prev--; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)prev); /* b3 b2 b1 b0 */ word2 = *((ULong*)(prev += 4)); /* b7 b6 b5 b4 */ word12 = (word1 >> 8); /* 0 b3 b2 b1 */ word1 >>= 16; /* 0 0 b3 b2 */ word12 |= (word2 << 24); /* b4 b3 b2 b1 */ word1 |= (word2 << 16); /* b5 b4 b3 b2 */ word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b11 b10 b9 b8 */ word12 = (word2 >> 8); /* 0 b7 b6 b5 */ word2 >>= 16; /* 0 0 b7 b6 */ word12 |= (word1 << 24); /* b8 b7 b6 b5 */ word2 |= (word1 << 16); /* b9 b8 b7 b6 */ word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word2&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)prev); /* b3 b2 b1 b0 */ word2 = *((ULong*)(prev += 4)); /* b7 b6 b5 b4 */ word12 = (word1 >> 8); /* 0 b3 b2 b1 */ word1 >>= 16; /* 0 0 b3 b2 */ 
word12 |= (word2 << 24); /* b4 b3 b2 b1 */ word1 |= (word2 << 16); /* b5 b4 b3 b2 */ word3 = word1 & word12; word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b11 b10 b9 b8 */ word12 = (word2 >> 8); /* 0 b7 b6 b5 */ word2 >>= 16; /* 0 0 b7 b6 */ word12 |= (word1 << 24); /* b8 b7 b6 b5 */ word2 |= (word1 << 16); /* b9 b8 b7 b6 */ word3 = word2 & word12; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } /* rnd1 */ } else if (tmp == 2) { prev -= 2; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)prev); /* b2 b1 b0 bN1 */ word2 = *((ULong*)(prev += 4)); /* b6 b5 b4 b3 */ word12 = (word1 >> 16); /* 0 0 b2 b1 */ word1 >>= 24; /* 0 0 0 b2 */ word12 |= (word2 << 16); /* b4 b3 b2 b1 */ word1 |= (word2 << 8); /* b5 b4 b3 b2 */ word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b10 b9 b8 b7 */ word12 = (word2 >> 16); /* 0 0 b6 b5 */ word2 >>= 24; /* 0 0 0 b6 */ word12 |= (word1 << 16); /* b8 b7 b6 b5 */ word2 |= (word1 << 8); /* b9 b8 b7 b6 */ word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else /* rnd1 == 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)prev); /* b2 b1 b0 bN1 */ word2 = *((ULong*)(prev += 
4)); /* b6 b5 b4 b3 */ word12 = (word1 >> 16); /* 0 0 b2 b1 */ word1 >>= 24; /* 0 0 0 b2 */ word12 |= (word2 << 16); /* b4 b3 b2 b1 */ word1 |= (word2 << 8); /* b5 b4 b3 b2 */ word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b10 b9 b8 b7 */ word12 = (word2 >> 16); /* 0 0 b6 b5 */ word2 >>= 24; /* 0 0 0 b6 */ word12 |= (word1 << 16); /* b8 b7 b6 b5 */ word2 |= (word1 << 8); /* b9 b8 b7 b6 */ word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } } else /* tmp = 3 */ { prev -= 3; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)prev); /* b1 b0 bN1 bN2 */ word2 = *((ULong*)(prev += 4)); /* b5 b4 b3 b2 */ word12 = (word1 >> 24); /* 0 0 0 b1 */ word12 |= (word2 << 8); /* b4 b3 b2 b1 */ word1 = word2; word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b9 b8 b7 b6 */ word12 = (word2 >> 24); /* 0 0 0 b5 */ word12 |= (word1 << 8); /* b8 b7 b6 b5 */ word2 = word1; /* b9 b8 b7 b6 */ word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } else { for (i = B_SIZE; i > 0; i--) { word1 = 
*((ULong*)prev); /* b1 b0 bN1 bN2 */ word2 = *((ULong*)(prev += 4)); /* b5 b4 b3 b2 */ word12 = (word1 >> 24); /* 0 0 0 b1 */ word12 |= (word2 << 8); /* b4 b3 b2 b1 */ word1 = word2; word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word1 >>= 1; word1 = word1 + (word12 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; /* write 4 pixels */ word1 = *((ULong*)(prev += 4)); /* b9 b8 b7 b6 */ word12 = (word2 >> 24); /* 0 0 0 b5 */ word12 |= (word1 << 8); /* b8 b7 b6 b5 */ word2 = word1; /* b9 b8 b7 b6 */ word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12 word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 &= mask; word2 >>= 1; word2 = word2 + (word12 >> 1); word2 += word3; *((ULong*)(rec += 4)) = word2; /* write 4 pixels */ prev += offset; } return 1; } } } /**************************************************************************/ Int GetPredAdvBy1x0( UChar *prev, /* i */ UChar *rec, /* i */ Int lx, /* i */ Int rnd1 /* i */ ) { Int i; /* loop variable */ Int offset; ULong word1, word2, word3, word12, word22; Int tmp; ULong mask; /* initialize offset to adjust pixel counter */ /* the next row; full-pel resolution */ offset = lx - B_SIZE; /* offset for prev */ /* Branch based on pixel location (half-pel or full-pel) for x and y */ rec -= 12; /* preset */ tmp = (ULong)prev & 3; mask = 254; mask |= (mask << 8); mask |= (mask << 16); /* 0xFEFEFEFE */ if (tmp == 0) /* word-aligned */ { prev -= 4; if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)(prev += 4)); word2 = *((ULong*)(prev + lx)); word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; word1 = *((ULong*)(prev += 4)); word2 = *((ULong*)(prev + lx)); word3 = word1 | word2; // rnd1 = 
1; otherwise word3 = word1&word2 word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word1 = *((ULong*)(prev += 4)); word2 = *((ULong*)(prev + lx)); word3 = word1 & word2; /* rnd1 = 0; */ word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 12)) = word1; word1 = *((ULong*)(prev += 4)); word2 = *((ULong*)(prev + lx)); word3 = word1 & word2; /* rnd1 = 0; */ word1 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word2 &= mask; word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } } else if (tmp == 1) { prev--; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */ word22 = *((ULong*)(prev + lx)); word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((ULong*)(prev + lx)); word12 >>= 8; /* 0 b4 b3 b2 */ word22 >>= 8; word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */ word22 = word22 | (word2 << 24); word3 = word12 | word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((ULong*)(rec += 12)) = word12; word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((ULong*)(prev + lx)); word1 >>= 8; /* 0 b8 b7 b6 */ word2 >>= 8; word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */ word2 = word2 | (word22 << 24); word3 = word1 | word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { word12 = *((ULong*)prev); /* read b4 b3 b2 
b1 */ word22 = *((ULong*)(prev + lx)); word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((ULong*)(prev + lx)); word12 >>= 8; /* 0 b4 b3 b2 */ word22 >>= 8; word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */ word22 = word22 | (word2 << 24); word3 = word12 & word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((ULong*)(rec += 12)) = word12; word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((ULong*)(prev + lx)); word1 >>= 8; /* 0 b8 b7 b6 */ word2 >>= 8; word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */ word2 = word2 | (word22 << 24); word3 = word1 & word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } } else if (tmp == 2) { prev -= 2; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */ word22 = *((ULong*)(prev + lx)); word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((ULong*)(prev + lx)); word12 >>= 16; /* 0 0 b4 b3 */ word22 >>= 16; word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */ word22 = word22 | (word2 << 16); word3 = word12 | word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((ULong*)(rec += 12)) = word12; word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((ULong*)(prev + lx)); word1 >>= 16; /* 0 0 b8 b7 */ word2 >>= 16; word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */ word2 = word2 | (word22 << 16); word3 = word1 | word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } else /* rnd1 = 0 */ { for (i = B_SIZE; i > 0; i--) { 
word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */ word22 = *((ULong*)(prev + lx)); word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((ULong*)(prev + lx)); word12 >>= 16; /* 0 0 b4 b3 */ word22 >>= 16; word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */ word22 = word22 | (word2 << 16); word3 = word12 & word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((ULong*)(rec += 12)) = word12; word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((ULong*)(prev + lx)); word1 >>= 16; /* 0 0 b8 b7 */ word2 >>= 16; word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */ word2 = word2 | (word22 << 16); word3 = word1 & word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } } else /* tmp == 3 */ { prev -= 3; /* word-aligned */ if (rnd1 == 1) { for (i = B_SIZE; i > 0; i--) { word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */ word22 = *((ULong*)(prev + lx)); word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((ULong*)(prev + lx)); word12 >>= 24; /* 0 0 0 b4 */ word22 >>= 24; word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */ word22 = word22 | (word2 << 8); word3 = word12 | word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((ULong*)(rec += 12)) = word12; word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((ULong*)(prev + lx)); word1 >>= 24; /* 0 0 0 b8 */ word2 >>= 24; word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */ word2 = word2 | (word22 << 8); word3 = word1 | word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } else /* rnd1 
= 0 */ { for (i = B_SIZE; i > 0; i--) { word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */ word22 = *((ULong*)(prev + lx)); word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */ word2 = *((ULong*)(prev + lx)); word12 >>= 24; /* 0 0 0 b4 */ word22 >>= 24; word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */ word22 = word22 | (word2 << 8); word3 = word12 & word22; word12 &= mask; word22 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word12 >>= 1; word12 = word12 + (word22 >> 1); word12 += word3; *((ULong*)(rec += 12)) = word12; word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */ word22 = *((ULong*)(prev + lx)); word1 >>= 24; /* 0 0 0 b8 */ word2 >>= 24; word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */ word2 = word2 | (word22 << 8); word3 = word1 & word2; word1 &= mask; word2 &= mask; word3 &= (~mask); /* 0x1010101, check last bit */ word1 >>= 1; word1 = word1 + (word2 >> 1); word1 += word3; *((ULong*)(rec += 4)) = word1; prev += offset; } return 1; } /* rnd */ } /* tmp */ } /**********************************************************************************/ Int GetPredAdvBy1x1( UChar *prev, /* i */ UChar *rec, /* i */ Int lx, /* i */ Int rnd1 /* i */ ) { Int i; /* loop variable */ Int offset; ULong x1, x2, x1m, x2m, y1, y2, y1m, y2m; /* new way */ Int tmp; Int rnd2; ULong mask; /* initialize offset to adjust pixel counter */ /* the next row; full-pel resolution */ offset = lx - B_SIZE; /* offset for prev */ rnd2 = rnd1 + 1; rnd2 |= (rnd2 << 8); rnd2 |= (rnd2 << 16); mask = 0x3F; mask |= (mask << 8); mask |= (mask << 16); /* 0x3f3f3f3f */ tmp = (ULong)prev & 3; rec -= 4; /* preset */ if (tmp == 0) /* word-aligned */ { for (i = B_SIZE; i > 0; i--) { x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */ x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */ y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 
^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ y2m = x1m >> 8; y2 = x1 >> 8; y2m |= (y1m << 24); /* a4+b4, a3+b3, a2+b2, a1+b1 */ y2 |= (y1 << 24); x1m += y2m; /* a3+b3+a4+b4, ....., a0+b0+a1+b1 */ x1 += y2; x1 += rnd2; x1 &= (mask << 2); x1m += (x1 >> 2); *((ULong*)(rec += 4)) = x1m; /* save x1m */ y2m = y1m >> 8; y2 = y1 >> 8; y2m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */ y2 |= (x2 << 24); y1m += y2m; /* a7+b7+a8+b8, ....., a4+b4+a5+b5 */ y1 += y2; y1 += rnd2; y1 &= (mask << 2); y1m += (y1 >> 2); *((ULong*)(rec += 4)) = y1m; /* save y1m */ rec += 8; prev += offset; } return 1; } else if (tmp == 1) { prev--; /* to word-aligned */ for (i = B_SIZE; i > 0; i--) { x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */ x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */ y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 
2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ x1m >>= 8 ; x1 >>= 8; x1m |= (y1m << 24); /* a4+b4, a3+b3, a2+b2, a1+b1 */ x1 |= (y1 << 24); y2m = (y1m << 16); y2 = (y1 << 16); y2m |= (x1m >> 8); /* a5+b5, a4+b4, a3+b3, a2+b2 */ y2 |= (x1 >> 8); x1 += rnd2; x1m += y2m; /* a4+b4+a5+b5, ....., a1+b1+a2+b2 */ x1 += y2; x1 &= (mask << 2); x1m += (x1 >> 2); *((ULong*)(rec += 4)) = x1m; /* save x1m */ y1m >>= 8; y1 >>= 8; y1m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */ y1 |= (x2 << 24); y2m = (x2m << 16); y2 = (x2 << 16); y2m |= (y1m >> 8); /* a9+b9, a8+b8, a7+b7, a6+b6,*/ y2 |= (y1 >> 8); y1 += rnd2; y1m += y2m; /* a8+b8+a9+b9, ....., a5+b5+a6+b6 */ y1 += y2; y1 &= (mask << 2); y1m += (y1 >> 2); *((ULong*)(rec += 4)) = y1m; /* save y1m */ rec += 8; prev += offset; } return 1; } else if (tmp == 2) { prev -= 2; /* to word-aligned */ for (i = B_SIZE; i > 0; i--) { x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */ x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */ y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, 
y1, x2 */ x1m >>= 16 ; x1 >>= 16; x1m |= (y1m << 16); /* a5+b5, a4+b4, a3+b3, a2+b2 */ x1 |= (y1 << 16); y2m = (y1m << 8); y2 = (y1 << 8); y2m |= (x1m >> 8); /* a6+b6, a5+b5, a4+b4, a3+b3 */ y2 |= (x1 >> 8); x1 += rnd2; x1m += y2m; /* a5+b5+a6+b6, ....., a2+b2+a3+b3 */ x1 += y2; x1 &= (mask << 2); x1m += (x1 >> 2); *((ULong*)(rec += 4)) = x1m; /* save x1m */ y1m >>= 16; y1 >>= 16; y1m |= (x2m << 16); /* a9+b9, a8+b8, a7+b7, a6+b6 */ y1 |= (x2 << 16); y2m = (x2m << 8); y2 = (x2 << 8); y2m |= (y1m >> 8); /* a10+b10, a9+b9, a8+b8, a7+b7,*/ y2 |= (y1 >> 8); y1 += rnd2; y1m += y2m; /* a9+b9+a10+b10, ....., a6+b6+a7+b7 */ y1 += y2; y1 &= (mask << 2); y1m += (y1 >> 2); *((ULong*)(rec += 4)) = y1m; /* save y1m */ rec += 8; prev += offset; } return 1; } else /* tmp == 3 */ { prev -= 3; /* to word-aligned */ for (i = B_SIZE; i > 0; i--) { x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */ x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */ y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */ y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */ x1m = (x1 >> 2) & mask; /* zero out last 2 bits */ x2m = (x2 >> 2) & mask; x1 = x1 ^(x1m << 2); x2 = x2 ^(x2m << 2); x1m += x2m; x1 += x2; /* x2m, x2 free */ y1m = (y1 >> 2) & mask; /* zero out last 2 bits */ y2m = (y2 >> 2) & mask; y1 = y1 ^(y1m << 2); y2 = y2 ^(y2m << 2); y1m += y2m; y1 += y2; /* y2m, y2 free */ /* x2m, x2 free */ x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */ y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */ x2m = (x2 >> 2) & mask; y2m = (y2 >> 2) & mask; x2 = x2 ^(x2m << 2); y2 = y2 ^(y2m << 2); x2m += y2m; x2 += y2; /* y2m, y2 free */ /* now operate on x1m, x1, y1m, y1, x2m, x2 */ /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */ /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */ /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */ /* x1, y1, x2 */ x1m >>= 24 ; x1 >>= 24; x1m |= (y1m << 8); /* a6+b6, a5+b5, a4+b4, a3+b3 */ x1 |= (y1 << 8); x1m += y1m; /* a6+b6+a7+b7, ....., a3+b3+a4+b4 */ x1 += y1; x1 += rnd2; x1 &= (mask << 2); x1m += (x1 >> 2); 
*((ULong*)(rec += 4)) = x1m; /* save x1m */ y1m >>= 24; y1 >>= 24; y1m |= (x2m << 8); /* a10+b10, a9+b9, a8+b8, a7+b7 */ y1 |= (x2 << 8); y1m += x2m; /* a10+b10+a11+b11, ....., a7+b7+a8+b8 */ y1 += x2; y1 += rnd2; y1 &= (mask << 2); y1m += (y1 >> 2); *((ULong*)(rec += 4)) = y1m; /* save y1m */ rec += 8; prev += offset; } return 1; } } /*============================================================================= Function: EncGetPredOutside Date: 04/17/2001 Purpose: - modified from GetPredOutside in the decoder. Modified: 09/24/05 use the existing non-initialized padded region =============================================================================*/ // not really needed since padding is included #define PAD_CORNER { temp = *src; \ temp |= (temp<<8); \ temp |= (temp<<16); \ *((ULong*)dst) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; } #define PAD_ROW { temp = *((ULong*)src); \ temp2 = *((ULong*)(src+4)); \ *((ULong*)dst) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp2; } #define PAD_COL { temp = *src; temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)dst) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= 
(temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; \ temp = *(src+=lx); temp |= (temp<<8); temp |= (temp<<16); \ *((ULong*)(dst+=lx)) = temp; \ *((ULong*)(dst+4)) = temp; } Int EncGetPredOutside(Int xpos, Int ypos, UChar *c_prev, UChar *rec, Int width, Int height, Int rnd1) { Int lx; UChar *src, *dst; ULong temp, temp2; Int xoffset; lx = width + 16; /* only works for chroma */ if (xpos < 0) { if (ypos < 0) /* pad top-left */ { /* pad corner */ src = c_prev; dst = c_prev - (lx << 3) - 8; PAD_CORNER /* pad top */ dst = c_prev - (lx << 3); PAD_ROW /* pad left */ dst = c_prev - 8; PAD_COL GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1); return 1; } else if ((ypos >> 1) < (height - 8)) /* pad left of frame */ { /* pad left */ src = c_prev + (ypos >> 1) * lx; dst = src - 8; PAD_COL /* pad extra row */ temp = *(src += lx); temp |= (temp << 8); temp |= (temp << 16); *((ULong*)(dst += lx)) = temp; *((ULong*)(dst + 4)) = temp; GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1); return 1; } else /* pad bottom-left */ { /* pad corner */ src = c_prev + (height - 1) * lx; dst = src + lx - 8; PAD_CORNER /* pad bottom */ dst = src + lx; PAD_ROW /* pad left */ src -= (lx << 3); src += lx; dst = src - 8; PAD_COL GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, 
rnd1); return 1; } } else if ((xpos >> 1) < (width - 8)) { if (ypos < 0) /* pad top of frame */ { xoffset = (xpos >> 1) & 0x3; src = c_prev + (xpos >> 1) - xoffset; dst = src - (lx << 3); PAD_ROW if (xoffset || (xpos&1)) { temp = *((ULong*)(src + 8)); dst = src - (lx << 3) + 8; *((ULong*)dst) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; } GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1); return 1; } else /* pad bottom of frame */ { xoffset = (xpos >> 1) & 0x3; src = c_prev + (xpos >> 1) - xoffset + (height - 1) * lx; dst = src + lx; PAD_ROW if (xoffset || (xpos&1)) { temp = *((ULong*)(src + 8)); dst = src + lx + 8; *((ULong*)dst) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; *((ULong*)(dst += lx)) = temp; } GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1); return 1; } } else { if (ypos < 0) /* pad top-right */ { /* pad corner */ src = c_prev + width - 1; dst = src - (lx << 3) + 1; PAD_CORNER /* pad top */ src -= 7; dst = src - (lx << 3); PAD_ROW /* pad left */ src += 7; dst = src + 1; PAD_COL GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1); return 1; } else if ((ypos >> 1) < (height - B_SIZE)) /* pad right of frame */ { /* pad left */ src = c_prev + (ypos >> 1) * lx + width - 1; dst = src + 1; PAD_COL /* pad extra row */ temp = *(src += lx); temp |= (temp << 8); temp |= (temp << 16); *((ULong*)(dst += lx)) = temp; *((ULong*)(dst + 4)) = temp; GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1); return 1; } else /* pad bottom-right */ { /* pad left */ src = c_prev + (height - 8) * lx + 
width - 1; dst = src + 1; PAD_COL   /* tail of EncGetPredOutside: bottom-right case --
                                       replicate right edge column downward */
/* pad corner */
dst = src + lx + 1; PAD_CORNER      /* replicate bottom-right corner pixel into the 8x8 pad */
/* pad bottom */
src -= 7; dst = src + lx; PAD_ROW   /* replicate last row downward */
GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx), rec, lx, rnd1);
return 1; } } }
/* ======================================================================
/  Function : Copy_MB_from_Vop()  Date : 04/17/2001
====================================================================== */
/* Unpack one 16x16 luma macroblock from the frame buffer into four 8x8
   Int blocks: yChan[0]/yChan[1] hold the top-left/top-right 8x8 pair
   (filled in the first i iteration), yChan[2]/yChan[3] the bottom pair.
   comp  : top-left pixel of the MB in the frame buffer
           (assumed 4-byte aligned for the ULong loads -- TODO confirm)
   pitch : frame line stride in bytes
   Each ULong load yields 4 pixels; the first pixel is taken from the
   least-significant byte, i.e. a little-endian layout is assumed --
   NOTE(review): verify on big-endian targets. */
void Copy_MB_from_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int pitch)
{
    Int row, col, i;
    Int *src1, *src2;
    Int offset = pitch - MB_SIZE;   /* jump from end of one MB row to start of next */
    ULong temp;
    for (i = 0; i < 4; i += 2)      /* i = 0: top block pair, i = 2: bottom pair */
    {
        src1 = yChan[i];
        src2 = yChan[i+1];
        row = B_SIZE;
        while (row--)
        {
            /* left 8 pixels of this row -> src1 block */
            col = B_SIZE;
            while (col)
            {
                temp = *((ULong*)comp);
                *src1++ = (Int)(temp & 0xFF);
                *src1++ = (Int)((temp >> 8) & 0xFF);
                *src1++ = (Int)((temp >> 16) & 0xFF);
                *src1++ = (Int)((temp >> 24) & 0xFF);
                comp += 4;
                col -= 4;
            }
            /* right 8 pixels of this row -> src2 block */
            col = B_SIZE;
            while (col)
            {
                temp = *((ULong*)comp);
                *src2++ = (Int)(temp & 0xFF);
                *src2++ = (Int)((temp >> 8) & 0xFF);
                *src2++ = (Int)((temp >> 16) & 0xFF);
                *src2++ = (Int)((temp >> 24) & 0xFF);
                comp += 4;
                col -= 4;
            }
            comp += offset;
        }
    }
    return ;
}
/* ======================================================================
/  Function : Copy_B_from_Vop()  Date : 04/17/2001
/ ====================================================================== */
/* Unpack one 8x8 (chroma) block from the frame buffer into cChan[64],
   one Int per pixel.  Same alignment and byte-order assumptions as
   Copy_MB_from_Vop above. */
void Copy_B_from_Vop(UChar *comp, Int cChan[], Int pitch)
{
    Int row, col;
    Int offset = pitch - B_SIZE;
    ULong temp;
    row = B_SIZE;
    while (row--)
    {
        col = B_SIZE;
        while (col)
        {
            temp = *((ULong*)comp);
            *cChan++ = (Int)(temp & 0xFF);
            *cChan++ = (Int)((temp >> 8) & 0xFF);
            *cChan++ = (Int)((temp >> 16) & 0xFF);
            *cChan++ = (Int)((temp >> 24) & 0xFF);
            comp += 4;
            col -= 4;
        }
        comp += offset;
    }
}
/* ======================================================================
/  Function : Copy_MB_into_Vop()  Date : 04/17/2001  History : From decoder
/ ====================================================================== */
/* Pack four 8x8 Int blocks back into a 16x16 macroblock of bytes in the
   frame buffer, clipping each sample to [0,255].
   Clamp idiom: (UInt)tmp > 0xFF is true for tmp < 0 or tmp > 255;
   then mask & (~(tmp >> 31)) yields 0 for negatives (arithmetic shift
   gives all-ones, inverted to 0) and 0xFF for overflow.
   NOTE(review): the first sample is packed into bits 31..24 (high byte)
   while Copy_MB_from_Vop reads the first sample from the low byte --
   this only round-trips on big-endian stores; confirm intended byte
   order against the callers before changing anything. */
void Copy_MB_into_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int pitch)
{
    Int row, col, i;
    Int *src1, *src2;
    Int offset = pitch - MB_SIZE;
    UChar mask = 0xFF;
    Int tmp;
    ULong temp;
    for (i = 0; i < 4; i += 2)      /* i = 0: top block pair, i = 2: bottom pair */
    {
        src1 = yChan[i];
        src2 = yChan[i+1];
        row = B_SIZE;
        while (row--)
        {
            col = B_SIZE;
            while (col)
            {
                tmp = (*src1++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp = tmp << 24;
                tmp = (*src1++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp |= (tmp << 16);
                tmp = (*src1++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp |= (tmp << 8);
                tmp = (*src1++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp |= tmp;
                *((ULong*)comp) = temp;
                comp += 4;
                col -= 4;
            }
            col = B_SIZE;
            while (col)
            {
                tmp = (*src2++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp = tmp << 24;
                tmp = (*src2++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp |= (tmp << 16);
                tmp = (*src2++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp |= (tmp << 8);
                tmp = (*src2++);
                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
                temp |= tmp;
                *((ULong*)comp) = temp;
                comp += 4;
                col -= 4;
            }
            comp += offset;
        }
    }
    return ;
}
/* ======================================================================
/  Function : Copy_B_into_Vop()  Date : 04/17/2001  History : From decoder
/ ====================================================================== */
/* Pack one 8x8 Int block back into the frame buffer with the same
   clip-to-[0,255] and byte-packing scheme as Copy_MB_into_Vop. */
void Copy_B_into_Vop(UChar *comp, Int cChan[], Int pitch)
{
    Int row, col;
    Int offset = pitch - B_SIZE;
    Int tmp;
    UChar mask = 0xFF;
    ULong temp;
    row = B_SIZE;
    while (row--)
    {
        col = B_SIZE;
        while (col)
        {
            tmp = (*cChan++);
            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
            temp = tmp << 24;
            tmp = (*cChan++);
            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
            temp |= (tmp << 16);
            tmp = (*cChan++);
            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
            temp |= (tmp << 8);
            tmp = (*cChan++);
            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));
            temp |= tmp;
            *((ULong*)comp) = temp;
            comp += 4;
            col -= 4;
        }
        comp += offset;
    }
}
/*
======================================================================== */ /* Function : get_MB( ) */ /* Date : 10/03/2000 */ /* Purpose : Copy 4 Y to reference frame */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ void get_MB(UChar *c_prev, UChar *c_prev_u , UChar *c_prev_v, Short mb[6][64], Int lx, Int lx_uv) { Int i, j, count = 0, count1 = 0; Int k1 = lx - MB_SIZE, k2 = lx_uv - B_SIZE; for (i = 0; i < B_SIZE; i++) { for (j = 0; j < B_SIZE; j++) { mb[0][count] = (Int)(*c_prev++); mb[4][count] = (Int)(*c_prev_u++); mb[5][count++] = (Int)(*c_prev_v++); } for (j = 0; j < B_SIZE; j++) mb[1][count1++] = (Int)(*c_prev++); c_prev += k1; c_prev_u += k2; c_prev_v += k2; } count = count1 = 0; for (i = 0; i < B_SIZE; i++) { for (j = 0; j < B_SIZE; j++) mb[2][count++] = (Int)(*c_prev++); for (j = 0; j < B_SIZE; j++) mb[3][count1++] = (Int)(*c_prev++); c_prev += k1; } } void PutSkippedBlock(UChar *rec, UChar *prev, Int lx) { UChar *end; Int offset = (lx - 8) >> 2; Int *src, *dst; dst = (Int*)rec; src = (Int*)prev; end = prev + (lx << 3); do { *dst++ = *src++; *dst++ = *src++; dst += offset; src += offset; } while ((UInt)src < (UInt)end); return ; } ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/motion_est.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
* See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #include "mp4def.h" #include "mp4enc_lib.h" #include "mp4lib_int.h" #include "m4venc_oscl.h" //#define PRINT_MV #define MIN_GOP 1 /* minimum size of GOP, 1/23/01, need to be tested */ #define CANDIDATE_DISTANCE 0 /* distance candidate from one another to consider as a distinct one */ /* shouldn't be more than 3 */ #define ZERO_MV_PREF 0 /* 0: bias (0,0)MV before full-pel search, lowest complexity*/ /* 1: bias (0,0)MV after full-pel search, before half-pel, highest comp */ /* 2: bias (0,0)MV after half-pel, high comp, better PSNR */ #define RASTER_REFRESH /* instead of random INTRA refresh, do raster scan, 2/26/01 */ #ifdef RASTER_REFRESH #define TARGET_REFRESH_PER_REGION 4 /* , no. MB per frame to be INTRA refreshed */ #else #define TARGET_REFRESH_PER_REGION 1 /* , no. MB per region to be INTRA refreshed */ #endif #define ALL_CAND_EQUAL 10 /* any number greater than 5 will work */ #define NumPixelMB 256 /* number of pixels used in SAD calculation */ #define DEF_8X8_WIN 3 /* search region for 8x8 MVs around the 16x16 MV */ #define MB_Nb 256 #define PREF_NULL_VEC 129 /* for zero vector bias */ #define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/ #define PREF_INTRA 512 /* bias for INTRA coding */ const static Int tab_exclude[9][9] = // [last_loc][curr_loc] { {0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 1, 1, 0, 0}, {0, 0, 0, 0, 1, 1, 1, 1, 1}, {0, 0, 0, 0, 0, 0, 1, 1, 1}, {0, 1, 1, 0, 0, 0, 1, 1, 1}, {0, 1, 1, 0, 0, 0, 0, 0, 1}, {0, 1, 1, 1, 1, 0, 0, 0, 1}, {0, 0, 1, 1, 1, 0, 0, 0, 0}, {0, 0, 1, 1, 1, 1, 1, 0, 0} }; //to decide whether to continue or compute const static Int refine_next[8][2] = /* [curr_k][increment] */ { {0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2} }; #ifdef __cplusplus extern "C" { #endif void MBMotionSearch(VideoEncData *video, UChar *cur, UChar 
*best_cand[], Int i0, Int j0, Int type_pred, Int fullsearch, Int *hp_guess); Int fullsearch(VideoEncData *video, Vol *currVol, UChar *ref, UChar *cur, Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh); Int fullsearchBlk(VideoEncData *video, Vol *currVol, UChar *cent, UChar *cur, Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh, Int range); void CandidateSelection(Int *mvx, Int *mvy, Int *num_can, Int imb, Int jmb, VideoEncData *video, Int type_pred); void RasterIntraUpdate(UChar *intraArray, UChar *Mode, Int totalMB, Int numRefresh); void ResetIntraUpdate(UChar *intraArray, Int totalMB); void ResetIntraUpdateRegion(UChar *intraArray, Int start_i, Int rwidth, Int start_j, Int rheight, Int mbwidth, Int mbheight); void MoveNeighborSAD(Int dn[], Int new_loc); Int FindMin(Int dn[]); void PrepareCurMB(VideoEncData *video, UChar *cur); #ifdef __cplusplus } #endif /***************************************/ /* 2/28/01, for HYPOTHESIS TESTING */ #ifdef HTFM /* defined in mp4def.h */ #ifdef __cplusplus extern "C" { #endif void CalcThreshold(double pf, double exp_lamda[], Int nrmlz_th[]); void HTFMPrepareCurMB(VideoEncData *video, HTFM_Stat *htfm_stat, UChar *cur); #ifdef __cplusplus } #endif #define HTFM_Pf 0.25 /* 3/2/1, probability of false alarm, can be varied from 0 to 0.5 */ /***************************************/ #endif #ifdef _SAD_STAT ULong num_MB = 0; ULong num_HP_MB = 0; ULong num_Blk = 0; ULong num_HP_Blk = 0; ULong num_cand = 0; ULong num_better_hp = 0; ULong i_dist_from_guess = 0; ULong j_dist_from_guess = 0; ULong num_hp_not_zero = 0; #endif /*================================================================== Function: MotionEstimation Date: 10/3/2000 Purpose: Go through all macroblock for motion search and determine scene change detection. 
====================================================================*/ void MotionEstimation(VideoEncData *video) { UChar use_4mv = video->encParams->MV8x8_Enabled; Vol *currVol = video->vol[video->currLayer]; Vop *currVop = video->currVop; VideoEncFrameIO *currFrame = video->input; Int i, j, comp; Int mbwidth = currVol->nMBPerRow; Int mbheight = currVol->nMBPerCol; Int totalMB = currVol->nTotalMB; Int width = currFrame->pitch; UChar *mode_mb, *Mode = video->headerInfo.Mode; MOT *mot_mb, **mot = video->mot; UChar *intraArray = video->intraArray; Int FS_en = video->encParams->FullSearch_Enabled; void (*ComputeMBSum)(UChar *, Int, MOT *) = video->functionPointer->ComputeMBSum; void (*ChooseMode)(UChar*, UChar*, Int, Int) = video->functionPointer->ChooseMode; Int numIntra, start_i, numLoop, incr_i; Int mbnum, offset; UChar *cur, *best_cand[5]; Int sad8 = 0, sad16 = 0; Int totalSAD = 0; /* average SAD for rate control */ Int skip_halfpel_4mv; Int f_code_p, f_code_n, max_mag = 0, min_mag = 0; Int type_pred; Int xh[5] = {0, 0, 0, 0, 0}; Int yh[5] = {0, 0, 0, 0, 0}; /* half-pel */ UChar hp_mem4MV[17*17*4]; #ifdef HTFM /***** HYPOTHESIS TESTING ********/ /* 2/28/01 */ Int collect = 0; HTFM_Stat htfm_stat; double newvar[16]; double exp_lamda[15]; /*********************************/ #endif Int hp_guess = 0; #ifdef PRINT_MV FILE *fp_debug; #endif // FILE *fstat; // static int frame_num = 0; offset = 0; if (video->currVop->predictionType == I_VOP) { /* compute the SAV */ mbnum = 0; cur = currFrame->yChan; for (j = 0; j < mbheight; j++) { for (i = 0; i < mbwidth; i++) { video->mbnum = mbnum; mot_mb = mot[mbnum]; (*ComputeMBSum)(cur + (i << 4), width, mot_mb); totalSAD += mot_mb[0].sad; mbnum++; } cur += (width << 4); } video->sumMAD = (float)totalSAD / (float)NumPixelMB; ResetIntraUpdate(intraArray, totalMB); return ; } /* 09/20/05 */ if (video->prevBaseVop->padded == 0 && !video->encParams->H263_Enabled) { PaddingEdge(video->prevBaseVop); video->prevBaseVop->padded = 1; } /* 
Random INTRA update */ /* suggest to do it in CodeMB */ /* 2/21/2001 */ //if(video->encParams->RC_Type == CBR_1 || video->encParams->RC_Type == CBR_2) if (video->currLayer == 0 && video->encParams->Refresh) { RasterIntraUpdate(intraArray, Mode, totalMB, video->encParams->Refresh); } video->sad_extra_info = NULL; #ifdef HTFM /***** HYPOTHESIS TESTING ********/ /* 2/28/01 */ InitHTFM(video, &htfm_stat, newvar, &collect); /*********************************/ #endif if ((video->encParams->SceneChange_Det == 1) /*&& video->currLayer==0 */ && ((video->encParams->LayerFrameRate[0] < 5.0) || (video->numVopsInGOP > MIN_GOP))) /* do not try to detect a new scene if low frame rate and too close to previous I-frame */ { incr_i = 2; numLoop = 2; start_i = 1; type_pred = 0; /* for initial candidate selection */ } else { incr_i = 1; numLoop = 1; start_i = 0; type_pred = 2; } /* First pass, loop thru half the macroblock */ /* determine scene change */ /* Second pass, for the rest of macroblocks */ numIntra = 0; while (numLoop--) { for (j = 0; j < mbheight; j++) { if (incr_i > 1) start_i = (start_i == 0 ? 
1 : 0) ; /* toggle 0 and 1 */ offset = width * (j << 4) + (start_i << 4); mbnum = j * mbwidth + start_i; for (i = start_i; i < mbwidth; i += incr_i) { video->mbnum = mbnum; mot_mb = mot[mbnum]; mode_mb = Mode + mbnum; cur = currFrame->yChan + offset; if (*mode_mb != MODE_INTRA) { #if defined(HTFM) HTFMPrepareCurMB(video, &htfm_stat, cur); #else PrepareCurMB(video, cur); #endif /************************************************************/ /******** full-pel 1MV and 4MVs search **********************/ #ifdef _SAD_STAT num_MB++; #endif MBMotionSearch(video, cur, best_cand, i << 4, j << 4, type_pred, FS_en, &hp_guess); #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, "#%d (%d,%d,%d) : ", mbnum, mot_mb[0].x, mot_mb[0].y, mot_mb[0].sad); fprintf(fp_debug, "(%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) : ==>\n", mot_mb[1].x, mot_mb[1].y, mot_mb[1].sad, mot_mb[2].x, mot_mb[2].y, mot_mb[2].sad, mot_mb[3].x, mot_mb[3].y, mot_mb[3].sad, mot_mb[4].x, mot_mb[4].y, mot_mb[4].sad); fclose(fp_debug); #endif sad16 = mot_mb[0].sad; #ifdef NO_INTER4V sad8 = sad16; #else sad8 = mot_mb[1].sad + mot_mb[2].sad + mot_mb[3].sad + mot_mb[4].sad; #endif /* choose between INTRA or INTER */ (*ChooseMode)(mode_mb, cur, width, ((sad8 < sad16) ? 
sad8 : sad16)); } else /* INTRA update, use for prediction 3/23/01 */ { mot_mb[0].x = mot_mb[0].y = 0; } if (*mode_mb == MODE_INTRA) { numIntra++ ; /* compute SAV for rate control and fast DCT, 11/28/00 */ (*ComputeMBSum)(cur, width, mot_mb); /* leave mot_mb[0] as it is for fast motion search */ /* set the 4 MVs to zeros */ for (comp = 1; comp <= 4; comp++) { mot_mb[comp].x = 0; mot_mb[comp].y = 0; } #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, "\n"); fclose(fp_debug); #endif } else /* *mode_mb = MODE_INTER;*/ { if (video->encParams->HalfPel_Enabled) { #ifdef _SAD_STAT num_HP_MB++; #endif /* find half-pel resolution motion vector */ FindHalfPelMB(video, cur, mot_mb, best_cand[0], i << 4, j << 4, xh, yh, hp_guess); #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, "(%d,%d), %d\n", mot_mb[0].x, mot_mb[0].y, mot_mb[0].sad); fclose(fp_debug); #endif skip_halfpel_4mv = ((sad16 - mot_mb[0].sad) <= (MB_Nb >> 1) + 1); sad16 = mot_mb[0].sad; #ifndef NO_INTER4V if (use_4mv && !skip_halfpel_4mv) { /* Also decide 1MV or 4MV !!!!!!!!*/ sad8 = FindHalfPelBlk(video, cur, mot_mb, sad16, best_cand, mode_mb, i << 4, j << 4, xh, yh, hp_mem4MV); #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, " (%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) \n", mot_mb[1].x, mot_mb[1].y, mot_mb[1].sad, mot_mb[2].x, mot_mb[2].y, mot_mb[2].sad, mot_mb[3].x, mot_mb[3].y, mot_mb[3].sad, mot_mb[4].x, mot_mb[4].y, mot_mb[4].sad); fclose(fp_debug); #endif } #endif /* NO_INTER4V */ } else /* HalfPel_Enabled ==0 */ { #ifndef NO_INTER4V //if(sad16 < sad8-PREF_16_VEC) if (sad16 - PREF_16_VEC > sad8) { *mode_mb = MODE_INTER4V; } #endif } #if (ZERO_MV_PREF==2) /* use mot_mb[7].sad as d0 computed in MBMotionSearch*/ /******************************************************/ if (mot_mb[7].sad - PREF_NULL_VEC < sad16 && mot_mb[7].sad - PREF_NULL_VEC < sad8) { mot_mb[0].sad = mot_mb[7].sad - 
PREF_NULL_VEC; mot_mb[0].x = mot_mb[0].y = 0; *mode_mb = MODE_INTER; } /******************************************************/ #endif if (*mode_mb == MODE_INTER) { if (mot_mb[0].x == 0 && mot_mb[0].y == 0) /* use zero vector */ mot_mb[0].sad += PREF_NULL_VEC; /* add back the bias */ mot_mb[1].sad = mot_mb[2].sad = mot_mb[3].sad = mot_mb[4].sad = (mot_mb[0].sad + 2) >> 2; mot_mb[1].x = mot_mb[2].x = mot_mb[3].x = mot_mb[4].x = mot_mb[0].x; mot_mb[1].y = mot_mb[2].y = mot_mb[3].y = mot_mb[4].y = mot_mb[0].y; } } /* find maximum magnitude */ /* compute average SAD for rate control, 11/28/00 */ if (*mode_mb == MODE_INTER) { #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, "%d MODE_INTER\n", mbnum); fclose(fp_debug); #endif totalSAD += mot_mb[0].sad; if (mot_mb[0].x > max_mag) max_mag = mot_mb[0].x; if (mot_mb[0].y > max_mag) max_mag = mot_mb[0].y; if (mot_mb[0].x < min_mag) min_mag = mot_mb[0].x; if (mot_mb[0].y < min_mag) min_mag = mot_mb[0].y; } else if (*mode_mb == MODE_INTER4V) { #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, "%d MODE_INTER4V\n", mbnum); fclose(fp_debug); #endif totalSAD += sad8; for (comp = 1; comp <= 4; comp++) { if (mot_mb[comp].x > max_mag) max_mag = mot_mb[comp].x; if (mot_mb[comp].y > max_mag) max_mag = mot_mb[comp].y; if (mot_mb[comp].x < min_mag) min_mag = mot_mb[comp].x; if (mot_mb[comp].y < min_mag) min_mag = mot_mb[comp].y; } } else /* MODE_INTRA */ { #ifdef PRINT_MV fp_debug = fopen("c:\\bitstream\\mv1_debug.txt", "a"); fprintf(fp_debug, "%d MODE_INTRA\n", mbnum); fclose(fp_debug); #endif totalSAD += mot_mb[0].sad; } mbnum += incr_i; offset += (incr_i << 4); } } if (incr_i > 1 && numLoop) /* scene change on and first loop */ { //if(numIntra > ((totalMB>>3)<<1) + (totalMB>>3)) /* 75% of 50%MBs */ if (numIntra > (0.30*(totalMB / 2.0))) /* 15% of 50%MBs */ { /******** scene change detected *******************/ currVop->predictionType = I_VOP; 
M4VENC_MEMSET(Mode, MODE_INTRA, sizeof(UChar)*totalMB); /* set this for MB level coding*/ currVop->quantizer = video->encParams->InitQuantIvop[video->currLayer]; /* compute the SAV for rate control & fast DCT */ totalSAD = 0; offset = 0; mbnum = 0; cur = currFrame->yChan; for (j = 0; j < mbheight; j++) { for (i = 0; i < mbwidth; i++) { video->mbnum = mbnum; mot_mb = mot[mbnum]; (*ComputeMBSum)(cur + (i << 4), width, mot_mb); totalSAD += mot_mb[0].sad; mbnum++; } cur += (width << 4); } video->sumMAD = (float)totalSAD / (float)NumPixelMB; ResetIntraUpdate(intraArray, totalMB); /* video->numVopsInGOP=0; 3/13/01 move it to vop.c*/ return ; } } /******** no scene change, continue motion search **********************/ start_i = 0; type_pred++; /* second pass */ } video->sumMAD = (float)totalSAD / (float)NumPixelMB; /* avg SAD */ /* find f_code , 10/27/2000 */ f_code_p = 1; while ((max_mag >> (4 + f_code_p)) > 0) f_code_p++; f_code_n = 1; min_mag *= -1; while ((min_mag - 1) >> (4 + f_code_n) > 0) f_code_n++; currVop->fcodeForward = (f_code_p > f_code_n ? 
f_code_p : f_code_n);

#ifdef HTFM
    /***** HYPOTHESIS TESTING ********/  /* 2/28/01 */
    /* end of a statistics-collection frame: fold the gathered SAD data
       into new HTFM thresholds for the following frames */
    if (collect)
    {
        collect = 0;
        UpdateHTFM(video, newvar, exp_lamda, &htfm_stat);
    }
    /*********************************/
#endif

    return ;
}

#ifdef HTFM

/* Set up Hypothesis Testing Fast Matching (HTFM) for the current frame.
 *
 * Every 30th frame of a GOP is a "collect" frame: the SAD function pointers
 * are switched to the *_Collect variants, which gather statistics into
 * htfm_stat, and *collect is set so the caller later invokes UpdateHTFM().
 * All other frames use the trained-threshold variants driven by
 * video->nrmlz_th.  The routine also fills the 16 subsampled-pixel offsets
 * for the current-frame stride (lx = width) and the reference stride
 * (rx = pitch).
 * NOTE(review): lx comes from width while rx is the padded pitch —
 * presumably the current MB is read from the unpadded input while the
 * reference frame is padded; confirm against the SAD_MB_HTFM routines. */
void InitHTFM(VideoEncData *video, HTFM_Stat *htfm_stat, double *newvar, Int *collect)
{
    Int i;
    Int lx = video->currVop->width; // padding
    Int lx2 = lx << 1;
    Int lx3 = lx2 + lx;
    Int rx = video->currVop->pitch;
    Int rx2 = rx << 1;
    Int rx3 = rx2 + rx;
    Int *offset, *offset2;

    /* 4/11/01, collect data every 30 frames, doesn't have to be base layer */
    if (((Int)video->numVopsInGOP) % 30 == 1)
    {
        *collect = 1;

        htfm_stat->countbreak = 0;
        htfm_stat->abs_dif_mad_avg = 0;

        for (i = 0; i < 16; i++)
        {
            newvar[i] = 0.0;
        }
//      video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM_Collect;
        video->functionPointer->SAD_Macroblock = &SAD_MB_HTFM_Collect;
        video->functionPointer->SAD_MB_HalfPel[0] = NULL;
        video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFM_Collectxh;
        video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFM_Collectyh;
        video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFM_Collectxhyh;
        video->sad_extra_info = (void*)(htfm_stat);
        offset = htfm_stat->offsetArray;
        offset2 = htfm_stat->offsetRef;
    }
    else
    {
//      video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM;
        video->functionPointer->SAD_Macroblock = &SAD_MB_HTFM;
        video->functionPointer->SAD_MB_HalfPel[0] = NULL;
        video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFMxh;
        video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFMyh;
        video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFMxhyh;
        video->sad_extra_info = (void*)(video->nrmlz_th);
        offset = video->nrmlz_th + 16;
        offset2 = video->nrmlz_th + 32;
    }

    /* 16 pixel offsets of the 4x4 subsampling grid, listed in the order
       the partial SAD is accumulated; same pattern for both strides */
    offset[0] = 0;
    offset[1] = lx2 + 2;
    offset[2] = 2;
    offset[3] = lx2;
    offset[4] = lx + 1;
    offset[5] = lx3 + 3;
    offset[6] = lx + 3;
    offset[7] = lx3 + 1;
    offset[8] = lx;
    offset[9] = lx3 + 2;
    offset[10] = lx3 ;
    offset[11] = lx + 2 ;
    offset[12] = 1;
    offset[13] = lx2 + 3;
    offset[14] = lx2 + 1;
    offset[15] = 3;

    offset2[0] = 0;
    offset2[1] = rx2 + 2;
    offset2[2] = 2;
    offset2[3] = rx2;
    offset2[4] = rx + 1;
    offset2[5] = rx3 + 3;
    offset2[6] = rx + 3;
    offset2[7] = rx3 + 1;
    offset2[8] = rx;
    offset2[9] = rx3 + 2;
    offset2[10] = rx3 ;
    offset2[11] = rx + 2 ;
    offset2[12] = 1;
    offset2[13] = rx2 + 3;
    offset2[14] = rx2 + 1;
    offset2[15] = 3;

    return ;
}

/* Recompute the HTFM early-termination model from the statistics gathered
 * during a collection frame: newvar[0] becomes the average absolute
 * difference per pixel, exp_lamda[0..14] are the exponential-model
 * parameters for the 15 partial-SAD stages (scaled by empirically derived
 * constants), and CalcThreshold() converts them into the integer
 * thresholds in video->nrmlz_th. */
void UpdateHTFM(VideoEncData *video, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat)
{
    if (htfm_stat->countbreak == 0)   /* guard against division by zero below */
        htfm_stat->countbreak = 1;

    newvar[0] = (double)(htfm_stat->abs_dif_mad_avg) / (htfm_stat->countbreak * 16.);

    if (newvar[0] < 0.001)
    {
        newvar[0] = 0.001; /* to prevent floating overflow */
    }
    exp_lamda[0] =  1 / (newvar[0] * 1.4142136);
    exp_lamda[1] = exp_lamda[0] * 1.5825;
    exp_lamda[2] = exp_lamda[0] * 2.1750;
    exp_lamda[3] = exp_lamda[0] * 3.5065;
    exp_lamda[4] = exp_lamda[0] * 3.1436;
    exp_lamda[5] = exp_lamda[0] * 3.5315;
    exp_lamda[6] = exp_lamda[0] * 3.7449;
    exp_lamda[7] = exp_lamda[0] * 4.5854;
    exp_lamda[8] = exp_lamda[0] * 4.6191;
    exp_lamda[9] = exp_lamda[0] * 5.4041;
    exp_lamda[10] = exp_lamda[0] * 6.5974;
    exp_lamda[11] = exp_lamda[0] * 10.5341;
    exp_lamda[12] = exp_lamda[0] * 10.0719;
    exp_lamda[13] = exp_lamda[0] * 12.0516;
    exp_lamda[14] = exp_lamda[0] * 15.4552;

    CalcThreshold(HTFM_Pf, exp_lamda, video->nrmlz_th);
    return ;
}

/* Convert the exponential-model parameters into the 15 integer SAD
 * thresholds nrmlz_th[0..14] used for early termination, for a given
 * false-rejection probability pf.  Each threshold is scaled by the number
 * of pixels visited so far ((i+1)*16) and rounded to nearest;
 * nrmlz_th[15] is zeroed. */
void CalcThreshold(double pf, double exp_lamda[], Int nrmlz_th[])
{
    Int i;
    double temp[15];
    //    printf("\nLamda: ");

    /* parametric PREMODELling */
    for (i = 0; i < 15; i++)
    {
        //      printf("%g ",exp_lamda[i]);
        if (pf < 0.5)
            temp[i] = 1 / exp_lamda[i] * M4VENC_LOG(2 * pf);
        else
            temp[i] = -1 / exp_lamda[i] * M4VENC_LOG(2 * (1 - pf));
    }

    nrmlz_th[15] = 0;
    for (i = 0; i < 15; i++)        /* scale upto no.pixels */
        nrmlz_th[i] = (Int)(temp[i] * ((i + 1) << 4) + 0.5);

    return ;
}

/* Copy the 16x16 current macroblock into video->currYMB in the HTFM
 * (subsampled, interleaved) pixel order so the HTFM SAD routines can read
 * it linearly: for each of the 16 offsets, 4 pixels spaced 4 columns
 * apart are packed little-endian into one 32-bit word, for 4 rows spaced
 * 4*width apart.  The offset table matches the one selected in InitHTFM()
 * for this frame (collect vs. normal mode). */
void HTFMPrepareCurMB(VideoEncData *video, HTFM_Stat *htfm_stat, UChar *cur)
{
    void* tmp = (void*)(video->currYMB);
    ULong *htfmMB = (ULong*)tmp;
    UChar *ptr, byte;
    Int *offset;
    Int i;
    ULong word;
    Int width = video->currVop->width;

    if (((Int)video->numVopsInGOP) % 30 == 1)
    {
        offset = htfm_stat->offsetArray;
    }
    else
    {
        offset = video->nrmlz_th + 16;
    }

    for (i = 0; i < 16; i++)
    {
        ptr = cur + offset[i];

        word = ptr[0];
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;

        word = *(ptr += (width << 2));   /* advance 4 rows */
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;

        word = *(ptr += (width << 2));
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;

        word = *(ptr += (width << 2));
        byte = ptr[4];
        word |= (byte << 8);
        byte = ptr[8];
        word |= (byte << 16);
        byte = ptr[12];
        word |= (byte << 24);
        *htfmMB++ = word;
    }

    return ;
}

#endif /* HTFM */

/* Copy the 16x16 current macroblock (row stride = frame width) into the
 * compact video->currYMB buffer, 4 words (16 bytes) per row, so the SAD
 * routines operate on a small contiguous block.
 * NOTE(review): the ULong loads assume each MB row in 'cur' is 4-byte
 * aligned — TODO confirm this holds for all frame widths. */
void PrepareCurMB(VideoEncData *video, UChar *cur)
{
    void* tmp = (void*)(video->currYMB);
    ULong *currYMB = (ULong*)tmp;
    Int i;
    Int width = video->currVop->width;

    cur -= width;   /* pre-decrement; the loop below advances before reading */

    for (i = 0; i < 16; i++)
    {
        *currYMB++ = *((ULong*)(cur += width));
        *currYMB++ = *((ULong*)(cur + 4));
        *currYMB++ = *((ULong*)(cur + 8));
        *currYMB++ = *((ULong*)(cur + 12));
    }

    return ;
}

/*==================================================================
    Function:   MBMotionSearch
    Date:       09/06/2000
    Purpose:    Perform motion estimation for a macroblock.
                Find 1MV and 4MVs in half-pels resolutions.
                Using ST1 algorithm provided by Chalidabhongse and Kuo
                CSVT March'98.
==================================================================*/
/* Inputs:   i0, j0    - top-left pel coordinate of the MB in the frame
 *           type_pred - 0: first pass, 1: second pass (no SCD)
 *           FS_en     - non-zero: exhaustive (spiral) full search
 * Outputs:  best_cand[0]    - best 16x16 candidate in the reference
 *           best_cand[1..4] - best 8x8 candidates (or copies of [0])
 *           hp_guess        - starting-direction hint for half-pel search
 *           mot[mbnum][0..4] - SADs and MVs in half-pel resolution
 * Note: 'cur' is immediately replaced by video->currYMB (the compact MB
 * copy made by PrepareCurMB/HTFMPrepareCurMB).  The candidate/refinement
 * logic is highly order-sensitive; code kept byte-identical, comments
 * only. */
void MBMotionSearch(VideoEncData *video, UChar *cur, UChar *best_cand[],
                    Int i0, Int j0, Int type_pred, Int FS_en, Int *hp_guess)
{
    Vol *currVol = video->vol[video->currLayer];
    UChar *ref, *cand, *ncand = NULL, *cur8;
    void *extra_info = video->sad_extra_info;
    Int mbnum = video->mbnum;
    Int width = video->currVop->width; /* 6/12/01, must be multiple of 16 */
    Int height = video->currVop->height;
    MOT **mot = video->mot;
    UChar use_4mv = video->encParams->MV8x8_Enabled;
    UChar h263_mode = video->encParams->H263_Enabled;
    Int(*SAD_Macroblock)(UChar*, UChar*, Int, void*) = video->functionPointer->SAD_Macroblock;
    Int(*SAD_Block)(UChar*, UChar*, Int, Int, void*) = video->functionPointer->SAD_Block;
    VideoEncParams *encParams = video->encParams;
    Int range = encParams->SearchRange;
    Int lx = video->currVop->pitch; /* padding */
    Int comp;
    Int i, j, imin, jmin, ilow, ihigh, jlow, jhigh, iorg, jorg;
    Int d, dmin, dn[9];
#if (ZERO_MV_PREF==1)   /* compute (0,0) MV at the end */
    Int d0;
#endif
    Int k;
    Int mvx[5], mvy[5], imin0, jmin0;
    Int num_can, center_again;
    Int last_loc, new_loc = 0;
    Int step, max_step = range >> 1;
    Int next;

    ref = video->forwardRefVop->yChan; /* origin of actual frame */
    cur = video->currYMB; /* use smaller memory space for current MB */

    /* find limit of the search (adjusting search range)*/
    /* MPEG-4 mode may search up to 15 pels outside the frame (padded
       reference); H.263 mode is clamped to the frame interior */
    if (!h263_mode)
    {
        ilow = i0 - range;
        if (ilow < -15)
            ilow = -15;
        ihigh = i0 + range - 1;
        if (ihigh > width - 1)
            ihigh = width - 1;
        jlow = j0 - range;
        if (jlow < -15)
            jlow = -15;
        jhigh = j0 + range - 1;
        if (jhigh > height - 1)
            jhigh = height - 1;
    }
    else
    {
        ilow = i0 - range;
        if (ilow < 0)
            ilow = 0;
        ihigh = i0 + range - 1;
        if (ihigh > width - 16)
            ihigh = width - 16;
        jlow = j0 - range;
        if (jlow < 0)
            jlow = 0;
        jhigh = j0 + range - 1;
        if (jhigh > height - 16)
            jhigh = height - 16;
    }

    imin = i0;
    jmin = j0; /* needed for fullsearch */
    ncand = ref + imin + jmin * lx;

    /* for first row of MB, fullsearch can be used */
    if (FS_en)
    {
        *hp_guess = 0; /* no guess for fast half-pel */
        dmin = fullsearch(video, currVol, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh);

        ncand = ref + imin + jmin * lx;

        mot[mbnum][0].sad = dmin;
        mot[mbnum][0].x = (imin - i0) << 1;
        mot[mbnum][0].y = (jmin - j0) << 1;
        imin0 = imin << 1;  /* 16x16 MV in half-pel resolution */
        jmin0 = jmin << 1;
        best_cand[0] = ncand;
    }
    else
    {
        /* 4/7/01, modified this testing for fullsearch the top row to only upto (0,3) MB */
        /*         upto 30% complexity saving with the same complexity */
        if (video->forwardRefVop->predictionType == I_VOP && j0 == 0 && i0 <= 64 && type_pred != 1)
        {
            *hp_guess = 0; /* no guess for fast half-pel */
            dmin = fullsearch(video, currVol, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh);
            ncand = ref + imin + jmin * lx;
        }
        else
        {
            /************** initialize candidate **************************/
            /* find initial motion vector */
            CandidateSelection(mvx, mvy, &num_can, i0 >> 4, j0 >> 4, video, type_pred);

            dmin = 65535;

            /* check if all are equal */
            if (num_can == ALL_CAND_EQUAL)
            {
                i = i0 + mvx[0];
                j = j0 + mvy[0];

                if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
                {
                    cand = ref + i + j * lx;

                    d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);

                    if (d < dmin)
                    {
                        dmin = d;
                        imin = i;
                        jmin = j;
                        ncand = cand;
                    }
                }
            }
            else
            {
                /************** evaluate unique candidates **********************/
                for (k = 0; k < num_can; k++)
                {
                    i = i0 + mvx[k];
                    j = j0 + mvy[k];

                    if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
                    {
                        cand = ref + i + j * lx;
                        d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);

                        if (d < dmin)
                        {
                            dmin = d;
                            imin = i;
                            jmin = j;
                            ncand = cand;
                        }
                        /* tie-break: prefer the shorter candidate vector */
                        else if ((d == dmin) && PV_ABS(mvx[k]) + PV_ABS(mvy[k]) < PV_ABS(i0 - imin) + PV_ABS(j0 - jmin))
                        {
                            dmin = d;
                            imin = i;
                            jmin = j;
                            ncand = cand;
                        }
                    }
                }
            }

            if (num_can == 0 || dmin == 65535) /* no candidate selected */
            {
                ncand = ref + i0 + j0 * lx; /* use (0,0) MV as initial value */
                /* NOTE(review): the (0,0) SAD is parked in mot[][7] —
                   presumably a scratch slot read elsewhere; confirm */
                mot[mbnum][7].sad = dmin = (*SAD_Macroblock)(ncand, cur, (65535 << 16) | lx, extra_info);
#if (ZERO_MV_PREF==1)   /* compute (0,0) MV at the end */
                d0 = dmin;
#endif
                imin = i0;
                jmin = j0;
            }

#if (ZERO_MV_PREF==0)   /* COMPUTE ZERO VECTOR FIRST !!!!!*/
            dmin -= PREF_NULL_VEC;
#endif

            /******************* local refinement ***************************/
            center_again = 0;
            last_loc = new_loc = 0;
            //          ncand = ref + jmin*lx + imin;  /* center of the search */
            step = 0;
            dn[0] = dmin;
            while (!center_again && step <= max_step)
            {

                MoveNeighborSAD(dn, last_loc);

                center_again = 1;
                i = imin;
                j = jmin - 1;
                cand = ref + i + j * lx;  /* starting from [0,-1] */
                /* spiral check one step at a time*/
                for (k = 2; k <= 8; k += 2)
                {
                    if (!tab_exclude[last_loc][k]) /* exclude last step computation */
                    {
                        /* not already computed */
                        if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
                        {
                            d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
                            dn[k] = d; /* keep it for half pel use */

                            if (d < dmin)
                            {
                                ncand = cand;
                                dmin = d;
                                imin = i;
                                jmin = j;
                                center_again = 0;
                                new_loc = k;
                            }
                            /* tie-break: prefer the MV closer to (0,0) */
                            else if ((d == dmin) && PV_ABS(i0 - i) + PV_ABS(j0 - j) < PV_ABS(i0 - imin) + PV_ABS(j0 - jmin))
                            {
                                ncand = cand;
                                imin = i;
                                jmin = j;
                                center_again = 0;
                                new_loc = k;
                            }
                        }
                    }
                    if (k == 8)  /* end side search*/
                    {
                        if (!center_again)
                        {
                            k = -1; /* start diagonal search */
                            cand -= lx;
                            j--;
                        }
                    }
                    else
                    {
                        next = refine_next[k][0];
                        i += next;
                        cand += next;
                        next = refine_next[k][1];
                        j += next;
                        cand += lx * next;
                    }
                }
                last_loc = new_loc;
                step ++;
            }
            if (!center_again)
                MoveNeighborSAD(dn, last_loc);

            *hp_guess = FindMin(dn);

        }

#if (ZERO_MV_PREF==1)   /* compute (0,0) MV at the end */
        /* NOTE(review): d0 is only assigned in the "no candidate" branch
           above — under ZERO_MV_PREF==1 this may read an uninitialized
           value when candidates were found; verify build configuration */
        if (d0 - PREF_NULL_VEC < dmin)
        {
            ncand = ref + i0 + j0 * lx;
            dmin = d0;
            imin = i0;
            jmin = j0;
        }
#endif
        mot[mbnum][0].sad = dmin;
        mot[mbnum][0].x = (imin - i0) << 1;
        mot[mbnum][0].y = (jmin - j0) << 1;
        imin0 = imin << 1;  /* 16x16 MV in half-pel resolution */
        jmin0 = jmin << 1;
        best_cand[0] = ncand;
    }

    /* imin and jmin is the best 1 MV */
#ifndef NO_INTER4V
    /******************* Find 4 motion vectors ****************************/
    if (use_4mv && !h263_mode)
    {
#ifdef _SAD_STAT
        num_Blk += 4;
#endif
        /* starting from the best 1MV */
        //offset = imin + jmin*lx;
        iorg = i0;
        jorg = j0;

        for (comp = 0; comp < 4; comp++)
        {
            /* top-left pel of this 8x8 block */
            i0 = iorg + ((comp & 1) << 3);
            j0 = jorg + ((comp & 2) << 2);

            imin = (imin0 >> 1) + ((comp & 1) << 3);    /* starting point from 16x16 MV */
            jmin = (jmin0 >> 1) + ((comp & 2) << 2);
            ncand = ref + imin + jmin * lx;

            cur8 = cur + ((comp & 1) << 3) + (((comp & 2) << 2) << 4) ; /* 11/30/05, smaller cache */

            /* find limit of the search (adjusting search range)*/
            ilow = i0 - range;
            ihigh = i0 + range - 1 ;/* 4/9/01 */
            if (ilow < -15)
                ilow = -15;
            if (ihigh > width - 1)
                ihigh = width - 1;
            jlow = j0 - range;
            jhigh = j0 + range - 1 ;/* 4/9/01 */
            if (jlow < -15)
                jlow = -15;
            if (jhigh > height - 1)
                jhigh = height - 1;

            SAD_Block = video->functionPointer->SAD_Block;

            if (FS_en)  /* fullsearch enable, center around 16x16 MV */
            {
                dmin = fullsearchBlk(video, currVol, ncand, cur8, &imin, &jmin, ilow, ihigh, jlow, jhigh, range);
                ncand = ref + imin + jmin * lx;

                mot[mbnum][comp+1].sad = dmin;
                mot[mbnum][comp+1].x = (imin - i0) << 1;
                mot[mbnum][comp+1].y = (jmin - j0) << 1;
                best_cand[comp+1] = ncand;
            }
            else /* no fullsearch, do local search */
            {
                /* starting point from 16x16 */
                dmin = (*SAD_Block)(ncand, cur8, 65536, lx, extra_info);

                /******************* local refinement ***************************/
                center_again = 0;
                last_loc = 0;

                while (!center_again)
                {
                    center_again = 1;
                    i = imin;
                    j = jmin - 1;
                    cand = ref + i + j * lx;  /* starting from [0,-1] */
                    /* spiral check one step at a time*/
                    for (k = 2; k <= 8; k += 2)
                    {
                        if (!tab_exclude[last_loc][k]) /* exclude last step computation */
                        {
                            /* not already computed */
                            if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
                            {
                                d = (*SAD_Block)(cand, cur8, dmin, lx, extra_info);

                                if (d < dmin)
                                {
                                    ncand = cand;
                                    dmin = d;
                                    imin = i;
                                    jmin = j;
                                    center_again = 0;
                                    new_loc = k;
                                }
                                else if ((d == dmin) && PV_ABS(i0 - i) + PV_ABS(j0 - j) < PV_ABS(i0 - imin) + PV_ABS(j0 - jmin))
                                {
                                    ncand = cand;
                                    imin = i;
                                    jmin = j;
                                    center_again = 0;
                                    new_loc = k;
                                }
                            }
                        }
                        if (k == 8)  /* end side search*/
                        {
                            if (!center_again)
                            {
                                k = -1; /* start diagonal search */
                                /* NOTE(review): the extra 'j' range test here
                                   (absent from the 16x16 loop) looks like a
                                   defensive bound on the pointer step; the
                                   pointer and 'j' can diverge when it fails —
                                   kept as-is, flag for upstream comparison */
                                if (j <= height - 1 && j > 0)
                                    cand -= lx;
                                j--;
                            }
                        }
                        else
                        {
                            next = refine_next[k][0];
                            cand += next;
                            i += next;
                            next = refine_next[k][1];
                            cand += lx * next;
                            j += next;
                        }
                    }
                    last_loc = new_loc;
                }
                mot[mbnum][comp+1].sad = dmin;
                mot[mbnum][comp+1].x = (imin - i0) << 1;
                mot[mbnum][comp+1].y = (jmin - j0) << 1;
                best_cand[comp+1] = ncand;
            }
            /********************************************/
        }
    }
    else
#endif  /* NO_INTER4V */
    {
        /* 4MV disabled or H.263 mode: replicate the 16x16 result into the
           four 8x8 slots, splitting the SAD evenly (rounded) */
        mot[mbnum][1].sad = mot[mbnum][2].sad = mot[mbnum][3].sad = mot[mbnum][4].sad = (dmin + 2) >> 2;
        mot[mbnum][1].x = mot[mbnum][2].x = mot[mbnum][3].x = mot[mbnum][4].x = mot[mbnum][0].x;
        mot[mbnum][1].y = mot[mbnum][2].y = mot[mbnum][3].y = mot[mbnum][4].y = mot[mbnum][0].y;
        best_cand[1] = best_cand[2] = best_cand[3] = best_cand[4] = ncand;
    }
    return ;
}

/*===============================================================================
    Function:   fullsearch
    Date:       09/16/2000
    Purpose:    Perform full-search motion estimation over the range of search
                region in a spiral-outward manner.
    Input/Output:   VideoEncData, current Vol, previous Vop, pointer to the left corner of
                current VOP, current coord (also output), boundaries.
===============================================================================*/ Int fullsearch(VideoEncData *video, Vol *currVol, UChar *prev, UChar *cur, Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh) { Int range = video->encParams->SearchRange; UChar *cand; Int i, j, k, l; Int d, dmin; Int i0 = *imin; /* current position */ Int j0 = *jmin; Int(*SAD_Macroblock)(UChar*, UChar*, Int, void*) = video->functionPointer->SAD_Macroblock; void *extra_info = video->sad_extra_info; // UChar h263_mode = video->encParams->H263_Enabled; Int lx = video->currVop->pitch; /* with padding */ Int offset = i0 + j0 * lx; OSCL_UNUSED_ARG(currVol); cand = prev + offset; dmin = (*SAD_Macroblock)(cand, cur, (65535 << 16) | lx, (void*)extra_info) - PREF_NULL_VEC; /* perform spiral search */ for (k = 1; k <= range; k++) { i = i0 - k; j = j0 - k; cand = prev + i + j * lx; for (l = 0; l < 8*k; l++) { /* no need for boundary checking again */ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh) { d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, (void*)extra_info); if (d < dmin) { dmin = d; *imin = i; *jmin = j; } else if ((d == dmin) && PV_ABS(i0 - i) + PV_ABS(j0 - j) < PV_ABS(i0 - *imin) + PV_ABS(j0 - *jmin)) { dmin = d; *imin = i; *jmin = j; } } if (l < (k << 1)) { i++; cand++; } else if (l < (k << 2)) { j++; cand += lx; } else if (l < ((k << 2) + (k << 1))) { i--; cand--; } else { j--; cand -= lx; } } } return dmin; } #ifndef NO_INTER4V /*=============================================================================== Function: fullsearchBlk Date: 01/9/2001 Purpose: Perform full-search motion estimation of an 8x8 block over the range of search region in a spiral-outward manner centered at the 16x16 MV. Input/Output: VideoEncData, MB coordinate, pointer to the initial MV on the reference, pointer to coor of current block, search range. 
===============================================================================*/ Int fullsearchBlk(VideoEncData *video, Vol *currVol, UChar *cent, UChar *cur, Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh, Int range) { UChar *cand, *ref; Int i, j, k, l, istart, jstart; Int d, dmin; Int lx = video->currVop->pitch; /* with padding */ Int(*SAD_Block)(UChar*, UChar*, Int, Int, void*) = video->functionPointer->SAD_Block; void *extra_info = video->sad_extra_info; OSCL_UNUSED_ARG(currVol); /* starting point centered at 16x16 MV */ ref = cent; istart = *imin; jstart = *jmin; dmin = (*SAD_Block)(ref, cur, 65536, lx, (void*)extra_info); cand = ref; /* perform spiral search */ for (k = 1; k <= range; k++) { i = istart - k; j = jstart - k; cand -= (lx + 1); /* candidate region */ for (l = 0; l < 8*k; l++) { /* no need for boundary checking again */ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh) { d = (*SAD_Block)(cand, cur, dmin, lx, (void*)extra_info); if (d < dmin) { dmin = d; *imin = i; *jmin = j; } else if ((d == dmin) && PV_ABS(istart - i) + PV_ABS(jstart - j) < PV_ABS(istart - *imin) + PV_ABS(jstart - *jmin)) { dmin = d; *imin = i; *jmin = j; } } if (l < (k << 1)) { i++; cand++; } else if (l < (k << 2)) { j++; cand += lx; } else if (l < ((k << 2) + (k << 1))) { i--; cand--; } else { j--; cand -= lx; } } } return dmin; } #endif /* NO_INTER4V */ /*=============================================================================== Function: CandidateSelection Date: 09/16/2000 Purpose: Fill up the list of candidate using spatio-temporal correlation among neighboring blocks. Input/Output: type_pred = 0: first pass, 1: second pass, or no SCD Modified: 09/23/01, get rid of redundant candidates before passing back. 
===============================================================================*/ void CandidateSelection(Int *mvx, Int *mvy, Int *num_can, Int imb, Int jmb, VideoEncData *video, Int type_pred) { MOT **mot = video->mot; MOT *pmot; Int mbnum = video->mbnum; Vol *currVol = video->vol[video->currLayer]; Int mbwidth = currVol->nMBPerRow; Int mbheight = currVol->nMBPerCol; Int i, j, same, num1; *num_can = 0; if (video->forwardRefVop->predictionType == P_VOP) { /* Spatio-Temporal Candidate (five candidates) */ if (type_pred == 0) /* first pass */ { pmot = &mot[mbnum][0]; /* same coordinate previous frame */ mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; if (imb >= (mbwidth >> 1) && imb > 0) /*left neighbor previous frame */ { pmot = &mot[mbnum-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } else if (imb + 1 < mbwidth) /*right neighbor previous frame */ { pmot = &mot[mbnum+1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (jmb < mbheight - 1) /*bottom neighbor previous frame */ { pmot = &mot[mbnum+mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } else if (jmb > 0) /*upper neighbor previous frame */ { pmot = &mot[mbnum-mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (imb > 0 && jmb > 0) /* upper-left neighbor current frame*/ { pmot = &mot[mbnum-mbwidth-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (jmb > 0 && imb < mbheight - 1) /* upper right neighbor current frame*/ { pmot = &mot[mbnum-mbwidth+1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } } else /* second pass */ /* original ST1 algorithm */ { pmot = &mot[mbnum][0]; /* same coordinate previous frame */ mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; if (imb > 0) /*left neighbor current frame */ { pmot = &mot[mbnum-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = 
(pmot->y) >> 1; } if (jmb > 0) /*upper neighbor current frame */ { pmot = &mot[mbnum-mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (imb < mbwidth - 1) /*right neighbor previous frame */ { pmot = &mot[mbnum+1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (jmb < mbheight - 1) /*bottom neighbor previous frame */ { pmot = &mot[mbnum+mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } } } else /* only Spatial Candidate (four candidates)*/ { if (type_pred == 0) /*first pass*/ { if (imb > 1) /* neighbor two blocks away to the left */ { pmot = &mot[mbnum-2][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (imb > 0 && jmb > 0) /* upper-left neighbor */ { pmot = &mot[mbnum-mbwidth-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (jmb > 0 && imb < mbheight - 1) /* upper right neighbor */ { pmot = &mot[mbnum-mbwidth+1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } } //#ifdef SCENE_CHANGE_DETECTION /* second pass (ST2 algorithm)*/ else if (type_pred == 1) /* 4/7/01 */ { if (imb > 0) /*left neighbor current frame */ { pmot = &mot[mbnum-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (jmb > 0) /*upper neighbor current frame */ { pmot = &mot[mbnum-mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (imb < mbwidth - 1) /*right neighbor current frame */ { pmot = &mot[mbnum+1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } if (jmb < mbheight - 1) /*bottom neighbor current frame */ { pmot = &mot[mbnum+mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } } //#else else /* original ST1 algorithm */ { if (imb > 0) /*left neighbor current frame */ { pmot = &mot[mbnum-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; if (jmb > 0) 
/*upper-left neighbor current frame */ { pmot = &mot[mbnum-mbwidth-1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } } if (jmb > 0) /*upper neighbor current frame */ { pmot = &mot[mbnum-mbwidth][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; if (imb < mbheight - 1) /*upper-right neighbor current frame */ { pmot = &mot[mbnum-mbwidth+1][0]; mvx[(*num_can)] = (pmot->x) >> 1; mvy[(*num_can)++] = (pmot->y) >> 1; } } } //#endif } /* 3/23/01, remove redundant candidate (possible k-mean) */ num1 = *num_can; *num_can = 1; for (i = 1; i < num1; i++) { same = 0; j = 0; while (!same && j < *num_can) { #if (CANDIDATE_DISTANCE==0) if (mvx[i] == mvx[j] && mvy[i] == mvy[j]) #else // modified k-mean, 3/24/01, shouldn't be greater than 3 if (PV_ABS(mvx[i] - mvx[j]) + PV_ABS(mvy[i] - mvy[j]) < CANDIDATE_DISTANCE) #endif same = 1; j++; } if (!same) { mvx[*num_can] = mvx[i]; mvy[*num_can] = mvy[i]; (*num_can)++; } } #ifdef _SAD_STAT num_cand += (*num_can); #endif if (num1 == 5 && *num_can == 1) *num_can = ALL_CAND_EQUAL; /* all are equal */ return ; } /*=========================================================================== Function: RasterIntraUpdate Date: 2/26/01 Purpose: To raster-scan assign INTRA-update . N macroblocks are updated (also was programmable). 
===========================================================================*/ void RasterIntraUpdate(UChar *intraArray, UChar *Mode, Int totalMB, Int numRefresh) { Int indx, i; /* find the last refresh MB */ indx = 0; while (intraArray[indx] == 1 && indx < totalMB) indx++; /* add more */ for (i = 0; i < numRefresh && indx < totalMB; i++) { Mode[indx] = MODE_INTRA; intraArray[indx++] = 1; } /* if read the end of frame, reset and loop around */ if (indx >= totalMB - 1) { ResetIntraUpdate(intraArray, totalMB); indx = 0; while (i < numRefresh && indx < totalMB) { intraArray[indx] = 1; Mode[indx++] = MODE_INTRA; i++; } } return ; } /*=========================================================================== Function: ResetIntraUpdate Date: 11/28/00 Purpose: Reset already intra updated flags to all zero ===========================================================================*/ void ResetIntraUpdate(UChar *intraArray, Int totalMB) { M4VENC_MEMSET(intraArray, 0, sizeof(UChar)*totalMB); return ; } /*=========================================================================== Function: ResetIntraUpdateRegion Date: 12/1/00 Purpose: Reset already intra updated flags in one region to all zero ===========================================================================*/ void ResetIntraUpdateRegion(UChar *intraArray, Int start_i, Int rwidth, Int start_j, Int rheight, Int mbwidth, Int mbheight) { Int indx, j; if (start_i + rwidth >= mbwidth) rwidth = mbwidth - start_i; if (start_j + rheight >= mbheight) rheight = mbheight - start_j; for (j = start_j; j < start_j + rheight; j++) { indx = j * mbwidth; M4VENC_MEMSET(intraArray + indx + start_i, 0, sizeof(UChar)*rwidth); } return ; } /************************************************************* Function: MoveNeighborSAD Date: 3/27/01 Purpose: Move neighboring SAD around when center has shifted *************************************************************/ void MoveNeighborSAD(Int dn[], Int new_loc) { Int tmp[9]; tmp[0] = dn[0]; 
tmp[1] = dn[1]; tmp[2] = dn[2]; tmp[3] = dn[3]; tmp[4] = dn[4]; tmp[5] = dn[5]; tmp[6] = dn[6]; tmp[7] = dn[7]; tmp[8] = dn[8]; dn[0] = dn[1] = dn[2] = dn[3] = dn[4] = dn[5] = dn[6] = dn[7] = dn[8] = 65536; switch (new_loc) { case 0: break; case 1: dn[4] = tmp[2]; dn[5] = tmp[0]; dn[6] = tmp[8]; break; case 2: dn[4] = tmp[3]; dn[5] = tmp[4]; dn[6] = tmp[0]; dn[7] = tmp[8]; dn[8] = tmp[1]; break; case 3: dn[6] = tmp[4]; dn[7] = tmp[0]; dn[8] = tmp[2]; break; case 4: dn[1] = tmp[2]; dn[2] = tmp[3]; dn[6] = tmp[5]; dn[7] = tmp[6]; dn[8] = tmp[0]; break; case 5: dn[1] = tmp[0]; dn[2] = tmp[4]; dn[8] = tmp[6]; break; case 6: dn[1] = tmp[8]; dn[2] = tmp[0]; dn[3] = tmp[4]; dn[4] = tmp[5]; dn[8] = tmp[7]; break; case 7: dn[2] = tmp[8]; dn[3] = tmp[0]; dn[4] = tmp[6]; break; case 8: dn[2] = tmp[1]; dn[3] = tmp[2]; dn[4] = tmp[0]; dn[5] = tmp[6]; dn[6] = tmp[7]; break; } dn[0] = tmp[new_loc]; return ; } /* 3/28/01, find minimal of dn[9] */ Int FindMin(Int dn[]) { Int min, i; Int dmin; dmin = dn[1]; min = 1; for (i = 2; i < 9; i++) { if (dn[i] < dmin) { dmin = dn[i]; min = i; } } return min; } ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/mp4def.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
 * ------------------------------------------------------------------- */
#ifndef _PVDECDEF_H_
#define _PVDECDEF_H_

/********** platform dependent in-line assembly *****************************/

/*************** Intel *****************/

/*************** ARM *****************/
/* for general ARM instruction. #define __ARM has to be defined in compiler set up.*/
/* for DSP MUL */
#ifdef __TARGET_FEATURE_DSPMUL
#define _ARM_DSP_MUL
#endif

/* for Count Leading Zero instruction */
#ifdef __TARGET_ARCH_5T
#define _ARM_CLZ
#endif
#ifdef __TARGET_ARCH_5TE
#define _ARM_CLZ
#endif
/****************************************************************************/

/* basic fixed-width-by-convention scalar types used throughout the codec */
#ifndef _PV_TYPES_
#define _PV_TYPES_
typedef unsigned char UChar;
typedef char Char;
typedef unsigned int UInt;
typedef int Int;
typedef unsigned short UShort;
typedef short Short;
typedef short int SInt;
typedef unsigned int Bool;
typedef unsigned long ULong;
typedef void Void;

/* codec lifecycle states */
#define PV_CODEC_INIT       0
#define PV_CODEC_STOP       1
#define PV_CODEC_RUNNING    2
#define PV_CODEC_RESET      3
#endif

/* status codes returned by the parsing/decoding routines */
typedef enum
{
    PV_SUCCESS,
    PV_FAIL,
    PV_EOS,             /* hit End_Of_Sequence     */
    PV_MB_STUFFING,     /* hit Macroblock_Stuffing */
    PV_END_OF_VOP,      /* hit End_of_Video_Object_Plane */
    PV_END_OF_MB,       /* hit End_of_Macroblock */
    PV_END_OF_BUF       /* hit End_of_Bitstream_Buffer */
} PV_STATUS;

typedef UChar PIXEL;
//typedef Int MOT;   /* : "int" type runs faster on RISC machine */

#define HTFM            /*  3/2/01, Hypothesis Test Fast Matching for early drop-out*/
//#define _MOVE_INTERFACE

//#define RANDOM_REFSELCODE

/* handle the case of division by zero in RC */
#define MAD_MIN 1

/* 4/11/01, if SSE or MMX, no HTFM, no SAD_HP_FLY */

/* Code size reduction related Macros */
#ifdef H263_ONLY
#ifndef NO_RVLC
#define NO_RVLC
#endif
#ifndef NO_MPEG_QUANT
#define NO_MPEG_QUANT
#endif
#ifndef NO_INTER4V
#define NO_INTER4V
#endif
#endif
/**************************************/

#define TRUE    1
#define FALSE   0

/* note: arguments are evaluated more than once — no side effects in callers */
#define PV_ABS(x)       (((x)<0)? -(x) : (x))
#define PV_SIGN(x)      (((x)<0)? -1 : 1)
#define PV_SIGN0(a)     (((a)<0)? -1 : (((a)>0) ? 1 : 0))
#define PV_MAX(a,b)     ((a)>(b)? (a):(b))
#define PV_MIN(a,b)     ((a)<(b)? (a):(b))

/* macroblock coding modes */
#define MODE_INTRA      0
#define MODE_INTER      1
#define MODE_INTRA_Q    2
#define MODE_INTER_Q    3
#define MODE_INTER4V    4
#define MODE_SKIPPED    6

/* VOP prediction types */
#define I_VOP       0
#define P_VOP       1
#define B_VOP       2

/*09/04/00 Add MB height and width */
#define MB_WIDTH 16
#define MB_HEIGHT 16

#define VOP_BRIGHT_WHITEENC 255

#define LUMINANCE_DC_TYPE   1
#define CHROMINANCE_DC_TYPE 2

#define EOB_CODE                        1
#define EOB_CODE_LENGTH                32

/* 11/30/98 */
#define FoundRM     1   /* Resync Marker */
#define FoundVSC    2   /* VOP_START_CODE. */
#define FoundGSC    3   /* GROUP_START_CODE */
#define FoundEOB    4   /* EOB_CODE */

/* 05/08/2000, the error code returned from BitstreamShowBits() */
#define BITSTREAM_ERROR_CODE 0xFFFFFFFF

/* PacketVideo "absolution timestamp" object. 06/13/2000 */
#define PVTS_START_CODE         0x01C4
#define PVTS_START_CODE_LENGTH  32

/* session layer and vop layer start codes */
#define SESSION_START_CODE  0x01B0
#define SESSION_END_CODE    0x01B1
#define VISUAL_OBJECT_START_CODE 0x01B5

#define VO_START_CODE           0x8
#define VO_HEADER_LENGTH        32      /* length of VO header: VO_START_CODE +  VO_ID */

#define SOL_START_CODE          0x01BE
#define SOL_START_CODE_LENGTH   32

#define VOL_START_CODE 0x12
#define VOL_START_CODE_LENGTH 28

#define VOP_START_CODE 0x1B6
#define VOP_START_CODE_LENGTH   32

#define GROUP_START_CODE    0x01B3
#define GROUP_START_CODE_LENGTH  32

#define VOP_ID_CODE_LENGTH      5
#define VOP_TEMP_REF_CODE_LENGTH    16

#define USER_DATA_START_CODE        0x01B2
#define USER_DATA_START_CODE_LENGTH 32

#define START_CODE_PREFIX       0x01
#define START_CODE_PREFIX_LENGTH    24

#define SHORT_VIDEO_START_MARKER        0x20
#define SHORT_VIDEO_START_MARKER_LENGTH 22
#define SHORT_VIDEO_END_MARKER          0x3F
#define GOB_RESYNC_MARKER       0x01
#define GOB_RESYNC_MARKER_LENGTH    17

/* motion and resync markers used in error resilient mode  */
#define DC_MARKER                      438273
#define DC_MARKER_LENGTH                19

#define MOTION_MARKER_COMB             126977
#define MOTION_MARKER_COMB_LENGTH     17

#define MOTION_MARKER_SEP               81921
#define MOTION_MARKER_SEP_LENGTH        17

#define RESYNC_MARKER           1
#define RESYNC_MARKER_LENGTH    17

/* sprite coding modes */
#define SPRITE_NOT_USED     0
#define STATIC_SPRITE       1
#define ONLINE_SPRITE       2
#define GMC_SPRITE      3

/* macroblock and block size */
#define MB_SIZE 16
#define NCOEFF_MB (MB_SIZE*MB_SIZE)
#define B_SIZE 8
#define NCOEFF_BLOCK (B_SIZE*B_SIZE)
#define NCOEFF_Y NCOEFF_MB
#define NCOEFF_U NCOEFF_BLOCK
#define NCOEFF_V NCOEFF_BLOCK

/* overrun buffer size  */
#define DEFAULT_OVERRUN_BUFFER_SIZE 1000

/* VLC decoding related definitions */
#define VLC_ERROR   (-1)
#define VLC_ESCAPE  7167

#endif /* _PVDECDEF_H_ */



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/mp4enc_api.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#include "mp4enc_lib.h"
#include "bitstream_io.h"
#include "rate_control.h"
#include "m4venc_oscl.h"


/* Inverse normal zigzag */
const static Int zigzag_i[NCOEFF_BLOCK] =
{
    0, 1, 8, 16, 9, 2, 3, 10,
    17, 24, 32, 25, 18, 11, 4, 5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13, 6, 7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

/* INTRA */
/* default MPEG quantization matrix for intra blocks */
const static Int mpeg_iqmat_def[NCOEFF_BLOCK] =
{
    8, 17, 18, 19, 21, 23, 25, 27,
    17, 18, 19, 21, 23, 25, 27, 28,
    20, 21, 22, 23, 24, 26, 28, 30,
    21, 22, 23, 24, 26, 28, 30, 32,
    22, 23, 24, 26, 28, 30, 32, 35,
    23, 24, 26, 28, 30, 32, 35, 38,
    25, 26, 28, 30, 32, 35, 38, 41,
    27, 28, 30, 32, 35, 38, 41, 45
};

/* INTER */
/* default MPEG quantization matrix for inter blocks */
const static Int mpeg_nqmat_def[64]  =
{
    16, 17, 18, 19, 20, 21, 22, 23,
    17, 18, 19, 20, 21, 22, 23, 24,
    18, 19, 20, 21, 22, 23, 24, 25,
    19, 20, 21, 22, 23, 24, 26, 27,
    20, 21, 22, 23, 25, 26, 27, 28,
    21, 22, 23, 24, 26, 27, 28, 30,
    22, 23, 24, 26, 27, 28, 30, 31,
    23, 24, 25, 27, 28, 30, 31, 33
};

/* Profiles and levels */
/* Simple profile(level 0-3) and Core profile (level 1-2) */
/* {SPL0, SPL1, SPL2, SPL3, SPL4a, SPL5, CPL1, CPL2} ,
   SPL0: Simple Profile@Level0, CPL1: Core Profile@Level1 */
const static Int profile_level_code[MAX_BASE_PROFILE+1] =
{
    0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x21, 0x22
};

const static Int profile_level_max_bitrate[MAX_BASE_PROFILE+1] =
{
    64000, 64000, 128000, 384000, 4000000, 8000000, 384000, 2000000
};

const static Int profile_level_max_packet_size[MAX_BASE_PROFILE+1] =
{
    2048, 2048, 4096, 8192, 16384, 16384, 4096, 8192
};

const static Int profile_level_max_mbsPerSec[MAX_BASE_PROFILE+1] =
{
    1485, 1485, 5940, 11880, 36000, 40500, 5940, 23760
};

const static Int profile_level_max_VBV_size[MAX_BASE_PROFILE+1] =
{
    163840, 163840, 655360, 655360, 1310720, 1835008, 262144, 1310720
};


/* Scalable profiles for nLayers = 2 */
/* Simple scalable profile (level 0-2) and Core scalable profile (level 1-3) */
/* {SSPL0, SSPL1, SSPL2, CSPL1, CSPL2, CSPL3} , SSPL0: Simple Scalable Profile@Level0,
   CSPL1: Core Scalable Profile@Level1, the fourth is redundant for easy table manipulation  */
const static Int scalable_profile_level_code[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    0x10, 0x11, 0x12, 0xA1, 0xA2, 0xA3
};

const static Int scalable_profile_level_max_bitrate[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    128000, 128000, 256000, 768000, 1500000, 4000000
};

/* in bits */
const static Int scalable_profile_level_max_packet_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    2048, 2048, 4096, 4096, 4096, 16384
};

const static Int scalable_profile_level_max_mbsPerSec[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    1485, 7425, 23760, 14850, 29700, 120960
};

const static Int scalable_profile_level_max_VBV_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    163840, 655360, 655360, 1048576, 1310720, 1310720
};

/* H263 profile 0 @ level 10-70 */
const static Int   h263Level[8] = {0, 10, 20, 30, 40, 50, 60, 70};
const static float rBR_bound[8] = {0, 1, 2, 6, 32, 64, 128, 256};
const static float max_h263_framerate[2] = {(float)30000 / (float)2002,
        (float)30000 / (float)1001
                                           };
const static Int   max_h263_width[2]  = {176, 352};
const static Int   max_h263_height[2] = {144, 288};

/* 6/2/2001, newly added functions to make PVEncodeVop more readable.
*/ Int DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime); void DetermineVopType(VideoEncData *video, Int currLayer); Int UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status); Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized); #ifdef PRINT_RC_INFO extern FILE *facct; extern int tiTotalNumBitsGenerated; extern int iStuffBits; #endif #ifdef PRINT_EC extern FILE *fec; #endif /* ======================================================================== */ /* Function : PVGetDefaultEncOption() */ /* Date : 12/12/2005 */ /* Purpose : */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase) { VideoEncOptions defaultUseCase = {H263_MODE, profile_level_max_packet_size[SIMPLE_PROFILE_LEVEL0] >> 3, SIMPLE_PROFILE_LEVEL0, PV_OFF, 0, 1, 1000, 33, {144, 144}, {176, 176}, {15, 30}, {64000, 128000}, {10, 10}, {12, 12}, {0, 0}, CBR_1, 0.0, PV_OFF, -1, 0, PV_OFF, 16, PV_OFF, 0, PV_ON }; OSCL_UNUSED_ARG(encUseCase); // unused for now. Later we can add more defaults setting and use this // argument to select the right one. /* in the future we can create more meaningful use-cases */ if (encOption == NULL) { return PV_FALSE; } M4VENC_MEMCPY(encOption, &defaultUseCase, sizeof(VideoEncOptions)); return PV_TRUE; } /* ======================================================================== */ /* Function : PVInitVideoEncoder() */ /* Date : 08/22/2000 */ /* Purpose : Initialization of MP4 Encoder and VO bitstream */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/*  Modified : 5/21/01, allocate only yChan and assign uChan & vChan        */
/*             12/12/05, add encoding option as input argument              */
/* ======================================================================== */

OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, VideoEncOptions *encOption)
{

    Bool status = PV_TRUE;
    Int nLayers, idx, i, j;
    Int max = 0, max_width = 0, max_height = 0, pitch, offset;
    Int size = 0, nTotalMB = 0;
    VideoEncData *video;
    Vol *pVol;
    VideoEncParams *pEncParams;
    Int temp_w, temp_h, mbsPerSec;

    /******************************************/
    /*      this part use to be PVSetEncode() */
    Int profile_table_index, *profile_level_table;
    Int profile_level = encOption->profile_level;
    Int PacketSize = encOption->packetSize << 3; /* packet size in bits */
    Int timeInc, timeIncRes;
    float profile_max_framerate;
    VideoEncParams *encParams;

    /* Re-entry: if this controller already carries an encoder instance,
       tear it down completely before building a fresh one. */
    if (encoderControl->videoEncoderData) /* this has been called */
    {
        if (encoderControl->videoEncoderInit) /* check if PVInitVideoEncoder() has been called  */
        {
            PVCleanUpVideoEncoder(encoderControl);
            encoderControl->videoEncoderInit = 0;
        }

        M4VENC_FREE(encoderControl->videoEncoderData);
        encoderControl->videoEncoderData = NULL;
    }
    encoderControl->videoEncoderInit = 0;  /* reset this value */

    video = (VideoEncData *)M4VENC_MALLOC(sizeof(VideoEncData)); /* allocate memory for encData */

    if (video == NULL)
        return PV_FALSE;

    M4VENC_MEMSET(video, 0, sizeof(VideoEncData));

    encoderControl->videoEncoderData = (void *) video; /* set up pointer in VideoEncData structure */

    video->encParams = (VideoEncParams *)M4VENC_MALLOC(sizeof(VideoEncParams));
    if (video->encParams == NULL)
        goto CLEAN_UP;

    M4VENC_MEMSET(video->encParams, 0, sizeof(VideoEncParams));

    encParams = video->encParams;
    encParams->nLayers = encOption->numLayers;

    /* Check whether the input packetsize is valid (Note: put code here (before
       any memory allocation) in order to avoid memory leak */

    if ((Int)profile_level <= (Int)(MAX_BASE_PROFILE)) /* non-scalable profile */
    {
        profile_level_table = (Int *)profile_level_max_packet_size;
        profile_table_index = (Int)profile_level;
        if (encParams->nLayers != 1) /* non-scalable profiles are single-layer only */
        {
            goto CLEAN_UP;
        }

        encParams->LayerMaxMbsPerSec[0] = profile_level_max_mbsPerSec[profile_table_index];

    }
    else /* scalable profile */
    {
        profile_level_table = (Int *)scalable_profile_level_max_packet_size;
        profile_table_index = (Int)profile_level - (Int)(MAX_BASE_PROFILE) - 1;
        if (encParams->nLayers < 2) /* scalable profiles require at least two layers */
        {
            goto CLEAN_UP;
        }
        for (i = 0; i < encParams->nLayers; i++)
        {
            encParams->LayerMaxMbsPerSec[i] = scalable_profile_level_max_mbsPerSec[profile_table_index];
        }

    }

    /* cannot have zero size packet with these modes */
    if (PacketSize == 0)
    {
        if (encOption->encMode == DATA_PARTITIONING_MODE)
        {
            goto CLEAN_UP;
        }
        if (encOption->encMode == COMBINE_MODE_WITH_ERR_RES)
        {
            encOption->encMode = COMBINE_MODE_NO_ERR_RES;
        }
    }

    /* without a GOB header interval, the error-resilient H.263 variants
       degenerate into their plain counterparts */
    if (encOption->gobHeaderInterval == 0)
    {
        if (encOption->encMode == H263_MODE_WITH_ERR_RES)
        {
            encOption->encMode = H263_MODE;
        }

        if (encOption->encMode == SHORT_HEADER_WITH_ERR_RES)
        {
            encOption->encMode = SHORT_HEADER;
        }
    }

    if (PacketSize > profile_level_table[profile_table_index])
        goto CLEAN_UP;

    /* Initial Defaults for all Modes */

    encParams->SequenceStartCode = 1;
    encParams->GOV_Enabled = 0;
    encParams->RoundingType = 0;
    encParams->IntraDCVlcThr = PV_MAX(PV_MIN(encOption->intraDCVlcTh, 7), 0); /* clamp to [0,7] */
    encParams->ACDCPrediction = ((encOption->useACPred == PV_ON) ? TRUE : FALSE);
    encParams->RC_Type = encOption->rcType;
    encParams->Refresh = encOption->numIntraMB;
    encParams->ResyncMarkerDisable = 0; /* Enable Resync Marker */

    for (i = 0; i < encOption->numLayers; i++)
    {
#ifdef NO_MPEG_QUANT
        encParams->QuantType[i] = 0;
#else
        encParams->QuantType[i] = encOption->quantType[i];  /* H263 */
#endif
        /* initial quantizers must stay within the legal [1,31] range */
        if (encOption->pQuant[i] >= 1 && encOption->pQuant[i] <= 31)
        {
            encParams->InitQuantPvop[i] = encOption->pQuant[i];
        }
        else
        {
            goto CLEAN_UP;
        }
        if (encOption->iQuant[i] >= 1 && encOption->iQuant[i] <= 31)
        {
            encParams->InitQuantIvop[i] = encOption->iQuant[i];
        }
        else
        {
            goto CLEAN_UP;
        }
    }

    encParams->HalfPel_Enabled = 1;
    encParams->SearchRange = encOption->searchRange; /* 4/16/2001 */
    encParams->FullSearch_Enabled = 0;
#ifdef NO_INTER4V
    encParams->MV8x8_Enabled = 0;
#else
    encParams->MV8x8_Enabled = 0;// comment out for now!! encOption->mv8x8Enable;
#endif
    encParams->H263_Enabled = 0;
    encParams->GOB_Header_Interval = 0; // need to be reset to 0
    encParams->IntraPeriod = encOption->intraPeriod; /* Intra update period update default*/
    encParams->SceneChange_Det = encOption->sceneDetect;
    encParams->FineFrameSkip_Enabled = 0;
    encParams->NoFrameSkip_Enabled = encOption->noFrameSkipped;
    encParams->NoPreSkip_Enabled = encOption->noFrameSkipped;
    encParams->GetVolHeader[0] = 0;
    encParams->GetVolHeader[1] = 0;
    encParams->ResyncPacketsize = encOption->packetSize << 3;
    encParams->LayerMaxBitRate[0] = 0;
    encParams->LayerMaxBitRate[1] = 0;
    encParams->LayerMaxFrameRate[0] = (float)0.0;
    encParams->LayerMaxFrameRate[1] = (float)0.0;
    encParams->VBV_delay = encOption->vbvDelay; /* 2sec VBV buffer size */

    /* Mode-specific overrides of the defaults set above */
    switch (encOption->encMode)
    {

        case SHORT_HEADER:
        case SHORT_HEADER_WITH_ERR_RES:

            /* From Table 6-26 */
            encParams->nLayers = 1;
            encParams->QuantType[0] = 0;        /*H263 */
            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */
            encParams->DataPartitioning = 0;    /* Combined Mode */
            encParams->ReversibleVLC = 0;       /* Disable RVLC */
            encParams->RoundingType = 0;
            encParams->IntraDCVlcThr = 7;       /* use_intra_dc_vlc = 0 */
            encParams->MV8x8_Enabled = 0;

            encParams->GOB_Header_Interval = encOption->gobHeaderInterval;
            encParams->H263_Enabled = 2;
            encParams->GOV_Enabled = 0;
            encParams->TimeIncrementRes = 30000; /* timeIncrementRes for H263 */
            break;

        case H263_MODE:
        case H263_MODE_WITH_ERR_RES:

            /* From Table 6-26 */
            encParams->nLayers = 1;
            encParams->QuantType[0] = 0;        /*H263 */
            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */
            encParams->DataPartitioning = 0;    /* Combined Mode */
            encParams->ReversibleVLC = 0;       /* Disable RVLC */
            encParams->RoundingType = 0;
            encParams->IntraDCVlcThr = 7;       /* use_intra_dc_vlc = 0 */
            encParams->MV8x8_Enabled = 0;
            encParams->H263_Enabled = 1;
            encParams->GOV_Enabled = 0;
            encParams->TimeIncrementRes = 30000; /* timeIncrementRes for H263 */

            break;
#ifndef H263_ONLY
        case DATA_PARTITIONING_MODE:

            encParams->DataPartitioning = 1;    /* Base Layer Data Partitioning */
            encParams->ResyncMarkerDisable = 0; /* Resync Marker */
#ifdef NO_RVLC
            encParams->ReversibleVLC = 0;
#else
            encParams->ReversibleVLC = (encOption->rvlcEnable == PV_ON); /* RVLC when Data Partitioning */
#endif
            encParams->ResyncPacketsize = PacketSize;
            break;

        case COMBINE_MODE_WITH_ERR_RES:

            encParams->DataPartitioning = 0;    /* Combined Mode */
            encParams->ResyncMarkerDisable = 0; /* Resync Marker */
            encParams->ReversibleVLC = 0;       /* No RVLC */
            encParams->ResyncPacketsize = PacketSize;
            break;

        case COMBINE_MODE_NO_ERR_RES:

            encParams->DataPartitioning = 0;    /* Combined Mode */
            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */
            encParams->ReversibleVLC = 0;       /* No RVLC */
            break;
#endif
        default:
            goto CLEAN_UP;
    }
    /* Set the constraints (maximum values) according to the input profile and level */
    /* Note that profile_table_index is already figured out above */

    /* base layer */
    encParams->profile_table_index = profile_table_index; /* Used to limit the
        profile and level in SetProfile_BufferSize() */

    /* check timeIncRes */
    timeIncRes = encOption->timeIncRes;
    timeInc = encOption->tickPerSrc;

    if ((timeIncRes >= 1) && (timeIncRes <= 65536) && (timeInc < timeIncRes) && (timeInc != 0))
    { /* AGI RCS 08/12/09 */
        encParams->TimeIncrementRes = timeIncRes;
        video->FrameRate = timeIncRes / ((float)timeInc); /* frames per second */
    }
    else
    {
        goto CLEAN_UP;
    }

    /* check frame dimension: H.263 allows only the five standard picture sizes */
    if (encParams->H263_Enabled)
    {
        switch (encOption->encWidth[0])
        {
            case 128:
                if (encOption->encHeight[0] != 96) /* source_format = 1: sub-QCIF */
                    goto CLEAN_UP;
                break;
            case 176:
                if (encOption->encHeight[0] != 144) /* source_format = 2: QCIF */
                    goto CLEAN_UP;
                break;
            case 352:
                if (encOption->encHeight[0] != 288) /* source_format = 3: CIF */
                    goto CLEAN_UP;
                break;
            case 704:
                if (encOption->encHeight[0] != 576) /* source_format = 4: 4CIF */
                    goto CLEAN_UP;
                break;
            case 1408:
                if (encOption->encHeight[0] != 1152) /* source_format = 5: 16CIF */
                    goto CLEAN_UP;
                break;
            default:
                goto CLEAN_UP;
        }
    }
    for (i = 0; i < encParams->nLayers; i++)
    {
        encParams->LayerHeight[i] = encOption->encHeight[i];
        encParams->LayerWidth[i] = encOption->encWidth[i];
    }

    /* check frame rate */
    for (i = 0; i < encParams->nLayers; i++)
    {
        encParams->LayerFrameRate[i] = encOption->encFrameRate[i];
    }

    if (encParams->nLayers > 1)
    {
        /* two-layer framerates must be distinct and non-zero */
        if (encOption->encFrameRate[0] == encOption->encFrameRate[1] ||
                encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) /*  7/31/03 */
            goto CLEAN_UP;
    }
    /* set max frame rate */
    for (i = 0; i < encParams->nLayers; i++)
    {

        /* Make sure the maximum framerate is consistent with the given profile
           and level */
        nTotalMB = ((encParams->LayerWidth[i] + 15) / 16) * ((encParams->LayerHeight[i] + 15) / 16);

        if (nTotalMB > 0)
            profile_max_framerate = (float)encParams->LayerMaxMbsPerSec[i] / (float)nTotalMB;

        else
            profile_max_framerate = (float)30.0;

        encParams->LayerMaxFrameRate[i] = PV_MIN(profile_max_framerate, encParams->LayerFrameRate[i]);
    }

    /* check bit rate */
    /* set max bit rate */
    for (i = 0; i < encParams->nLayers; i++)
    {
        encParams->LayerBitRate[i] = encOption->bitRate[i];
        encParams->LayerMaxBitRate[i] = encOption->bitRate[i];
    }
    if (encParams->nLayers > 1)
    {
        /* two-layer bitrates must be distinct and non-zero */
        if (encOption->bitRate[0] == encOption->bitRate[1] ||
                encOption->bitRate[0] == 0 || encOption->bitRate[1] == 0) /*  7/31/03 */
            goto CLEAN_UP;
    }
    /* check rate control and vbv delay*/
    encParams->RC_Type = encOption->rcType;

    if (encOption->vbvDelay == 0.0) /* set to default */
    {
        switch (encOption->rcType)
        {
            case CBR_1:
            case CBR_2:
                encParams->VBV_delay = (float)2.0; /* default 2sec VBV buffer size */
                break;

            case CBR_LOWDELAY:
                encParams->VBV_delay = (float)0.5; /* default 0.5sec VBV buffer size */
                break;

            case VBR_1:
            case VBR_2:
                encParams->VBV_delay = (float)10.0; /* default 10sec VBV buffer size */
                break;

            default:
                break;
        }
    }
    else /* force this value */
    {
        encParams->VBV_delay = encOption->vbvDelay;
    }

    /* check search range: H.263 limits motion vectors to +/-16 pels */
    if (encParams->H263_Enabled && encOption->searchRange > 16)
    {
        encParams->SearchRange = 16; /* 4/16/2001 */
    }

    /*****************************************/
    /* checking for conflict between options */
    /*****************************************/

    if (video->encParams->RC_Type == CBR_1 || video->encParams->RC_Type == CBR_2 || video->encParams->RC_Type == CBR_LOWDELAY) /* if CBR */
    {
#ifdef _PRINT_STAT
        if (video->encParams->NoFrameSkip_Enabled == PV_ON ||
                video->encParams->NoPreSkip_Enabled == PV_ON) /* don't allow frame skip*/
            printf("WARNING!!!! CBR with NoFrameSkip\n");
#endif
    }
    else if (video->encParams->RC_Type == CONSTANT_Q) /* constant_Q */
    {
        video->encParams->NoFrameSkip_Enabled = PV_ON; /* no frame skip */
        video->encParams->NoPreSkip_Enabled = PV_ON;   /* no frame skip */
#ifdef _PRINT_STAT
        printf("Turn on NoFrameSkip\n");
#endif
    }

    if (video->encParams->NoFrameSkip_Enabled == PV_ON) /* if no frame skip */
    {
        video->encParams->FineFrameSkip_Enabled = PV_OFF;
#ifdef _PRINT_STAT
        printf("NoFrameSkip !!! may violate VBV_BUFFER constraint.\n");
        printf("Turn off FineFrameSkip\n");
#endif
    }

    /******************************************/
    /******************************************/

    nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */

    /* Find the maximum width*height for memory allocation of the VOPs */
    for (idx = 0; idx < nLayers; idx++)
    {
        temp_w = video->encParams->LayerWidth[idx];
        temp_h = video->encParams->LayerHeight[idx];

        if ((temp_w*temp_h) > max)
        {
            max = temp_w * temp_h;
            max_width = ((temp_w + 15) >> 4) << 4;  /* round up to a multiple of 16 */
            max_height = ((temp_h + 15) >> 4) << 4;
            nTotalMB = ((max_width * max_height) >> 8);
        }

        /* Check if the video size and framerate(MBsPerSec) are vald */
        mbsPerSec = (Int)(nTotalMB * video->encParams->LayerFrameRate[idx]);
        if (mbsPerSec > video->encParams->LayerMaxMbsPerSec[idx]) status = PV_FALSE;
        /* NOTE(review): the PV_FALSE set here is overwritten by the
           SetProfile_BufferSize() result below, so this check never causes
           a failure return -- confirm whether that is intentional. */
    }

    /****************************************************/
    /* Set Profile and Video Buffer Size for each layer */
    /****************************************************/
    if (video->encParams->RC_Type == CBR_LOWDELAY) video->encParams->VBV_delay = 0.5; /* For CBR_LOWDELAY, we set 0.5sec buffer */
    status = SetProfile_BufferSize(video, video->encParams->VBV_delay, 1);
    if (status != PV_TRUE)
        goto CLEAN_UP;

    /****************************************/
    /* memory allocation and initialization */
    /****************************************/

    if (video == NULL) goto CLEAN_UP; /* NOTE(review): dead check -- video was
        already NULL-tested and dereferenced above */

    /* cyclic reference for passing through both structures */
    video->videoEncControls = encoderControl;

    //video->currLayer = 0; /* Set current Layer to 0 */
    //video->currFrameNo = 0; /* Set current frame Number to 0 */
    video->nextModTime = 0;
    video->nextEncIVop = 0; /* Sets up very first frame to be I-VOP! */
    video->numVopsInGOP = 0; /* counter for Vops in Gop, 2/8/01 */

    //video->frameRate = video->encParams->LayerFrameRate[0]; /* Set current layer frame rate */

    video->QPMB = (UChar *) M4VENC_MALLOC(nTotalMB * sizeof(UChar)); /* Memory for MB quantizers */
    if (video->QPMB == NULL) goto CLEAN_UP;

    video->headerInfo.Mode = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB); /* Memory for MB Modes */
    if (video->headerInfo.Mode == NULL) goto CLEAN_UP;

    video->headerInfo.CBP = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB); /* Memory for CBP (Y and C) of each MB */
    if (video->headerInfo.CBP == NULL) goto CLEAN_UP;

    /* Allocating motion vector space and interpolation memory*/

    video->mot = (MOT **)M4VENC_MALLOC(sizeof(MOT *) * nTotalMB);
    if (video->mot == NULL) goto CLEAN_UP;

    for (idx = 0; idx < nTotalMB; idx++)
    {
        /* 8 MOT entries per macroblock -- presumably one 16x16 vector plus
           the four 8x8 candidates and spares; TODO confirm against MOT use */
        video->mot[idx] = (MOT *)M4VENC_MALLOC(sizeof(MOT) * 8);
        if (video->mot[idx] == NULL)
        {
            goto CLEAN_UP;
        }
    }

    video->intraArray = (UChar *)M4VENC_MALLOC(sizeof(UChar) * nTotalMB);
    if (video->intraArray == NULL) goto CLEAN_UP;

    video->sliceNo = (UChar *) M4VENC_MALLOC(nTotalMB); /* Memory for Slice Numbers */
    if (video->sliceNo == NULL) goto CLEAN_UP;

    /* Allocating space for predDCAC[][8][16], Not that I intentionally  */
    /*    increase the dimension of predDCAC from [][6][15] to [][8][16] */
    /*    so that compilers can generate faster code to indexing the     */
    /*    data inside (by using << instead of *).   04/14/2000. */
    /* 5/29/01, use decoder lib ACDC prediction memory scheme. */
    video->predDC = (typeDCStore *) M4VENC_MALLOC(nTotalMB * sizeof(typeDCStore));
    if (video->predDC == NULL) goto CLEAN_UP;

    if (!video->encParams->H263_Enabled)
    {
        video->predDCAC_col = (typeDCACStore *) M4VENC_MALLOC(((max_width >> 4) + 1) * sizeof(typeDCACStore));
        if (video->predDCAC_col == NULL) goto CLEAN_UP;

        /* element zero will be used for storing vertical (col) AC coefficients */
        /*  the rest will be used for storing horizontal (row) AC coefficients */
        video->predDCAC_row = video->predDCAC_col + 1; /*  ACDC */

        video->acPredFlag = (Int *) M4VENC_MALLOC(nTotalMB * sizeof(Int)); /* Memory for acPredFlag */
        if (video->acPredFlag == NULL) goto CLEAN_UP;
    }

    video->outputMB = (MacroBlock *) M4VENC_MALLOC(sizeof(MacroBlock)); /* Allocating macroblock space */
    if (video->outputMB == NULL) goto CLEAN_UP;
    M4VENC_MEMSET(video->outputMB->block[0], 0, (sizeof(Short) << 6)*6);

    M4VENC_MEMSET(video->dataBlock, 0, sizeof(Short) << 7);

    /* Allocate (2*packetsize) working bitstreams */

    video->bitstream1 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 1*/
    if (video->bitstream1 == NULL) goto CLEAN_UP;

    video->bitstream2 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 2*/
    if (video->bitstream2 == NULL) goto CLEAN_UP;

    video->bitstream3 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 3*/
    if (video->bitstream3 == NULL) goto CLEAN_UP;

    /* allocate overrun buffer */
    // this buffer is used when user's buffer is too small to hold one frame.
    // It is not needed for slice-based encoding.
    if (nLayers == 1)
    {
        video->oBSize = encParams->BufferSize[0] >> 3; /* bits -> bytes */
    }
    else
    {
        video->oBSize = PV_MAX((encParams->BufferSize[0] >> 3), (encParams->BufferSize[1] >> 3));
    }

    if (video->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE || encParams->RC_Type == CONSTANT_Q) // set limit
    {
        video->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;
    }
    video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * video->oBSize);
    if (video->overrunBuffer == NULL) goto CLEAN_UP;

    video->currVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Current VOP */
    if (video->currVop == NULL) goto CLEAN_UP;

    /* add padding, 09/19/05 */
    if (video->encParams->H263_Enabled) /* make it conditional  11/28/05 */
    {
        pitch = max_width;
        offset = 0;
    }
    else
    {
        pitch = max_width + 32;      /* 16-pel padding on each side */
        offset = (pitch << 4) + 16;  /* skip 16 padded rows plus 16 columns */
        max_height += 32;
    }
    size = pitch * max_height;

    /* one contiguous allocation holds Y (size) plus U and V (size/4 each) */
    video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */
    if (video->currVop->yChan == NULL) goto CLEAN_UP;
    video->currVop->uChan = video->currVop->yChan + size;/* Memory for currVop U */
    video->currVop->vChan = video->currVop->uChan + (size >> 2);/* Memory for currVop V */

    /* shift for the offset */
    if (offset)
    {
        video->currVop->yChan += offset; /* offset to the origin.*/
        video->currVop->uChan += (offset >> 2) + 4;
        video->currVop->vChan += (offset >> 2) + 4;
    }

    video->forwardRefVop = video->currVop;  /* Initialize forwardRefVop */
    video->backwardRefVop = video->currVop; /* Initialize backwardRefVop */

    video->prevBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Previous Base Vop */
    if (video->prevBaseVop == NULL) goto CLEAN_UP;
    video->prevBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for prevBaseVop Y */
    if (video->prevBaseVop->yChan == NULL) goto CLEAN_UP;
    video->prevBaseVop->uChan = video->prevBaseVop->yChan + size; /* Memory for prevBaseVop U */
    video->prevBaseVop->vChan = video->prevBaseVop->uChan + (size >> 2); /* Memory for prevBaseVop V */

    if (offset)
    {
        video->prevBaseVop->yChan += offset; /* offset to the origin.*/
        video->prevBaseVop->uChan += (offset >> 2) + 4;
        video->prevBaseVop->vChan += (offset >> 2) + 4;
    }

    if (0) /* If B Frames */
    {
        /* NOTE(review): dead branch kept for future B-frame support */
        video->nextBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Next Base Vop */
        if (video->nextBaseVop == NULL) goto CLEAN_UP;
        video->nextBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for nextBaseVop Y */
        if (video->nextBaseVop->yChan == NULL) goto CLEAN_UP;
        video->nextBaseVop->uChan = video->nextBaseVop->yChan + size; /* Memory for nextBaseVop U */
        video->nextBaseVop->vChan = video->nextBaseVop->uChan + (size >> 2); /* Memory for nextBaseVop V */

        if (offset)
        {
            video->nextBaseVop->yChan += offset; /* offset to the origin.*/
            video->nextBaseVop->uChan += (offset >> 2) + 4;
            video->nextBaseVop->vChan += (offset >> 2) + 4;
        }
    }

    if (nLayers > 1) /* If enhancement layers */
    {
        video->prevEnhanceVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Previous Enhancement Vop */
        if (video->prevEnhanceVop == NULL) goto CLEAN_UP;
        video->prevEnhanceVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for Previous Ehancement Y */
        if (video->prevEnhanceVop->yChan == NULL) goto CLEAN_UP;
        video->prevEnhanceVop->uChan = video->prevEnhanceVop->yChan + size; /* Memory for Previous Enhancement U */
        video->prevEnhanceVop->vChan = video->prevEnhanceVop->uChan + (size >> 2); /* Memory for Previous Enhancement V */

        if (offset)
        {
            video->prevEnhanceVop->yChan += offset; /* offset to the origin.*/
            video->prevEnhanceVop->uChan += (offset >> 2) + 4;
            video->prevEnhanceVop->vChan += (offset >> 2) + 4;
        }
    }

    video->numberOfLayers = nLayers; /* Number of Layers */
    video->sumMAD = 0;

    /* 04/09/01, for Vops in the use multipass processing */
    for (idx = 0; idx < nLayers; idx++)
    {
        video->pMP[idx] = (MultiPass *)M4VENC_MALLOC(sizeof(MultiPass));
        if (video->pMP[idx] == NULL) goto CLEAN_UP;
        M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));

        video->pMP[idx]->encoded_frames = -1; /* forget about the very first I frame */

        /* RDInfo **pRDSamples: 30 framerate slots x 32 rate-distortion samples */
        video->pMP[idx]->pRDSamples = (RDInfo **)M4VENC_MALLOC(30 * sizeof(RDInfo *));
        if (video->pMP[idx]->pRDSamples == NULL) goto CLEAN_UP;
        for (i = 0; i < 30; i++)
        {
            video->pMP[idx]->pRDSamples[i] = (RDInfo *)M4VENC_MALLOC(32 * sizeof(RDInfo));
            if (video->pMP[idx]->pRDSamples[i] == NULL) goto CLEAN_UP;
            for (j = 0; j < 32; j++)    M4VENC_MEMSET(&(video->pMP[idx]->pRDSamples[i][j]), 0, sizeof(RDInfo));
        }
        /* analysis window: roughly one second of frames, clamped to [5,30] */
        video->pMP[idx]->frameRange = (Int)(video->encParams->LayerFrameRate[idx] * 1.0); /* 1.0s time frame*/
        video->pMP[idx]->frameRange = PV_MAX(video->pMP[idx]->frameRange, 5);
        video->pMP[idx]->frameRange = PV_MIN(video->pMP[idx]->frameRange, 30);

        video->pMP[idx]->framePos = -1;

    }
    /* /// End /////////////////////////////////////// */

    video->vol = (Vol **)M4VENC_MALLOC(nLayers * sizeof(Vol *)); /* Memory for VOL pointers */

    /* Memory allocation and Initialization of Vols and writing of headers */
    if (video->vol == NULL) goto CLEAN_UP;

    for (idx = 0; idx < nLayers; idx++)
    {
        video->volInitialize[idx] = 1;
        video->refTick[idx] = 0;
        video->relLayerCodeTime[idx] = 1000;
        video->vol[idx] = (Vol *)M4VENC_MALLOC(sizeof(Vol));
        if (video->vol[idx] == NULL)  goto CLEAN_UP;

        pVol = video->vol[idx];
        pEncParams = video->encParams;

        M4VENC_MEMSET(video->vol[idx], 0, sizeof(Vol));

        /* Initialize some VOL parameters */
        pVol->volID = idx; /* Set VOL ID */
        pVol->shortVideoHeader = pEncParams->H263_Enabled; /*Short Header */
        pVol->GOVStart = pEncParams->GOV_Enabled; /* GOV Header */
        pVol->timeIncrementResolution = video->encParams->TimeIncrementRes;
        pVol->nbitsTimeIncRes = 1;
        /* smallest bit count able to represent the time increment resolution */
        while (pVol->timeIncrementResolution > (1 << pVol->nbitsTimeIncRes))
        {
            pVol->nbitsTimeIncRes++;
        }

        /* timing stuff */
        pVol->timeIncrement = 0;
        pVol->moduloTimeBase = 0;
        pVol->fixedVopRate = 0; /* No fixed VOP rate */
        pVol->stream = (BitstreamEncVideo *)M4VENC_MALLOC(sizeof(BitstreamEncVideo)); /* allocate BitstreamEncVideo Instance */
        if (pVol->stream == NULL)  goto CLEAN_UP;

        pVol->width = pEncParams->LayerWidth[idx];   /* Layer Width */
        pVol->height = pEncParams->LayerHeight[idx]; /* Layer Height */
        //  pVol->intra_acdcPredDisable = pEncParams->ACDCPrediction; /* ACDC Prediction */
        pVol->ResyncMarkerDisable = pEncParams->ResyncMarkerDisable; /* Resync Marker Mode */
        pVol->dataPartitioning = pEncParams->DataPartitioning; /* Data Partitioning */
        pVol->useReverseVLC = pEncParams->ReversibleVLC; /* RVLC */
        if (idx > 0) /* Scalability layers */
        {
            pVol->ResyncMarkerDisable = 1;
            pVol->dataPartitioning = 0;
            pVol->useReverseVLC = 0; /*  No RVLC */
        }
        pVol->quantType = pEncParams->QuantType[idx]; /* Quantizer Type */

        /* no need to init Quant Matrices */

        pVol->scalability = 0; /* Vol Scalability */
        if (idx > 0)
            pVol->scalability = 1; /* Multiple layers => Scalability */

        /* Initialize Vol to Temporal scalability.  It can change during encoding */
        pVol->scalType = 1;
        /* Initialize reference Vol ID to the base layer = 0 */
        pVol->refVolID = 0;
        /* Initialize layer resolution to same as the reference */
        pVol->refSampDir = 0;
        pVol->horSamp_m = 1;
        pVol->horSamp_n = 1;
        pVol->verSamp_m = 1;
        pVol->verSamp_n = 1;
        pVol->enhancementType = 0; /* We always enhance the entire region */

        pVol->nMBPerRow = (pVol->width + 15) / 16;
        pVol->nMBPerCol = (pVol->height + 15) / 16;
        pVol->nTotalMB = pVol->nMBPerRow * pVol->nMBPerCol;

        /* nBitsForMBID = ceil(log2(nTotalMB)), needed to code macroblock IDs */
        if (pVol->nTotalMB >= 1)
            pVol->nBitsForMBID = 1;
        if (pVol->nTotalMB >= 3)
            pVol->nBitsForMBID = 2;
        if (pVol->nTotalMB >= 5)
            pVol->nBitsForMBID = 3;
        if (pVol->nTotalMB >= 9)
            pVol->nBitsForMBID = 4;
        if (pVol->nTotalMB >= 17)
            pVol->nBitsForMBID = 5;
        if (pVol->nTotalMB >= 33)
            pVol->nBitsForMBID = 6;
        if (pVol->nTotalMB >= 65)
            pVol->nBitsForMBID = 7;
        if (pVol->nTotalMB >= 129)
            pVol->nBitsForMBID = 8;
        if (pVol->nTotalMB >= 257)
            pVol->nBitsForMBID = 9;
        if (pVol->nTotalMB >= 513)
            pVol->nBitsForMBID = 10;
        if (pVol->nTotalMB >= 1025)
            pVol->nBitsForMBID = 11;
        if (pVol->nTotalMB >= 2049)
            pVol->nBitsForMBID = 12;
        if (pVol->nTotalMB >= 4097)
            pVol->nBitsForMBID = 13;
        if (pVol->nTotalMB >= 8193)
            pVol->nBitsForMBID = 14;
        if (pVol->nTotalMB >= 16385)
            pVol->nBitsForMBID = 15;
        if (pVol->nTotalMB >= 32769)
            pVol->nBitsForMBID = 16;
        if (pVol->nTotalMB >= 65537)
            pVol->nBitsForMBID = 17;
        if (pVol->nTotalMB >= 131073)
            pVol->nBitsForMBID = 18;

        /* Short-header mode: fix the GOB layout for the standard H.263 sizes.
           NOTE(review): a PV_FALSE set here does not cause a failure return;
           the function still returns PV_TRUE below -- confirm intent. */
        if (pVol->shortVideoHeader)
        {
            switch (pVol->width)
            {
                case 128:
                    if (pVol->height == 96)  /* source_format = 1: sub-QCIF */
                    {
                        pVol->nGOBinVop = 6;
                        pVol->nMBinGOB = 8;
                    }
                    else
                        status = PV_FALSE;
                    break;

                case 176:
                    if (pVol->height == 144)  /* source_format = 2: QCIF */
                    {
                        pVol->nGOBinVop = 9;
                        pVol->nMBinGOB = 11;
                    }
                    else
                        status = PV_FALSE;
                    break;
                case 352:
                    if (pVol->height == 288)  /* source_format = 3: CIF */
                    {
                        pVol->nGOBinVop = 18;
                        pVol->nMBinGOB = 22;
                    }
                    else
                        status = PV_FALSE;
                    break;

                case 704:
                    if (pVol->height == 576)  /* source_format = 4: 4CIF */
                    {
                        pVol->nGOBinVop = 18;
                        pVol->nMBinGOB = 88;
                    }
                    else
                        status = PV_FALSE;
                    break;
                case 1408:
                    if (pVol->height == 1152)  /* source_format = 5: 16CIF */
                    {
                        pVol->nGOBinVop = 18;
                        pVol->nMBinGOB = 352;
                    }
                    else
                        status = PV_FALSE;
                    break;

                default:
                    status = PV_FALSE;
                    break;
            }
        }
    }

    /***************************************************/
    /* allocate and initialize rate control parameters */
    /***************************************************/

    /* BEGIN INITIALIZATION OF ANNEX L RATE CONTROL */
    if (video->encParams->RC_Type != CONSTANT_Q)
    {
        for (idx = 0; idx < nLayers; idx++) /* 12/25/00 */
        {
            video->rc[idx] = (rateControl *)M4VENC_MALLOC(sizeof(rateControl));
            if (video->rc[idx] == NULL) goto CLEAN_UP;
            M4VENC_MEMSET(video->rc[idx], 0, sizeof(rateControl));
        }
        if (PV_SUCCESS != RC_Initialize(video))
        {
            goto CLEAN_UP;
        }
        /* initialization for 2-pass rate control */
    }
    /* END INITIALIZATION OF ANNEX L RATE CONTROL */

    /********** assign platform dependent functions ***********************/
    /* 1/23/01 */
    /* This must be done at run-time not a compile time */
    video->functionPointer = (FuncPtr*) M4VENC_MALLOC(sizeof(FuncPtr));
    if (video->functionPointer == NULL) goto CLEAN_UP;

    video->functionPointer->ComputeMBSum = &ComputeMBSum_C;
    video->functionPointer->SAD_MB_HalfPel[0] = NULL;
    video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HalfPel_Cxh;
    video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HalfPel_Cyh;
    video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HalfPel_Cxhyh;

#ifndef NO_INTER4V
    video->functionPointer->SAD_Blk_HalfPel = &SAD_Blk_HalfPel_C;
    video->functionPointer->SAD_Block = &SAD_Block_C;
#endif
    video->functionPointer->SAD_Macroblock = &SAD_Macroblock_C;
    video->functionPointer->ChooseMode = &ChooseMode_C;
    video->functionPointer->GetHalfPelMBRegion = &GetHalfPelMBRegion_C;
//  video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING; /* 4/21/01 */

    encoderControl->videoEncoderInit = 1;  /* init done! */

    return PV_TRUE;

CLEAN_UP:
    /* single exit on any failure: release everything allocated so far */
    PVCleanUpVideoEncoder(encoderControl);

    return PV_FALSE;
}

/* ======================================================================== */
/*  Function : PVCleanUpVideoEncoder()                                      */
/*  Date     : 08/22/2000                                                   */
/*  Purpose  : Deallocates allocated memory from InitVideoEncoder()         */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if succeeded, PV_FALSE if failed.
*/
/* Modified : 5/21/01, free only yChan in Vop */
/*                                                                          */
/* ======================================================================== */
/* Tears down everything PVInitVideoEncoder() allocated.  Also used as the
 * CLEAN_UP path of init, so it must tolerate a partially-built encoder:
 * every pointer is NULL-tested before it is freed/closed. */
OSCL_EXPORT_REF Bool PVCleanUpVideoEncoder(VideoEncControls *encoderControl)
{
    Int idx, i;
    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;
    int nTotalMB;
    int max_width, offset;

#ifdef PRINT_RC_INFO
    /* debug-only rate-control accounting dump; facct/tiTotalNumBitsGenerated/
     * iStuffBits are globals defined elsewhere in this file */
    if (facct != NULL)
    {
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUM BITS GENERATED %d\n", tiTotalNumBitsGenerated);
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUMBER OF FRAMES CODED %d\n",
                video->encParams->rc[0]->totalFrameNumber);
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "Average BitRate %d\n",
                (tiTotalNumBitsGenerated / (90 / 30)));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUMBER OF STUFF BITS %d\n", (iStuffBits + 10740));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUMBER OF BITS TO NETWORK %d\n", (35800*90 / 30));;
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "SUM OF STUFF BITS AND GENERATED BITS %d\n",
                (tiTotalNumBitsGenerated + iStuffBits + 10740));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "UNACCOUNTED DIFFERENCE %d\n",
                ((35800*90 / 30) - (tiTotalNumBitsGenerated + iStuffBits + 10740)));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fclose(facct);
    }
#endif

#ifdef PRINT_EC
    fclose(fec);
#endif

    if (video != NULL)
    {
        if (video->QPMB) M4VENC_FREE(video->QPMB);
        if (video->headerInfo.Mode)M4VENC_FREE(video->headerInfo.Mode);
        if (video->headerInfo.CBP)M4VENC_FREE(video->headerInfo.CBP);

        if (video->mot)
        {
            /* free one motion-vector row per macroblock, sized by the largest
             * layer's MB count */
            nTotalMB = video->vol[0]->nTotalMB;
            for (idx = 1; idx < video->currLayer; idx++)
                if (video->vol[idx]->nTotalMB > nTotalMB)
                    nTotalMB = video->vol[idx]->nTotalMB;
            for (idx = 0; idx < nTotalMB; idx++)
            {
                if (video->mot[idx]) M4VENC_FREE(video->mot[idx]);
            }
            M4VENC_FREE(video->mot);
        }

        if (video->intraArray) M4VENC_FREE(video->intraArray);
        if (video->sliceNo)M4VENC_FREE(video->sliceNo);
        if (video->acPredFlag)M4VENC_FREE(video->acPredFlag);
//        if(video->predDCAC)M4VENC_FREE(video->predDCAC);
        if (video->predDC) M4VENC_FREE(video->predDC);
        /* predDCAC_row is only cleared, not freed -- presumably it aliases
         * storage owned by predDCAC_col; TODO confirm against the allocator
         * in PVInitVideoEncoder */
        video->predDCAC_row = NULL;
        if (video->predDCAC_col) M4VENC_FREE(video->predDCAC_col);
        if (video->outputMB)M4VENC_FREE(video->outputMB);

        if (video->bitstream1)BitstreamCloseEnc(video->bitstream1);
        if (video->bitstream2)BitstreamCloseEnc(video->bitstream2);
        if (video->bitstream3)BitstreamCloseEnc(video->bitstream3);
        if (video->overrunBuffer) M4VENC_FREE(video->overrunBuffer);

        /* yChan pointers were advanced by 'offset' at allocation time (to
         * skip padding rows) unless H.263 mode; rewind before freeing */
        max_width = video->encParams->LayerWidth[0];
        max_width = (((max_width + 15) >> 4) << 4); /* 09/19/05 */
        if (video->encParams->H263_Enabled)
        {
            offset = 0;
        }
        else
        {
            offset = ((max_width + 32) << 4) + 16;
        }

        if (video->currVop)
        {
            if (video->currVop->yChan)
            {
                video->currVop->yChan -= offset;
                M4VENC_FREE(video->currVop->yChan);
            }
            M4VENC_FREE(video->currVop);
        }

        if (video->nextBaseVop)
        {
            if (video->nextBaseVop->yChan)
            {
                video->nextBaseVop->yChan -= offset;
                M4VENC_FREE(video->nextBaseVop->yChan);
            }
            M4VENC_FREE(video->nextBaseVop);
        }

        if (video->prevBaseVop)
        {
            if (video->prevBaseVop->yChan)
            {
                video->prevBaseVop->yChan -= offset;
                M4VENC_FREE(video->prevBaseVop->yChan);
            }
            M4VENC_FREE(video->prevBaseVop);
        }
        if (video->prevEnhanceVop)
        {
            if (video->prevEnhanceVop->yChan)
            {
                video->prevEnhanceVop->yChan -= offset;
                M4VENC_FREE(video->prevEnhanceVop->yChan);
            }
            M4VENC_FREE(video->prevEnhanceVop);
        }

        /* 04/09/01, for Vops in the use multipass processing */
        for (idx = 0; idx < video->encParams->nLayers; idx++)
        {
            if (video->pMP[idx])
            {
                if (video->pMP[idx]->pRDSamples)
                {
                    /* sample table is a fixed 30-row array */
                    for (i = 0; i < 30; i++)
                    {
                        if (video->pMP[idx]->pRDSamples[i]) M4VENC_FREE(video->pMP[idx]->pRDSamples[i]);
                    }
                    M4VENC_FREE(video->pMP[idx]->pRDSamples);
                }
                M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));
                M4VENC_FREE(video->pMP[idx]);
            }
        }
        /* // End /////////////////////////////////////// */

        if (video->vol)
        {
            for (idx = 0; idx < video->encParams->nLayers; idx++)
            {
                if (video->vol[idx])
                {
                    if (video->vol[idx]->stream) M4VENC_FREE(video->vol[idx]->stream);
                    M4VENC_FREE(video->vol[idx]);
                }
            }
            M4VENC_FREE(video->vol);
        }

        /***************************************************/
        /* stop rate control parameters                    */
        /***************************************************/

        /* ANNEX L RATE CONTROL -- rc[] only exists when not constant-Q */
        if (video->encParams->RC_Type != CONSTANT_Q)
        {
            RC_Cleanup(video->rc, video->encParams->nLayers);

            for (idx = 0; idx < video->encParams->nLayers; idx++)
            {
                if (video->rc[idx]) M4VENC_FREE(video->rc[idx]);
            }
        }
        if (video->functionPointer) M4VENC_FREE(video->functionPointer);

        /* If application has called PVCleanUpVideoEncoder then we deallocate */
        /* If PVInitVideoEncoder class it, then we DO NOT deallocate */
        if (video->encParams)
        {
            M4VENC_FREE(video->encParams);
        }

        M4VENC_FREE(video);
        encoderControl->videoEncoderData = NULL; /* video */
    }

    encoderControl->videoEncoderInit = 0;

    return PV_TRUE;
}

/* ======================================================================== */
/* Function : PVGetVolHeader() */
/* Date : 7/17/2001, */
/* Purpose : */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer) { VideoEncData *encData; PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl); encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; encData->currLayer = layer; /* Set Layer */ /*pv_status = */ EncodeVOS_Start(encCtrl); /* Encode VOL Header */ encData->encParams->GetVolHeader[layer] = 1; /* Set usage flag: Needed to support old method*/ /* Copy bitstream to buffer and set the size */ if (*size > encData->bitstream1->byteCount) { *size = encData->bitstream1->byteCount; M4VENC_MEMCPY(volHeader, encData->bitstream1->bitstreamBuffer, *size); } else return PV_FALSE; /* Reset bitstream1 buffer parameters */ BitstreamEncReset(encData->bitstream1); return PV_TRUE; } /* ======================================================================== */ /* Function : PVGetOverrunBuffer() */ /* Purpose : Get the overrun buffer ` */ /* In/out : */ /* Return : Pointer to overrun buffer. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl) { VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData; Int currLayer = video->currLayer; Vol *currVol = video->vol[currLayer]; if (currVol->stream->bitstreamBuffer != video->overrunBuffer) // not used { return NULL; } return video->overrunBuffer; } /* ======================================================================== */ /* Function : EncodeVideoFrame() */ /* Date : 08/22/2000 */ /* Purpose : Encode video frame and return bitstream */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/* Modified : */
/* 02.14.2001 */
/* Finishing new timestamp 32-bit input */
/* Applications need to take care of wrap-around */
/* ======================================================================== */
/* Frame-mode entry point: decides whether this input timestamp should be
 * coded at all (rate-control frame skipping), encodes a whole VOP into
 * 'bstream', and swaps the reconstructed/reference Vop pointers. */
OSCL_EXPORT_REF Bool PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out,
                                        ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer)
{
    Bool status = PV_TRUE;
    PV_STATUS pv_status;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    Vop *tempForwRefVop = NULL;         /* saved so a skipped frame can restore it */
    Int tempRefSelCode = 0;
    PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl);
    Int width_16, height_16;
    Int width, height;
    Vop *temp;
    Int encodeVop = 0;
    void PaddingEdge(Vop *padVop);
    Int currLayer = -1;
    //Int nLayers = encParams->nLayers;
    ULong modTime = vid_in->timestamp;

#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
    static Int rand_idx = 0;
#endif

    /*******************************************************/
    /* Determine Next Vop to encode, if any, and nLayer    */
    /*******************************************************/
    //i = nLayers-1;

    if (video->volInitialize[0]) /* first vol to code */
    {
        /* AGI RCS 08/12/09: align the reference time to a multiple of the
         * time-increment resolution */
        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % video->encParams->TimeIncrementRes));
    }

    encodeVop = DetermineCodingLayer(video, nLayer, modTime);
    currLayer = *nLayer;
    if ((currLayer < 0) || (currLayer > encParams->nLayers - 1))
        return PV_FALSE;

    /******************************************/
    /* If post-skipping still effective --- return */
    /******************************************/

    if (!encodeVop) /* skip enh layer, no base layer coded --- return */
    {
#ifdef _PRINT_STAT
        printf("No frame coded. Continue to next frame.");
#endif
        /* expected next code time, convert back to millisec */
        *nextModTime = video->nextModTime;

#ifdef ALLOW_VOP_NOT_CODED
        if (video->vol[0]->shortVideoHeader) /* Short Video Header = 1 */
        {
            *size = 0;
            *nLayer = -1;
        }
        else
        {
            /* emit a minimal "vop_coded = 0" VOP instead of nothing */
            *nLayer = 0;
            EncodeVopNotCoded(video, bstream, size, modTime);
            *size = video->vol[0]->stream->byteCount;
        }
#else
        *size = 0;
        *nLayer = -1;
#endif
        return status;
    }

    //ENCODE_VOP_AGAIN:  /* 12/30/00 */

    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/
    currVol = video->vol[currLayer];
    currVol->stream->bitstreamBuffer = bstream;
    currVol->stream->bufferSize = *size;
    BitstreamEncReset(currVol->stream);
    BitstreamSetOverrunBuffer(currVol->stream, video->overrunBuffer, video->oBSize, video);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/

    if (video->volInitialize[currLayer])
    {
        video->currVop->timeInc = 0;
        video->prevBaseVop->timeInc = 0;
        /* skipped if the app already pulled the header via PVGetVolHeader() */
        if (!video->encParams->GetVolHeader[currLayer])
            pv_status = EncodeVOS_Start(encCtrl);
    }

    /***************************************************/
    /* Copy Input Video Frame to Internal Video Buffer */
    /***************************************************/
    /* Determine Width and Height of Vop Layer */

    width = encParams->LayerWidth[currLayer];   /* Get input width */
    height = encParams->LayerHeight[currLayer]; /* Get input height */
    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */
    width_16 = ((width + 15) / 16) * 16;        /* Round up to nearest multiple of 16 */
    height_16 = ((height + 15) / 16) * 16;      /* Round up to nearest multiple of 16 */

    video->input = vid_in;  /* point to the frame input */

    /*// End ////////////////////////////// */

    /**************************************/
    /* Determine VOP Type                 */
    /* 6/2/2001, separate function        */
    /**************************************/
    DetermineVopType(video, currLayer);

    /****************************/
    /*    Initialize VOP        */
    /****************************/
    video->currVop->volID = currVol->volID;
    video->currVop->width = width_16;
    video->currVop->height = height_16;
    if (video->encParams->H263_Enabled) /* 11/28/05 */
    {
        video->currVop->pitch = width_16;
    }
    else
    {
        /* +32 accounts for the 16-pixel padding border on each side */
        video->currVop->pitch = width_16 + 32;
    }
    video->currVop->timeInc = currVol->timeIncrement;
    video->currVop->vopCoded = 1;
    video->currVop->roundingType = 0;
    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;

    if (currLayer == 0
#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
            || random_val[rand_idx] || video->volInitialize[currLayer]
#endif
       )
    {
        tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevBaseVop;
        video->forwardRefVop->refSelectCode = 1;
    }
#ifdef RANDOM_REFSELCODE
    else
    {
        tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }
    rand_idx++;
    rand_idx %= 30;
#endif

    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;
    video->currVop->gobNumber = 0;
    video->currVop->gobFrameID = video->currVop->predictionType;
    /*
     * AGI 08/12/2009: H.263 temporal reference, modulo 256 per the spec
     */
    video->currVop->temporalRef = (Int)(modTime * video->FrameRate / video->encParams->TimeIncrementRes) % 256;

    video->currVop->temporalInterval = 0;

    if (video->currVop->predictionType == I_VOP)
        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];
    else
        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];

    /****************/
    /* Encode Vop   */
    /****************/
    video->slice_coding = 0;

    pv_status = EncodeVop(video);
#ifdef _PRINT_STAT
    if (video->currVop->predictionType == I_VOP)
        printf(" I-VOP ");
    else
        printf(" P-VOP (ref.%d)", video->forwardRefVop->refSelectCode);
#endif

    /************************************/
    /* Update Skip Next Frame           */
    /************************************/
    *nLayer = UpdateSkipNextFrame(video, nextModTime, size, pv_status);
    if (*nLayer == -1) /* skip current frame */
    {
        /* make sure that pointers are restored to the previous state */
        if (currLayer == 0)
        {
            video->forwardRefVop = tempForwRefVop; /* For P-Vop base only */
            video->forwardRefVop->refSelectCode = tempRefSelCode;
        }

        return status;
    }

    /* If I-VOP was encoded, reset IntraPeriod */
    if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))
        video->nextEncIVop = encParams->IntraPeriod;

    /* Set HintTrack Information */
    if (currLayer != -1)
    {
        if (currVol->prevModuloTimeBase)
            video->hintTrackInfo.MTB = 1;
        else
            video->hintTrackInfo.MTB = 0;
        video->hintTrackInfo.LayerID = (UChar)currVol->volID;
        video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;
        video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;
    }

    /************************************************/
    /* Determine nLayer and timeInc for next encode */
    /* 12/27/00 always go by the highest layer      */
    /************************************************/

    /**********************************************************/
    /* Copy Reconstructed Buffer to Output Video Frame Buffer */
    /**********************************************************/
    vid_out->yChan = video->currVop->yChan;
    vid_out->uChan = video->currVop->uChan;
    vid_out->vChan = video->currVop->vChan;
    if (video->encParams->H263_Enabled)
    {
        vid_out->height = video->currVop->height; /* padded height */
        vid_out->pitch = video->currVop->width;   /* padded width */
    }
    else
    {
        vid_out->height = video->currVop->height + 32; /* padded height */
        vid_out->pitch = video->currVop->width + 32;   /* padded width */
    }
    //video_out->timestamp = video->modTime;
    /* AGI RCS 08/12/09: reconstruct the display timestamp from the coded
     * frame number, layer frame rate and the aligned time reference */
    vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * video->encParams->TimeIncrementRes) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);

    /*// End /////////////////////// */

    /***********************************/
    /* Update Ouput bstream byte count */
    /***********************************/
    *size = currVol->stream->byteCount;

    /****************************************/
    /*  Swap Vop Pointers for Base Layer    */
    /****************************************/
    if (currLayer == 0)
    {
        temp = video->prevBaseVop;
        video->prevBaseVop = video->currVop;
        video->prevBaseVop->padded = 0; /* not padded */
        video->currVop = temp;
        video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */
        video->forwardRefVop->refSelectCode = 1;
    }
    else
    {
        temp = video->prevEnhanceVop;
        video->prevEnhanceVop = video->currVop;
        video->prevEnhanceVop->padded = 0; /* not padded */
        video->currVop = temp;
        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }

    /****************************************/
    /* Modify the intialize flag at the end.*/
    /****************************************/
    if (video->volInitialize[currLayer])
        video->volInitialize[currLayer] = 0;

    return status;
}

#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/* Function : PVEncodeFrameSet() */
/* Date : 04/18/2000 */
/* Purpose : Enter a video frame and perform front-end time check plus ME */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed.
*/
/* Modified : */
/*                                                                          */
/* ======================================================================== */
/* Slice-mode front end: accepts an input frame, performs the frame-skip
 * decision and motion estimation, but leaves bitstream emission to the
 * subsequent PVEncodeSlice() calls (slice_coding = 1). */
OSCL_EXPORT_REF Bool PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer)
{
    Bool status = PV_TRUE;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl);
    Int width_16, height_16;
    Int width, height;
    Int encodeVop = 0;
    void PaddingEdge(Vop *padVop);
    Int currLayer = -1;
    //Int nLayers = encParams->nLayers;
    ULong modTime = vid_in->timestamp;

#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
    static Int rand_idx = 0;
#endif

    /*******************************************************/
    /* Determine Next Vop to encode, if any, and nLayer    */
    /*******************************************************/
    video->modTime = modTime;

    //i = nLayers-1;

    if (video->volInitialize[0]) /* first vol to code */
    {
        /* AGI RCS 08/12/09: align the reference time to a multiple of the
         * time-increment resolution */
        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % video->encParams->TimeIncrementRes));
    }

    encodeVop = DetermineCodingLayer(video, nLayer, modTime);

    currLayer = *nLayer;

    /******************************************/
    /* If post-skipping still effective --- return */
    /******************************************/

    if (!encodeVop) /* skip enh layer, no base layer coded --- return */
    {
#ifdef _PRINT_STAT
        printf("No frame coded. Continue to next frame.");
#endif
        *nLayer = -1;

        /* expected next code time, convert back to millisec */
        *nextModTime = video->nextModTime;;
        return status;
    }

    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/
    currVol = video->vol[currLayer];
    currVol->stream->bufferSize = 0;  /* no app buffer yet; PVEncodeSlice supplies it */
    BitstreamEncReset(currVol->stream);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/

    if (video->volInitialize[currLayer])
    {
        video->currVop->timeInc = 0;
        video->prevBaseVop->timeInc = 0;
    }

    /***************************************************/
    /* Copy Input Video Frame to Internal Video Buffer */
    /***************************************************/
    /* Determine Width and Height of Vop Layer */

    width = encParams->LayerWidth[currLayer];   /* Get input width */
    height = encParams->LayerHeight[currLayer]; /* Get input height */
    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */
    width_16 = ((width + 15) / 16) * 16;        /* Round up to nearest multiple of 16 */
    height_16 = ((height + 15) / 16) * 16;      /* Round up to nearest multiple of 16 */

    video->input = vid_in;  /* point to the frame input */

    /*// End ////////////////////////////// */

    /**************************************/
    /* Determine VOP Type                 */
    /* 6/2/2001, separate function        */
    /**************************************/
    DetermineVopType(video, currLayer);

    /****************************/
    /*    Initialize VOP        */
    /****************************/
    video->currVop->volID = currVol->volID;
    video->currVop->width = width_16;
    video->currVop->height = height_16;
    if (video->encParams->H263_Enabled) /* 11/28/05 */
    {
        video->currVop->pitch = width_16;
    }
    else
    {
        /* +32 accounts for the 16-pixel padding border on each side */
        video->currVop->pitch = width_16 + 32;
    }
    video->currVop->timeInc = currVol->timeIncrement;
    video->currVop->vopCoded = 1;
    video->currVop->roundingType = 0;
    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;

    if (currLayer == 0
#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
            || random_val[rand_idx] || video->volInitialize[currLayer]
#endif
       )
    {
        /* saved state lives in 'video' (not locals) so PVEncodeSlice() can
         * restore it later if the frame is ultimately skipped */
        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevBaseVop;
        video->forwardRefVop->refSelectCode = 1;
    }
#ifdef RANDOM_REFSELCODE
    else
    {
        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }
    rand_idx++;
    rand_idx %= 30;
#endif

    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;
    video->currVop->gobNumber = 0;
    video->currVop->gobFrameID = video->currVop->predictionType;
    /* AGI RCS 08/12/09: H.263 temporal reference, modulo 256 per the spec */
    video->currVop->temporalRef = (Int)((modTime) * video->FrameRate / video->encParams->TimeIncrementRes) % 256;

    video->currVop->temporalInterval = 0;

    if (video->currVop->predictionType == I_VOP)
        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];
    else
        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];

    /****************/
    /* Encode Vop   */
    /****************/
    video->slice_coding = 1;

    /*pv_status =*/ EncodeVop(video);

#ifdef _PRINT_STAT
    if (video->currVop->predictionType == I_VOP)
        printf(" I-VOP ");
    else
        printf(" P-VOP (ref.%d)", video->forwardRefVop->refSelectCode);
#endif

    /* Set HintTrack Information */
    if (currVol->prevModuloTimeBase)
        video->hintTrackInfo.MTB = 1;
    else
        video->hintTrackInfo.MTB = 0;
    video->hintTrackInfo.LayerID = (UChar)currVol->volID;
    video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;
    video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;

    return status;
}
#endif /* NO_SLICE_ENCODE */

#ifndef
NO_SLICE_ENCODE
/* ======================================================================== */
/* Function : PVEncodePacket() */
/* Date : 04/18/2002 */
/* Purpose : Encode one packet and return bitstream */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed. */
/* Modified : */
/*                                                                          */
/* ======================================================================== */
/* Emits one slice of the frame prepared by PVEncodeFrameSet() into the
 * caller's buffer.  *endofFrame is set to 1 when the last macroblock of the
 * frame has been coded, -1 on error, 0 while slices remain. */
OSCL_EXPORT_REF Bool PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size,
                                   Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime)
{
    PV_STATUS pv_status;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl);
    Vop *temp;
    void PaddingEdge(Vop *padVop);
    Int currLayer = video->currLayer;
    Int pre_skip;
    Int pre_size;

    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/
    currVol = video->vol[currLayer];
    currVol->stream->bitstreamBuffer = bstream;
    /* byteCount carries over between slices; size is reported as a delta */
    pre_size = currVol->stream->byteCount;
    currVol->stream->bufferSize = pre_size + (*size);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/

    if (video->volInitialize[currLayer])
    {
        /* skipped if the app already pulled the header via PVGetVolHeader() */
        if (!video->encParams->GetVolHeader[currLayer])
            pv_status = EncodeVOS_Start(encCtrl);
    }

    /****************/
    /* Encode Slice */
    /****************/
    pv_status = EncodeSlice(video);

    *endofFrame = 0;

    if (video->mbnum >= currVol->nTotalMB && !video->end_of_buf)
    {
        *endofFrame = 1;

        /************************************/
        /* Update Skip Next Frame           */
        /************************************/
        pre_skip = UpdateSkipNextFrame(video, nextModTime, size, pv_status); /* modified such that no pre-skipped */

        if (pre_skip == -1) /* error */
        {
            *endofFrame = -1;
            /* make sure that pointers are restored to the previous state
             * saved by PVEncodeFrameSet() */
            if (currLayer == 0)
            {
                video->forwardRefVop = video->tempForwRefVop; /* For P-Vop base only */
                video->forwardRefVop->refSelectCode = video->tempRefSelCode;
            }

            return pv_status;
        }

        /* If I-VOP was encoded, reset IntraPeriod */
        if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))
            video->nextEncIVop = encParams->IntraPeriod;

        /**********************************************************/
        /* Copy Reconstructed Buffer to Output Video Frame Buffer */
        /**********************************************************/
        vid_out->yChan = video->currVop->yChan;
        vid_out->uChan = video->currVop->uChan;
        vid_out->vChan = video->currVop->vChan;
        if (video->encParams->H263_Enabled)
        {
            vid_out->height = video->currVop->height; /* padded height */
            vid_out->pitch = video->currVop->width;   /* padded width */
        }
        else
        {
            vid_out->height = video->currVop->height + 32; /* padded height */
            vid_out->pitch = video->currVop->width + 32;   /* padded width */
        }
        //vid_out->timestamp = video->modTime;
        /* NOTE(review): this uses a hard-coded 1000 where the frame-mode
         * path (PVEncodeVideoFrame) uses encParams->TimeIncrementRes --
         * verify which time base the callers of slice mode expect */
        vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * 1000) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);

        /*// End /////////////////////// */

        /****************************************/
        /*  Swap Vop Pointers for Base Layer    */
        /****************************************/
        if (currLayer == 0)
        {
            temp = video->prevBaseVop;
            video->prevBaseVop = video->currVop;
            video->prevBaseVop->padded = 0; /* not padded */
            video->currVop = temp;
            video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */
            video->forwardRefVop->refSelectCode = 1;
        }
        else
        {
            temp = video->prevEnhanceVop;
            video->prevEnhanceVop = video->currVop;
            video->prevEnhanceVop->padded = 0; /* not padded */
            video->currVop = temp;
            video->forwardRefVop = video->prevEnhanceVop;
            video->forwardRefVop->refSelectCode = 0;
        }
    }

    /***********************************/
    /* Update Ouput bstream byte count */
    /***********************************/
    *size = currVol->stream->byteCount - pre_size;

    /****************************************/
    /* Modify the intialize flag at the end.*/
    /****************************************/
    if (video->volInitialize[currLayer])
        video->volInitialize[currLayer] = 0;

    return pv_status;
}
#endif /* NO_SLICE_ENCODE */

/* ======================================================================== */
/* Function : PVGetH263ProfileLevelID() */
/* Date : 02/05/2003 */
/* Purpose : Get H.263 Profile ID and level ID for profile 0 */
/* In/out : Profile ID=0, levelID is what we want */
/* Return : PV_TRUE if successed, PV_FALSE if failed. */
/* Modified : */
/* Note : h263Level[8], rBR_bound[8], max_h263_framerate[2] */
/* max_h263_width[2], max_h263_height[2] are global */
/*                                                                          */
/* ======================================================================== */
/* Classifies the configured bitrate/framerate/dimensions into an H.263
 * profile-0 level using the global lookup tables named above. */
OSCL_EXPORT_REF Bool PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID)
{
    VideoEncData *encData;
    Int width, height;
    float bitrate_r, framerate;

    /* For this version, we only support H.263 profile 0 */
    *profileID = 0;

    *levelID = 0;
    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL) return PV_FALSE;
    if (encData->encParams == NULL) return PV_FALSE;
    if (!encData->encParams->H263_Enabled) return PV_FALSE;


    /* get image width, height, bitrate and framerate */
    width = encData->encParams->LayerWidth[0];
    height = encData->encParams->LayerHeight[0];
    /* bitrate is expressed in multiples of 64 kbit/s for the level tables */
    bitrate_r = (float)(encData->encParams->LayerBitRate[0]) / (float)64000.0;
    framerate = encData->encParams->LayerFrameRate[0];
    if (!width || !height || !(bitrate_r > 0 && framerate > 0)) return PV_FALSE;

    /* This is the most frequent case : level 10 */
    if (bitrate_r <= rBR_bound[1] && framerate <= max_h263_framerate[0] &&
            (width <= max_h263_width[0] && height <= max_h263_height[0]))
    {
        *levelID = h263Level[1];
        return PV_TRUE;
    }
    else if (bitrate_r > rBR_bound[4] ||
             (width > max_h263_width[1] || height > max_h263_height[1]) ||
             framerate > max_h263_framerate[1])   /* check the highest level 70 */
    {
        *levelID = h263Level[7];
        return PV_TRUE;
    }
    else   /* search level 20, 30, 40 */
    {
        /* pick out level 20 */
        if (bitrate_r <= rBR_bound[2] &&
                ((width <= max_h263_width[0] && height <= max_h263_height[0] && framerate <= max_h263_framerate[1]) ||
                 (width <= max_h263_width[1] && height <= max_h263_height[1] && framerate <= max_h263_framerate[0])))
        {
            *levelID = h263Level[2];
            return PV_TRUE;
        }
        else /* width, height and framerate are ok, now choose level 30 or 40 */
        {
            *levelID = (bitrate_r <= rBR_bound[3] ? h263Level[3] : h263Level[4]);
            return PV_TRUE;
        }
    }
}

/* ======================================================================== */
/* Function : PVGetMPEG4ProfileLevelID() */
/* Date : 26/06/2008 */
/* Purpose : Get MPEG4 Level after initialized */
/* In/out : profile_level according to interface */
/* Return : PV_TRUE if successed, PV_FALSE if failed. */
/* Modified : */
/*                                                                          */
/* ======================================================================== */
/* Maps the configured profile/level code back to its table index; base and
 * scalable layers use separate code tables. */
OSCL_EXPORT_REF Bool PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer)
{
    VideoEncData* video;
    Int i;

    video = (VideoEncData *)encCtrl->videoEncoderData;

    if (nLayer == 0)
    {
        /* if no match is found, i lands past the table end and is reported as-is */
        for (i = 0; i < MAX_BASE_PROFILE + 1; i++)
        {
            if (video->encParams->ProfileLevel[0] == profile_level_code[i])
            {
                break;
            }
        }
        *profile_level = i;
    }
    else
    {
        for (i = 0; i < MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE; i++)
        {
            if (video->encParams->ProfileLevel[1] == scalable_profile_level_code[i])
            {
                break;
            }
        }
        *profile_level = i + MAX_BASE_PROFILE + 1;
    }

    /* NOTE(review): returns C++ 'true' while siblings return PV_TRUE --
     * harmless if PV_TRUE is 1, but inconsistent with the rest of the API */
    return true;
}

#ifndef LIMITED_API
/* ======================================================================== */
/* Function : PVUpdateEncFrameRate */
/* Date : 04/08/2002 */
/* Purpose : Update target frame rates of the encoded base and enhance */
/* layer(if any) while encoding operation is ongoing */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate) { VideoEncData *encData; Int i;// nTotalMB, mbPerSec; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; /* Update the framerates for all the layers */ for (i = 0; i < encData->encParams->nLayers; i++) { /* New check: encoding framerate should be consistent with the given profile and level */ //nTotalMB = (((encData->encParams->LayerWidth[i]+15)/16)*16)*(((encData->encParams->LayerHeight[i]+15)/16)*16)/(16*16); //mbPerSec = (Int)(nTotalMB * frameRate[i]); //if(mbPerSec > encData->encParams->LayerMaxMbsPerSec[i]) return PV_FALSE; if (frameRate[i] > encData->encParams->LayerMaxFrameRate[i]) return PV_FALSE; /* set by users or profile */ encData->encParams->LayerFrameRate[i] = frameRate[i]; } RC_UpdateBXRCParams((void*) encData); return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVUpdateBitRate */ /* Date : 04/08/2002 */ /* Purpose : Update target bit rates of the encoded base and enhance */ /* layer(if any) while encoding operation is ongoing */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate) { VideoEncData *encData; Int i; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; /* Update the bitrates for all the layers */ for (i = 0; i < encData->encParams->nLayers; i++) { if (bitRate[i] > encData->encParams->LayerMaxBitRate[i]) /* set by users or profile */ { return PV_FALSE; } encData->encParams->LayerBitRate[i] = bitRate[i]; } RC_UpdateBXRCParams((void*) encData); return PV_TRUE; } #endif #ifndef LIMITED_API /* ============================================================================ */ /* Function : PVUpdateVBVDelay() */ /* Date : 4/23/2004 */ /* Purpose : Update VBV buffer size(in delay) */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ============================================================================ */ Bool PVUpdateVBVDelay(VideoEncControls *encCtrl, float delay) { VideoEncData *encData; Int total_bitrate, max_buffer_size; int index; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; /* Check whether the input delay is valid based on the given profile */ total_bitrate = (encData->encParams->nLayers == 1 ? encData->encParams->LayerBitRate[0] : encData->encParams->LayerBitRate[1]); index = encData->encParams->profile_table_index; max_buffer_size = (encData->encParams->nLayers == 1 ? 
profile_level_max_VBV_size[index] : scalable_profile_level_max_VBV_size[index]); if (total_bitrate*delay > (float)max_buffer_size) return PV_FALSE; encData->encParams->VBV_delay = delay; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVUpdateIFrameInterval() */ /* Date : 04/10/2002 */ /* Purpose : updates the INTRA frame refresh interval while encoding */ /* is ongoing */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; encData->encParams->IntraPeriod = aIFramePeriod; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVSetNumIntraMBRefresh() */ /* Date : 08/05/2003 */ /* Purpose : */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; encData->encParams->Refresh = numMB; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVIFrameRequest() */ /* Date : 04/10/2002 */ /* Purpose : encodes the next base frame as an I-Vop */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVIFrameRequest(VideoEncControls *encCtrl) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; encData->nextEncIVop = 1; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVGetEncMemoryUsage() */ /* Date : 10/17/2000 */ /* Purpose : */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Int PVGetEncMemoryUsage(VideoEncControls *encCtrl) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; return encData->encParams->MemoryUsage; } #endif /* ======================================================================== */ /* Function : PVGetHintTrack() */ /* Date : 1/17/2001, */ /* Purpose : */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; info->MTB = encData->hintTrackInfo.MTB; info->LayerID = encData->hintTrackInfo.LayerID; info->CodeType = encData->hintTrackInfo.CodeType; info->RefSelCode = encData->hintTrackInfo.RefSelCode; return PV_TRUE; } /* ======================================================================== */ /* Function : PVGetMaxVideoFrameSize() */ /* Date : 7/17/2001, */ /* Purpose : Function merely returns the maximum buffer size */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; *maxVideoFrameSize = encData->encParams->BufferSize[0]; if (encData->encParams->nLayers == 2) if (*maxVideoFrameSize < encData->encParams->BufferSize[1]) *maxVideoFrameSize = encData->encParams->BufferSize[1]; *maxVideoFrameSize >>= 3; /* Convert to Bytes */ if (*maxVideoFrameSize <= 4000) *maxVideoFrameSize = 4000; return PV_TRUE; } #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVGetVBVSize() */ /* Date : 4/15/2002 */ /* Purpose : Function merely returns the maximum buffer size */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/* Modified :                                                               */
/*                                                                          */
/* ======================================================================== */
OSCL_EXPORT_REF Bool PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize)
{
    VideoEncData *encData;

    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL)
        return PV_FALSE;
    if (encData->encParams == NULL)
        return PV_FALSE;

    /* total VBV size is the sum of the two layers' buffer sizes (in bits) */
    *VBVSize = encData->encParams->BufferSize[0];
    if (encData->encParams->nLayers == 2)
        *VBVSize += encData->encParams->BufferSize[1];

    return PV_TRUE;
}
#endif

/* ======================================================================== */
/* Function : EncodeVOS_Start()                                             */
/* Date     : 08/22/2000                                                    */
/* Purpose  : Encodes the VOS, VO, and VOL or Short Headers                 */
/*            (writes the MPEG-4 sequence-level headers into bitstream1;    */
/*            short-header/H.263 mode emits nothing here)                   */
/* In/out   :                                                               */
/* Return   : PV_TRUE if successed, PV_FALSE if failed.                     */
/* Modified :                                                               */
/*                                                                          */
/* ======================================================================== */
PV_STATUS EncodeVOS_Start(VideoEncControls *encoderControl)
{
    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;
    Vol *currVol = video->vol[video->currLayer];
    PV_STATUS status = PV_SUCCESS;
    //int profile_level=0x01;
    BitstreamEncVideo *stream = video->bitstream1;
    int i, j;

    /********************************/
    /* Check for short_video_header */
    /********************************/
    if (currVol->shortVideoHeader == 1)
        return status;  /* short-header mode: no VOS/VO/VOL headers at all */
    else
    {
        /* Short Video Header or M4V */

        /**************************/
        /* VisualObjectSequence ()*/
        /**************************/
        status = BitstreamPutGT16Bits(stream, 32, SESSION_START_CODE);
        /* Determine profile_level */
        status = BitstreamPutBits(stream, 8, video->encParams->ProfileLevel[video->currLayer]);

        /******************/
        /* VisualObject() */
        /******************/
        status = BitstreamPutGT16Bits(stream, 32, VISUAL_OBJECT_START_CODE);
        status = BitstreamPut1Bits(stream, 0x00);   /* visual object identifier */
        status = BitstreamPutBits(stream, 4, 0x01); /* visual object Type == "video ID" */
        status = BitstreamPut1Bits(stream, 0x00);   /* no video signal type */

        /*temp = */
        BitstreamMpeg4ByteAlignStuffing(stream);

        status = BitstreamPutGT16Bits(stream, 27, VO_START_CODE);/* byte align: should be 2 bits */
        status = BitstreamPutBits(stream, 5, 0x00);/* Video ID = 0 */

        /**********************/
        /* VideoObjectLayer() */
        /**********************/
        if (currVol->shortVideoHeader == 0)
        {   /* M4V  else Short Video Header */
            status = BitstreamPutGT16Bits(stream, VOL_START_CODE_LENGTH, VOL_START_CODE);
            status = BitstreamPutBits(stream, 4, currVol->volID);/* video_object_layer_id */
            status = BitstreamPut1Bits(stream, 0x00);/* Random Access = 0  */

            if (video->currLayer == 0)
                status = BitstreamPutBits(stream, 8, 0x01);/* Video Object Type Indication = 1 ... Simple Object Type */
            else
                status = BitstreamPutBits(stream, 8, 0x02);/* Video Object Type Indication = 2 ... Simple Scalable Object Type */

            status = BitstreamPut1Bits(stream, 0x00);/* is_object_layer_identifer = 0 */

            status = BitstreamPutBits(stream, 4, 0x01); /* aspect_ratio_info = 1 ... 1:1(Square) */
            status = BitstreamPut1Bits(stream, 0x00);/* vol_control_parameters = 0 */
            status = BitstreamPutBits(stream, 2, 0x00);/* video_object_layer_shape = 00 ... rectangular */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 16, currVol->timeIncrementResolution);/* vop_time_increment_resolution */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPut1Bits(stream, currVol->fixedVopRate);/* fixed_vop_rate = 0 */

            /* For Rectangular VO layer shape */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->width);/* video_object_layer_width */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->height);/* video_object_layer_height */
            status = BitstreamPut1Bits(stream, 0x01);/*marker bit */

            status = BitstreamPut1Bits(stream, 0x00);/*interlaced = 0 */
            status = BitstreamPut1Bits(stream, 0x01);/* obmc_disable = 1 */
            status = BitstreamPut1Bits(stream, 0x00);/* sprite_enable = 0 */
            status = BitstreamPut1Bits(stream, 0x00);/* not_8_bit = 0 */
            status = BitstreamPut1Bits(stream, currVol->quantType);/* quant_type */

            if (currVol->quantType)
            {
                status = BitstreamPut1Bits(stream, currVol->loadIntraQuantMat); /* Intra quant matrix */
                if (currVol->loadIntraQuantMat)
                {
                    /* find the last position (zigzag order) where the matrix
                     * still changes, so the trailing constant run can be
                     * terminated with a single 0 byte */
                    for (j = 63; j >= 1; j--)
                        if (currVol->iqmat[*(zigzag_i+j)] != currVol->iqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->iqmat[*(zigzag_i+j)] == currVol->iqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->iqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    /* no custom matrix: fall back to the MPEG default */
                    for (j = 0; j < 64; j++)
                        currVol->iqmat[j] = mpeg_iqmat_def[j];
                }
                status = BitstreamPut1Bits(stream, currVol->loadNonIntraQuantMat); /* Non-Intra quant matrix */
                if (currVol->loadNonIntraQuantMat)
                {
                    for (j = 63; j >= 1; j--)
                        if (currVol->niqmat[*(zigzag_i+j)] != currVol->niqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->niqmat[*(zigzag_i+j)] == currVol->niqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->niqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    for (j = 0; j < 64; j++)
                        currVol->niqmat[j] = mpeg_nqmat_def[j];
                }
            }

            status = BitstreamPut1Bits(stream, 0x01);   /* complexity_estimation_disable = 1 */
            status = BitstreamPut1Bits(stream, currVol->ResyncMarkerDisable);/* Resync_marker_disable */
            status = BitstreamPut1Bits(stream, currVol->dataPartitioning);/* Data partitioned */

            if (currVol->dataPartitioning)
                status = BitstreamPut1Bits(stream, currVol->useReverseVLC); /* Reversible_vlc */

            if (currVol->scalability) /* Scalability*/
            {
                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 1 */
                status = BitstreamPut1Bits(stream, currVol->scalType);/* hierarchy _type ... Spatial= 0 and Temporal = 1 */
                status = BitstreamPutBits(stream, 4, currVol->refVolID);/* ref_layer_id */
                status = BitstreamPut1Bits(stream, currVol->refSampDir);/* ref_layer_sampling_direc*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_n);/*hor_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_m);/*hor_sampling_factor_m*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_n);/*vert_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_m);/*vert_sampling_factor_m*/
                status = BitstreamPut1Bits(stream, currVol->enhancementType);/* enhancement_type*/
            }
            else /* No Scalability */
                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 0 */

            /*temp = */
            BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align Headers for VOP */
        }
    }

    return status;
}

/* ======================================================================== */
/* Function : VOS_End()                                                     */
/* Date     : 08/22/2000                                                    */
/* Purpose  : Visual Object Sequence End                                    */
/* In/out   :                                                               */
/* Return   : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */ /* ======================================================================== */ PV_STATUS VOS_End(VideoEncControls *encoderControl) { PV_STATUS status = PV_SUCCESS; VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData; Vol *currVol = video->vol[video->currLayer]; BitstreamEncVideo *stream = currVol->stream; status = BitstreamPutBits(stream, SESSION_END_CODE, 32); return status; } /* ======================================================================== */ /* Function : DetermineCodingLayer */ /* Date : 06/02/2001 */ /* Purpose : Find layer to code based on current mod time, assuming that it's time to encode enhanced layer. */ /* In/out : */ /* Return : Number of layer to code. */ /* Modified : */ /* */ /* ======================================================================== */ Int DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime) { Vol **vol = video->vol; VideoEncParams *encParams = video->encParams; Int numLayers = encParams->nLayers; UInt modTimeRef = video->modTimeRef; float *LayerFrameRate = encParams->LayerFrameRate; UInt frameNum[4], frameTick; ULong frameModTime, nextFrmModTime; #ifdef REDUCE_FRAME_VARIANCE /* To limit how close 2 frames can be */ float frameInterval; #endif float srcFrameInterval; Int frameInc; Int i, extra_skip; Int encodeVop = 0; i = numLayers - 1; if (modTime - video->nextModTime > ((ULong)(-1)) >> 1) /* next time wrapped around */ return 0; /* not time to code it yet */ video->relLayerCodeTime[i] -= 1000; video->nextEncIVop--; /* number of Vops in highest layer resolution. 
*/ video->numVopsInGOP++; /* from this point frameModTime and nextFrmModTime are internal */ frameNum[i] = (UInt)((modTime - modTimeRef) * LayerFrameRate[i] + 500) / 1000; if (video->volInitialize[i]) { video->prevFrameNum[i] = frameNum[i] - 1; } else if (frameNum[i] <= video->prevFrameNum[i]) { return 0; /* do not encode this frame */ } /**** this part computes expected next frame *******/ frameModTime = (ULong)(((frameNum[i] * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */ nextFrmModTime = (ULong)((((frameNum[i] + 1) * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */ srcFrameInterval = 1000 / video->FrameRate; video->nextModTime = nextFrmModTime - (ULong)(srcFrameInterval / 2.) - 1; /* between current and next frame */ #ifdef REDUCE_FRAME_VARIANCE /* To limit how close 2 frames can be */ frameInterval = 1000 / LayerFrameRate[i]; /* next rec. time */ delta = (Int)(frameInterval / 4); /* empirical number */ if (video->nextModTime - modTime < (ULong)delta) /* need to move nextModTime further. */ { video->nextModTime += ((delta - video->nextModTime + modTime)); /* empirical formula */ } #endif /****************************************************/ /* map frame no.to tick from modTimeRef */ /*frameTick = (frameNum[i]*vol[i]->timeIncrementResolution) ; frameTick = (UInt)((frameTick + (encParams->LayerFrameRate[i]/2))/encParams->LayerFrameRate[i]);*/ /* 11/16/01, change frameTick to be the closest tick from the actual modTime */ /* 12/12/02, add (double) to prevent large number wrap-around */ frameTick = (Int)(((double)(modTime - modTimeRef) * vol[i]->timeIncrementResolution + 500) / 1000); /* find timeIncrement to be put in the bitstream */ /* refTick is second boundary reference. 
*/ vol[i]->timeIncrement = frameTick - video->refTick[i]; vol[i]->moduloTimeBase = 0; while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution) { vol[i]->timeIncrement -= vol[i]->timeIncrementResolution; vol[i]->moduloTimeBase++; /* do not update refTick and modTimeRef yet, do it after encoding!! */ } if (video->relLayerCodeTime[i] <= 0) /* no skipping */ { encodeVop = 1; video->currLayer = *nLayer = i; video->relLayerCodeTime[i] += 1000; /* takes care of more dropped frame than expected */ extra_skip = -1; frameInc = (frameNum[i] - video->prevFrameNum[i]); extra_skip += frameInc; if (extra_skip > 0) { /* update rc->Nr, rc->B, (rc->Rr)*/ video->nextEncIVop -= extra_skip; video->numVopsInGOP += extra_skip; if (encParams->RC_Type != CONSTANT_Q) { RC_UpdateBuffer(video, i, extra_skip); } } } /* update frame no. */ video->prevFrameNum[i] = frameNum[i]; /* go through all lower layer */ for (i = (numLayers - 2); i >= 0; i--) { video->relLayerCodeTime[i] -= 1000; /* find timeIncrement to be put in the bitstream */ vol[i]->timeIncrement = frameTick - video->refTick[i]; if (video->relLayerCodeTime[i] <= 0) /* time to encode base */ { /* 12/27/00 */ encodeVop = 1; video->currLayer = *nLayer = i; video->relLayerCodeTime[i] += (Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]); vol[i]->moduloTimeBase = 0; while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution) { vol[i]->timeIncrement -= vol[i]->timeIncrementResolution; vol[i]->moduloTimeBase++; /* do not update refTick and modTimeRef yet, do it after encoding!! 
*/ } /* takes care of more dropped frame than expected */ frameNum[i] = (UInt)((frameModTime - modTimeRef) * encParams->LayerFrameRate[i] + 500) / 1000; if (video->volInitialize[i]) video->prevFrameNum[i] = frameNum[i] - 1; extra_skip = -1; frameInc = (frameNum[i] - video->prevFrameNum[i]); extra_skip += frameInc; if (extra_skip > 0) { /* update rc->Nr, rc->B, (rc->Rr)*/ if (encParams->RC_Type != CONSTANT_Q) { RC_UpdateBuffer(video, i, extra_skip); } } /* update frame no. */ video->prevFrameNum[i] = frameNum[i]; } } #ifdef _PRINT_STAT if (encodeVop) printf(" TI: %d ", vol[*nLayer]->timeIncrement); #endif return encodeVop; } /* ======================================================================== */ /* Function : DetermineVopType */ /* Date : 06/02/2001 */ /* Purpose : The name says it all. */ /* In/out : */ /* Return : void . */ /* Modified : */ /* */ /* ======================================================================== */ void DetermineVopType(VideoEncData *video, Int currLayer) { VideoEncParams *encParams = video->encParams; // Vol *currVol = video->vol[currLayer]; if (encParams->IntraPeriod == 0) /* I-VOPs only */ { if (video->currLayer > 0) video->currVop->predictionType = P_VOP; else { video->currVop->predictionType = I_VOP; if (video->numVopsInGOP >= 132) video->numVopsInGOP = 0; } } else if (encParams->IntraPeriod == -1) /* IPPPPP... */ { /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */ if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1) video->currVop->predictionType = P_VOP; if (video->currLayer == 0) { if (/*video->numVopsInGOP>=132 || */video->volInitialize[currLayer]) { video->currVop->predictionType = I_VOP; video->numVopsInGOP = 0; /* force INTRA update every 132 base frames*/ video->nextEncIVop = 1; } else if (video->nextEncIVop == 0 || video->currVop->predictionType == I_VOP) { video->numVopsInGOP = 0; video->nextEncIVop = 1; } } } else /* IntraPeriod>0 : IPPPPPIPPPPPI... 
*/ { /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */ if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1) video->currVop->predictionType = P_VOP; if (currLayer == 0) { if (video->nextEncIVop <= 0 || video->currVop->predictionType == I_VOP) { video->nextEncIVop = encParams->IntraPeriod; video->currVop->predictionType = I_VOP; video->numVopsInGOP = 0; } } } return ; } /* ======================================================================== */ /* Function : UpdateSkipNextFrame */ /* Date : 06/02/2001 */ /* Purpose : From rate control frame skipping decision, update timing related parameters. */ /* In/out : */ /* Return : Current coded layer. */ /* Modified : */ /* */ /* ======================================================================== */ Int UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status) { Int currLayer = video->currLayer; Int nLayer = currLayer; VideoEncParams *encParams = video->encParams; Int numLayers = encParams->nLayers; Vol *currVol = video->vol[currLayer]; Vol **vol = video->vol; Int num_skip, extra_skip; Int i; UInt newRefTick, deltaModTime; UInt temp; if (encParams->RC_Type != CONSTANT_Q) { if (video->volInitialize[0] && currLayer == 0) /* always encode the first frame */ { RC_ResetSkipNextFrame(video, currLayer); //return currLayer; 09/15/05 } else { if (RC_GetSkipNextFrame(video, currLayer) < 0 || status == PV_END_OF_BUF) /* Skip Current Frame */ { #ifdef _PRINT_STAT printf("Skip current frame"); #endif currVol->moduloTimeBase = currVol->prevModuloTimeBase; /*********************/ /* prepare to return */ /*********************/ *size = 0; /* Set Bitstream buffer to zero */ /* Determine nLayer and modTime for next encode */ *modTime = video->nextModTime; nLayer = -1; return nLayer; /* return immediately without updating RefTick & modTimeRef */ /* If I-VOP was attempted, then ensure next base is I-VOP */ /*if((encParams->IntraPeriod>0) && 
(video->currVop->predictionType == I_VOP)) video->nextEncIVop = 0; commented out by 06/05/01 */ } else if ((num_skip = RC_GetSkipNextFrame(video, currLayer)) > 0) { #ifdef _PRINT_STAT printf("Skip next %d frames", num_skip); #endif /* to keep the Nr of enh layer the same */ /* adjust relLayerCodeTime only, do not adjust layerCodeTime[numLayers-1] */ extra_skip = 0; for (i = 0; i < currLayer; i++) { if (video->relLayerCodeTime[i] <= 1000) { extra_skip = 1; break; } } for (i = currLayer; i < numLayers; i++) { video->relLayerCodeTime[i] += (num_skip + extra_skip) * ((Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i])); } } }/* first frame */ } /***** current frame is encoded, now update refTick ******/ video->refTick[currLayer] += vol[currLayer]->prevModuloTimeBase * vol[currLayer]->timeIncrementResolution; /* Reset layerCodeTime every I-VOP to prevent overflow */ if (currLayer == 0) { /* 12/12/02, fix for weird targer frame rate of 9.99 fps or 3.33 fps */ if (((encParams->IntraPeriod != 0) /*&& (video->currVop->predictionType==I_VOP)*/) || ((encParams->IntraPeriod == 0) && (video->numVopsInGOP == 0))) { newRefTick = video->refTick[0]; for (i = 1; i < numLayers; i++) { if (video->refTick[i] < newRefTick) newRefTick = video->refTick[i]; } /* check to make sure that the update is integer multiple of frame number */ /* how many msec elapsed from last modTimeRef */ deltaModTime = (newRefTick / vol[0]->timeIncrementResolution) * 1000; for (i = numLayers - 1; i >= 0; i--) { temp = (UInt)(deltaModTime * encParams->LayerFrameRate[i]); /* 12/12/02 */ if (temp % 1000) newRefTick = 0; } if (newRefTick > 0) { video->modTimeRef += deltaModTime; for (i = numLayers - 1; i >= 0; i--) { video->prevFrameNum[i] -= (UInt)(deltaModTime * encParams->LayerFrameRate[i]) / 1000; video->refTick[i] -= newRefTick; } } } } *modTime = video->nextModTime; return nLayer; } #ifndef ORIGINAL_VERSION /* 
======================================================================== */
/* Function : SetProfile_BufferSize                                         */
/* Date     : 04/08/2002                                                    */
/* Purpose  : Set profile and video buffer size, copied from Jim's code     */
/*            in PVInitVideoEncoder(.), since we have different places      */
/*            to reset profile and video buffer size                        */
/*            (bInitialized==0: only recompute the VBV buffer sizes;        */
/*             bInitialized!=0: also search for matching profile@level)     */
/* In/out   :                                                               */
/* Return   :                                                               */
/* Modified :                                                               */
/*                                                                          */
/* ======================================================================== */
Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized)
{
    Int i, j, start, end;
//    Int BaseMBsPerSec = 0, EnhMBsPerSec = 0;
    Int nTotalMB = 0;
    Int idx, temp_w, temp_h, max = 0, max_width, max_height;

    Int nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */

    Int total_bitrate = 0, base_bitrate;
    Int total_packet_size = 0, base_packet_size;
    Int total_MBsPerSec = 0, base_MBsPerSec;
    Int total_VBV_size = 0, base_VBV_size, enhance_VBV_size = 0;
    float total_framerate, base_framerate;
    float upper_bound_ratio;
    Int bFound = 0;
    Int k = 0, width16, height16, index;
    Int lowest_level;

#define MIN_BUFF    16000 /* 16k minimum buffer size */
#define BUFF_CONST  2.0    /* 2000ms */
#define UPPER_BOUND_RATIO 8.54 /* upper_bound = 1.4*(1.1+bound/10)*bitrate/framerate */

#define QCIF_WIDTH  176
#define QCIF_HEIGHT 144

    index = video->encParams->profile_table_index;

    /* Calculate "nTotalMB" */
    /* Find the maximum width*height for memory allocation of the VOPs */
    for (idx = 0; idx < nLayers; idx++)
    {
        temp_w = video->encParams->LayerWidth[idx];
        temp_h = video->encParams->LayerHeight[idx];

        if ((temp_w*temp_h) > max)
        {
            max = temp_w * temp_h;
            max_width = temp_w;
            max_height = temp_h;
            nTotalMB = ((max_width + 15) >> 4) * ((max_height + 15) >> 4);
        }
    }
    upper_bound_ratio = (video->encParams->RC_Type == CBR_LOWDELAY ? (float)5.0 : (float)UPPER_BOUND_RATIO);

    /* Get the basic information: bitrate, packet_size, MBs/s and VBV_size */
    base_bitrate = video->encParams->LayerBitRate[0];
    if (video->encParams->LayerMaxBitRate[0] != 0) /* video->encParams->LayerMaxBitRate[0] == 0 means it has not been set */
    {
        base_bitrate = PV_MAX(base_bitrate, video->encParams->LayerMaxBitRate[0]);
    }
    else /* if the max is not set, set it to the specified profile/level */
    {
        video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[index];
    }

    base_framerate = video->encParams->LayerFrameRate[0];
    if (video->encParams->LayerMaxFrameRate[0] != 0)
    {
        base_framerate = PV_MAX(base_framerate, video->encParams->LayerMaxFrameRate[0]);
    }
    else /* if the max is not set, set it to the specified profile/level */
    {
        video->encParams->LayerMaxFrameRate[0] = (float)profile_level_max_mbsPerSec[index] / nTotalMB;
    }

    base_packet_size = video->encParams->ResyncPacketsize;
    base_MBsPerSec = (Int)(base_framerate * nTotalMB);
    /* VBV size is the larger of bitrate*delay and the empirical upper bound */
    base_VBV_size = PV_MAX((Int)(base_bitrate * delay),
                           (Int)(upper_bound_ratio * base_bitrate / base_framerate));
    base_VBV_size = PV_MAX(base_VBV_size, MIN_BUFF);

    /* if the buffer is larger than maximum buffer size, we'll clip it */
    if (base_VBV_size > profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5])
        base_VBV_size = profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5];

    /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */
    if (nLayers == 1 && base_VBV_size > profile_level_max_VBV_size[index])
        return FALSE;

    if (nLayers == 2) /* check both enhanced and base layer */
    {
        total_bitrate = video->encParams->LayerBitRate[1];
        if (video->encParams->LayerMaxBitRate[1] != 0)
        {
            total_bitrate = PV_MIN(total_bitrate, video->encParams->LayerMaxBitRate[1]);
        }
        else /* if the max is not set, set it to the specified profile/level */
        {
            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[index];
        }

        total_framerate = video->encParams->LayerFrameRate[1];
        if (video->encParams->LayerMaxFrameRate[1] != 0)
        {
            total_framerate = PV_MIN(total_framerate, video->encParams->LayerMaxFrameRate[1]);
        }
        else /* if the max is not set, set it to the specified profile/level */
        {
            video->encParams->LayerMaxFrameRate[1] = (float)scalable_profile_level_max_mbsPerSec[index] / nTotalMB;
        }

        total_packet_size = video->encParams->ResyncPacketsize;
        total_MBsPerSec = (Int)(total_framerate * nTotalMB);
        /* enhancement-layer buffer is sized on the rate/framerate deltas */
        enhance_VBV_size = PV_MAX((Int)((total_bitrate - base_bitrate) * delay),
                                  (Int)(upper_bound_ratio * (total_bitrate - base_bitrate) / (total_framerate - base_framerate)));
        enhance_VBV_size = PV_MAX(enhance_VBV_size, MIN_BUFF);

        total_VBV_size = base_VBV_size + enhance_VBV_size;

        /* if the buffer is larger than maximum buffer size, we'll clip it */
        if (total_VBV_size > scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1])
        {
            total_VBV_size = scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1];
            enhance_VBV_size = total_VBV_size - base_VBV_size;
        }

        /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */
        if (total_VBV_size > scalable_profile_level_max_VBV_size[index])
            return FALSE;
    }

    if (!bInitialized) /* Has been initialized --> profile @ level has been figured out! */
    {
        video->encParams->BufferSize[0] = base_VBV_size;
        if (nLayers > 1)
            video->encParams->BufferSize[1] = enhance_VBV_size;

        return PV_TRUE;
    }

    /* Profile @ level determination */
    if (nLayers == 1)
    {
        /* check other parameters */
        /* BASE ONLY : Simple Profile(SP) Or Core Profile(CP) */
        if (base_bitrate > profile_level_max_bitrate[index] ||
            base_packet_size > profile_level_max_packet_size[index] ||
            base_MBsPerSec > profile_level_max_mbsPerSec[index] ||
            base_VBV_size > profile_level_max_VBV_size[index])
            return PV_FALSE; /* Beyond the bound of Core Profile @ Level2 */

        /* For H263/Short header, determine k*16384 */
        /* This part only applies to Short header mode, but not H.263 */
        width16 = ((video->encParams->LayerWidth[0] + 15) >> 4) << 4;
        height16 = ((video->encParams->LayerHeight[0] + 15) >> 4) << 4;
        if (video->encParams->H263_Enabled)
        {
            k = 4;
            if (width16 == 2*QCIF_WIDTH && height16 == 2*QCIF_HEIGHT) /* CIF */
                k = 16;
            else if (width16 == 4*QCIF_WIDTH && height16 == 4*QCIF_HEIGHT) /* 4CIF */
                k = 32;
            else if (width16 == 8*QCIF_WIDTH && height16 == 8*QCIF_HEIGHT) /* 16CIF */
                k = 64;

            video->encParams->maxFrameSize = k * 16384;

            /* Make sure the buffer size is limited to the top profile and level:
               the Core profile and level 2 */
            /* AGI RCS 08/12/09 */
            if (base_VBV_size > (Int)(video->encParams->maxFrameSize + video->encParams->VBV_delay*(float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5]))
                base_VBV_size = (Int)(video->encParams->maxFrameSize + video->encParams->VBV_delay*(float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5]);

            if (base_VBV_size > (Int)(video->encParams->maxFrameSize + video->encParams->VBV_delay*(float)profile_level_max_bitrate[index]))
                return PV_FALSE;
        }

        /* Search the appropriate profile@level index */
        if (!video->encParams->H263_Enabled &&
            (video->encParams->IntraDCVlcThr != 0 || video->encParams->SearchRange > 16))
        {
            lowest_level = SIMPLE_PROFILE_LEVEL1; /* cannot allow SPL0 */
        }
        else
        {
            lowest_level = SIMPLE_PROFILE_LEVEL0; /* SPL0 */
        }

        for (i = lowest_level; i <= index; i++)
        {
            /* Since CPL1 is smaller than SPL4A, SPL5, this search favors Simple Profile. */
            if (base_bitrate <= profile_level_max_bitrate[i] &&
                base_packet_size <= profile_level_max_packet_size[i] &&
                base_MBsPerSec <= profile_level_max_mbsPerSec[i] &&
                /* AGI RCS 08/12/09 */
                base_VBV_size <= (video->encParams->H263_Enabled ?
                                  (Int)(k*16384 + video->encParams->VBV_delay*(float)profile_level_max_bitrate[i]) :
                                  profile_level_max_VBV_size[i]))
                break;
        }
        if (i > index)
            return PV_FALSE; /* Nothing found!! */

        /* Found out the actual profile @ level : index "i" */
        if (i == 0)
        {
            /* For Simple Profile @ Level 0, we need to do one more check: image size <= QCIF */
            if (width16 > QCIF_WIDTH || height16 > QCIF_HEIGHT)
                i = 1; /* image size > QCIF, then set SP level1 */
        }

        video->encParams->ProfileLevel[0] = profile_level_code[i];
        video->encParams->BufferSize[0]   = base_VBV_size;

        if (video->encParams->LayerMaxBitRate[0] == 0)
            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[i];

        if (video->encParams->LayerMaxFrameRate[0] == 0)
            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[i] / nTotalMB);

        /* For H263/Short header, one special constraint for VBV buffer size */
        if (video->encParams->H263_Enabled)
            /* AGI RCS 08/12/09 */
            video->encParams->BufferSize[0] = (Int)(k * 16384 +
                4 * (float)profile_level_max_bitrate[i] *
                (video->encParams->TimeIncrementRes / video->FrameRate) / video->encParams->TimeIncrementRes);

    }
    else
    {
        /* SCALABALE MODE: Simple Scalable Profile(SSP) Or Core Scalable Profile(CSP) */
        if (total_bitrate > scalable_profile_level_max_bitrate[index] ||
            total_packet_size > scalable_profile_level_max_packet_size[index] ||
            total_MBsPerSec > scalable_profile_level_max_mbsPerSec[index] ||
            total_VBV_size > scalable_profile_level_max_VBV_size[index])
            return PV_FALSE; /* Beyond given profile and level */

        /* One-time check: Simple Scalable Profile or Core Scalable Profile */
        if (total_bitrate <= scalable_profile_level_max_bitrate[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1] &&
            total_packet_size <= scalable_profile_level_max_packet_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1] &&
            total_MBsPerSec <= scalable_profile_level_max_mbsPerSec[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1] &&
            total_VBV_size <= scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1])
        {
            start = 0;
            end = index;
        }
        else
        {
            start = CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1;
            end = index;
        }

        /* Search the scalable profile */
        for (i = start; i <= end; i++)
        {
            if (total_bitrate <= scalable_profile_level_max_bitrate[i] &&
                total_packet_size <= scalable_profile_level_max_packet_size[i] &&
                total_MBsPerSec <= scalable_profile_level_max_mbsPerSec[i] &&
                total_VBV_size <= scalable_profile_level_max_VBV_size[i])
                break;
        }
        if (i > end)
            return PV_FALSE;

        /* Search for matching base profile */
        if (i == 0)
        {
            j = 0;
            bFound = 1;
        }
        else bFound = 0;

        if (i >= CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1)
        {
            start = CORE_PROFILE_LEVEL1; /* range for CORE PROFILE */
            end = CORE_PROFILE_LEVEL2;
        }
        else
        {
            start = SIMPLE_PROFILE_LEVEL0; /* range for SIMPLE PROFILE */
            end = SIMPLE_PROFILE_LEVEL5;
        }

        for (j = start; !bFound && j <= end; j++)
        {
            if (base_bitrate <= profile_level_max_bitrate[j] &&
                base_packet_size <= profile_level_max_packet_size[j] &&
                base_MBsPerSec <= profile_level_max_mbsPerSec[j] &&
                base_VBV_size <= profile_level_max_VBV_size[j])
            {
                bFound = 1;
                break;
            }
        }

        if (!bFound) // && start == 4)
            return PV_FALSE; /* mis-match in the profiles between base layer and enhancement layer */

        /* j for base layer, i for enhancement layer */
        video->encParams->ProfileLevel[0] = profile_level_code[j];
        video->encParams->ProfileLevel[1] = scalable_profile_level_code[i];
        video->encParams->BufferSize[0]   = base_VBV_size;
        video->encParams->BufferSize[1]   = enhance_VBV_size;

        if (video->encParams->LayerMaxBitRate[0] == 0)
            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[j];

        if (video->encParams->LayerMaxBitRate[1] == 0)
            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[i];

        if (video->encParams->LayerMaxFrameRate[0] == 0)
            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[j] / nTotalMB);

        if (video->encParams->LayerMaxFrameRate[1] == 0)
            video->encParams->LayerMaxFrameRate[1] = PV_MIN(30, (float)scalable_profile_level_max_mbsPerSec[i] / nTotalMB);

    } /* end of: if(nLayers == 1) */

    if (!video->encParams->H263_Enabled && (video->encParams->ProfileLevel[0] == 0x08)) /* SPL0 restriction*/
    {
        /* PV only allow frame-based rate control, no QP change from one MB to another
        if(video->encParams->ACDCPrediction == TRUE && MB-based rate control)
        return PV_FALSE */
    }

    return PV_TRUE;
}

#endif /* #ifndef ORIGINAL_VERSION */



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/mp4enc_api.cpp.original
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */

#include "mp4enc_lib.h"
#include "bitstream_io.h"
#include "rate_control.h"
#include "m4venc_oscl.h"

/* Inverse normal zigzag: maps zigzag scan position -> raster index in an 8x8 block */
const static Int zigzag_i[NCOEFF_BLOCK] =
{
    0, 1, 8, 16, 9, 2, 3, 10,
    17, 24, 32, 25, 18, 11, 4, 5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13, 6, 7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

/* INTRA: default MPEG quantization matrix for intra blocks */
const static Int mpeg_iqmat_def[NCOEFF_BLOCK] =
{
    8, 17, 18, 19, 21, 23, 25, 27,
    17, 18, 19, 21, 23, 25, 27, 28,
    20, 21, 22, 23, 24, 26, 28, 30,
    21, 22, 23, 24, 26, 28, 30, 32,
    22, 23, 24, 26, 28, 30, 32, 35,
    23, 24, 26, 28, 30, 32, 35, 38,
    25, 26, 28, 30, 32, 35, 38, 41,
    27, 28, 30, 32, 35, 38, 41, 45
};

/* INTER: default MPEG quantization matrix for non-intra blocks */
const static Int mpeg_nqmat_def[64] =
{
    16, 17, 18, 19, 20, 21, 22, 23,
    17, 18, 19, 20, 21, 22, 23, 24,
    18, 19, 20, 21, 22, 23, 24, 25,
    19, 20, 21, 22, 23, 24, 26, 27,
    20, 21, 22, 23, 25, 26, 27, 28,
    21, 22, 23, 24, 26, 27, 28, 30,
    22, 23, 24, 26, 27, 28, 30, 31,
    23, 24, 25, 27, 28, 30, 31, 33
};

/* Profiles and levels (per MPEG-4 Visual, ISO/IEC 14496-2, Annex N tables) */
/* Simple profile(level 0-3) and Core profile (level 1-2) */
/* {SPL0, SPL1, SPL2, SPL3, SPL4a, SPL5, CPL1, CPL2} ,
   SPL0: Simple Profile@Level0, CPL1: Core Profile@Level1 */
/* profile_and_level_indication codes written to the VOS header */
const static Int profile_level_code[MAX_BASE_PROFILE+1] =
{
    0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x21, 0x22
};

/* maximum bitrate per profile/level, in bits per second */
const static Int profile_level_max_bitrate[MAX_BASE_PROFILE+1] =
{
    64000, 64000, 128000, 384000, 4000000, 8000000, 384000, 2000000
};

/* maximum video-packet size per profile/level, in bits */
const static Int profile_level_max_packet_size[MAX_BASE_PROFILE+1] =
{
    2048, 2048, 4096, 8192, 16384, 16384, 4096, 8192
};

/* maximum macroblock processing rate per profile/level, in MB/s */
const static Int profile_level_max_mbsPerSec[MAX_BASE_PROFILE+1] =
{
    1485, 1485, 5940, 11880, 36000, 40500, 5940, 23760
};

/* maximum VBV buffer size per profile/level, in bits */
const static Int profile_level_max_VBV_size[MAX_BASE_PROFILE+1] =
{
    163840, 163840, 655360, 655360, 1310720, 1835008, 262144, 1310720
};

/* Scalable profiles for nLayers = 2 */
/* Simple scalable profile (level 0-2) and Core scalable profile (level 1-3) */
/* {SSPL0, SSPL1, SSPL2, CSPL1, CSPL2, CSPL3} ,
   SSPL0: Simple Scalable Profile@Level0, CSPL1: Core Scalable Profile@Level1,
   the fourth is redundant for easy table manipulation */
const static Int scalable_profile_level_code[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    0x10, 0x11, 0x12, 0xA1, 0xA2, 0xA3
};

const static Int scalable_profile_level_max_bitrate[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    128000, 128000, 256000, 768000, 1500000, 4000000
};

/* in bits */
const static Int scalable_profile_level_max_packet_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    2048, 2048, 4096, 4096, 4096, 16384
};

const static Int scalable_profile_level_max_mbsPerSec[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    1485, 7425, 23760, 14850, 29700, 120960
};

const static Int scalable_profile_level_max_VBV_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =
{
    163840, 655360, 655360, 1048576, 1310720, 1310720
};

/* H263 profile 0 @ level 10-70 */
const static Int   h263Level[8] = {0, 10, 20, 30, 40, 50, 60, 70};
/* bitrate bound per level, in multiples of 64000 bps (see SetProfile_BufferSize usage) */
const static float rBR_bound[8] = {0, 1, 2, 6, 32, 64, 128, 256};
/* max picture rate: index 0 = sub-QCIF/QCIF (29.97/2 fps base clock 30000/2002),
   index 1 = larger formats (30000/1001) */
const static float max_h263_framerate[2] = {(float)30000 / (float)2002,
        (float)30000 / (float)1001
                                           };
const static Int   max_h263_width[2]  = {176, 352};
const static Int   max_h263_height[2] = {144, 288};

/* 6/2/2001, newly added functions to make PVEncodeVop more readable.
*/ Int DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime); void DetermineVopType(VideoEncData *video, Int currLayer); Int UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status); Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized); #ifdef PRINT_RC_INFO extern FILE *facct; extern int tiTotalNumBitsGenerated; extern int iStuffBits; #endif #ifdef PRINT_EC extern FILE *fec; #endif /* ======================================================================== */ /* Function : PVGetDefaultEncOption() */ /* Date : 12/12/2005 */ /* Purpose : */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase) { VideoEncOptions defaultUseCase = {H263_MODE, profile_level_max_packet_size[SIMPLE_PROFILE_LEVEL0] >> 3, SIMPLE_PROFILE_LEVEL0, PV_OFF, 0, 1, 1000, 33, {144, 144}, {176, 176}, {15, 30}, {64000, 128000}, {10, 10}, {12, 12}, {0, 0}, CBR_1, 0.0, PV_OFF, -1, 0, PV_OFF, 16, PV_OFF, 0, PV_ON }; OSCL_UNUSED_ARG(encUseCase); // unused for now. Later we can add more defaults setting and use this // argument to select the right one. /* in the future we can create more meaningful use-cases */ if (encOption == NULL) { return PV_FALSE; } M4VENC_MEMCPY(encOption, &defaultUseCase, sizeof(VideoEncOptions)); return PV_TRUE; } /* ======================================================================== */ /* Function : PVInitVideoEncoder() */ /* Date : 08/22/2000 */ /* Purpose : Initialization of MP4 Encoder and VO bitstream */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/*  Modified : 5/21/01, allocate only yChan and assign uChan & vChan        */
/*             12/12/05, add encoding option as input argument              */
/* ======================================================================== */
/* Validates the requested profile/level, packet size, frame geometry,
   frame rate, bit rate and rate-control mode, then allocates every
   per-instance buffer (VOPs, motion vectors, prediction stores, working
   bitstreams, rate-control state).  Any failure jumps to CLEAN_UP, which
   frees whatever was allocated so far and returns PV_FALSE. */
OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, VideoEncOptions *encOption)
{
    Bool status = PV_TRUE;
    Int nLayers, idx, i, j;
    Int max = 0, max_width = 0, max_height = 0, pitch, offset;
    Int size = 0, nTotalMB = 0;
    VideoEncData *video;
    Vol *pVol;
    VideoEncParams *pEncParams;
    Int temp_w, temp_h, mbsPerSec;

    /******************************************/
    /* this part use to be PVSetEncode()      */
    Int profile_table_index, *profile_level_table;
    Int profile_level = encOption->profile_level;
    Int PacketSize = encOption->packetSize << 3;    /* bytes -> bits */
    Int timeInc, timeIncRes;
    float profile_max_framerate;
    VideoEncParams *encParams;

    if (encoderControl->videoEncoderData) /* this has been called */
    {
        if (encoderControl->videoEncoderInit) /* check if PVInitVideoEncoder() has been called  */
        {
            PVCleanUpVideoEncoder(encoderControl);
            encoderControl->videoEncoderInit = 0;
        }
        M4VENC_FREE(encoderControl->videoEncoderData);
        encoderControl->videoEncoderData = NULL;
    }
    encoderControl->videoEncoderInit = 0;   /* reset this value */

    video = (VideoEncData *)M4VENC_MALLOC(sizeof(VideoEncData)); /* allocate memory for encData */

    if (video == NULL)
        return PV_FALSE;

    M4VENC_MEMSET(video, 0, sizeof(VideoEncData));

    encoderControl->videoEncoderData = (void *) video;         /* set up pointer in VideoEncData structure */

    video->encParams = (VideoEncParams *)M4VENC_MALLOC(sizeof(VideoEncParams));
    if (video->encParams == NULL)
        goto CLEAN_UP;

    M4VENC_MEMSET(video->encParams, 0, sizeof(VideoEncParams));

    encParams = video->encParams;
    encParams->nLayers = encOption->numLayers;

    /* Check whether the input packetsize is valid (Note: put code here (before any memory allocation)
       in order to avoid memory leak */
    if ((Int)profile_level <= (Int)(MAX_BASE_PROFILE)) /* non-scalable profile */
    {
        profile_level_table = (Int *)profile_level_max_packet_size;
        profile_table_index = (Int)profile_level;
        if (encParams->nLayers != 1)
        {
            goto CLEAN_UP;
        }

        encParams->LayerMaxMbsPerSec[0] = profile_level_max_mbsPerSec[profile_table_index];

    }
    else /* scalable profile */
    {
        profile_level_table = (Int *)scalable_profile_level_max_packet_size;
        /* scalable tables are indexed from 0, offset past the base-profile entries */
        profile_table_index = (Int)profile_level - (Int)(MAX_BASE_PROFILE) - 1;
        if (encParams->nLayers < 2)
        {
            goto CLEAN_UP;
        }
        for (i = 0; i < encParams->nLayers; i++)
        {
            encParams->LayerMaxMbsPerSec[i] = scalable_profile_level_max_mbsPerSec[profile_table_index];
        }

    }

    /* cannot have zero size packet with these modes */
    if (PacketSize == 0)
    {
        if (encOption->encMode == DATA_PARTITIONING_MODE)
        {
            goto CLEAN_UP;
        }
        if (encOption->encMode == COMBINE_MODE_WITH_ERR_RES)
        {
            encOption->encMode = COMBINE_MODE_NO_ERR_RES;
        }
    }
    /* without GOB headers, the error-resilient H.263 modes degenerate to plain ones */
    if (encOption->gobHeaderInterval == 0)
    {
        if (encOption->encMode == H263_MODE_WITH_ERR_RES)
        {
            encOption->encMode = H263_MODE;
        }

        if (encOption->encMode == SHORT_HEADER_WITH_ERR_RES)
        {
            encOption->encMode = SHORT_HEADER;
        }
    }

    if (PacketSize > profile_level_table[profile_table_index])
        goto CLEAN_UP;

    /* Initial Defaults for all Modes */

    encParams->SequenceStartCode = 1;
    encParams->GOV_Enabled = 0;
    encParams->RoundingType = 0;
    encParams->IntraDCVlcThr = PV_MAX(PV_MIN(encOption->intraDCVlcTh, 7), 0); /* clamp to [0,7] */
    encParams->ACDCPrediction = ((encOption->useACPred == PV_ON) ? TRUE : FALSE);
    encParams->RC_Type = encOption->rcType;
    encParams->Refresh = encOption->numIntraMB;
    encParams->ResyncMarkerDisable = 0; /* Enable Resync Marker */

    for (i = 0; i < encOption->numLayers; i++)
    {
#ifdef NO_MPEG_QUANT
        encParams->QuantType[i] = 0;
#else
        encParams->QuantType[i] = encOption->quantType[i];      /* H263 */
#endif
        /* quantizers must be in the legal range 1..31 */
        if (encOption->pQuant[i] >= 1 && encOption->pQuant[i] <= 31)
        {
            encParams->InitQuantPvop[i] = encOption->pQuant[i];
        }
        else
        {
            goto CLEAN_UP;
        }
        if (encOption->iQuant[i] >= 1 && encOption->iQuant[i] <= 31)
        {
            encParams->InitQuantIvop[i] = encOption->iQuant[i];
        }
        else
        {
            goto CLEAN_UP;
        }
    }

    encParams->HalfPel_Enabled = 1;
    encParams->SearchRange = encOption->searchRange; /* 4/16/2001 */
    encParams->FullSearch_Enabled = 0;
#ifdef NO_INTER4V
    encParams->MV8x8_Enabled = 0;
#else
    encParams->MV8x8_Enabled = 0;// comment out for now!! encOption->mv8x8Enable;
#endif
    encParams->H263_Enabled = 0;
    encParams->GOB_Header_Interval = 0; // need to be reset to 0
    encParams->IntraPeriod = encOption->intraPeriod; /* Intra update period update default*/
    encParams->SceneChange_Det = encOption->sceneDetect;
    encParams->FineFrameSkip_Enabled = 0;
    encParams->NoFrameSkip_Enabled = encOption->noFrameSkipped;
    encParams->NoPreSkip_Enabled = encOption->noFrameSkipped;
    encParams->GetVolHeader[0] = 0;
    encParams->GetVolHeader[1] = 0;
    encParams->ResyncPacketsize = encOption->packetSize << 3;
    encParams->LayerMaxBitRate[0] = 0;
    encParams->LayerMaxBitRate[1] = 0;
    encParams->LayerMaxFrameRate[0] = (float)0.0;
    encParams->LayerMaxFrameRate[1] = (float)0.0;
    encParams->VBV_delay = encOption->vbvDelay; /* 2sec VBV buffer size */

    /* mode-specific parameter overrides */
    switch (encOption->encMode)
    {

        case SHORT_HEADER:
        case SHORT_HEADER_WITH_ERR_RES:

            /* From Table 6-26 */
            encParams->nLayers = 1;
            encParams->QuantType[0] = 0;    /*H263 */
            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */
            encParams->DataPartitioning = 0; /* Combined Mode */
            encParams->ReversibleVLC = 0;   /* Disable RVLC */
            encParams->RoundingType = 0;
            encParams->IntraDCVlcThr = 7;   /* use_intra_dc_vlc = 0 */
            encParams->MV8x8_Enabled = 0;

            encParams->GOB_Header_Interval = encOption->gobHeaderInterval;
            encParams->H263_Enabled = 2;    /* 2 = short-header (MPEG-4 SVH) flavor */
            encParams->GOV_Enabled = 0;
            encParams->TimeIncrementRes = 30000;  /* timeIncrementRes for H263 */
            break;

        case H263_MODE:
        case H263_MODE_WITH_ERR_RES:

            /* From Table 6-26 */
            encParams->nLayers = 1;
            encParams->QuantType[0] = 0;    /*H263 */
            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */
            encParams->DataPartitioning = 0; /* Combined Mode */
            encParams->ReversibleVLC = 0;   /* Disable RVLC */
            encParams->RoundingType = 0;
            encParams->IntraDCVlcThr = 7;   /* use_intra_dc_vlc = 0 */
            encParams->MV8x8_Enabled = 0;

            encParams->H263_Enabled = 1;    /* 1 = plain H.263 */
            encParams->GOV_Enabled = 0;
            encParams->TimeIncrementRes = 30000;    /* timeIncrementRes for H263 */
            break;
#ifndef H263_ONLY
        case DATA_PARTITIONING_MODE:

            encParams->DataPartitioning = 1;        /* Base Layer Data Partitioning */
            encParams->ResyncMarkerDisable = 0;     /* Resync Marker */
#ifdef NO_RVLC
            encParams->ReversibleVLC = 0;
#else
            encParams->ReversibleVLC = (encOption->rvlcEnable == PV_ON); /* RVLC when Data Partitioning */
#endif
            encParams->ResyncPacketsize = PacketSize;
            break;

        case COMBINE_MODE_WITH_ERR_RES:

            encParams->DataPartitioning = 0;        /* Combined Mode */
            encParams->ResyncMarkerDisable = 0;     /* Resync Marker */
            encParams->ReversibleVLC = 0;           /* No RVLC */
            encParams->ResyncPacketsize = PacketSize;
            break;

        case COMBINE_MODE_NO_ERR_RES:

            encParams->DataPartitioning = 0;        /* Combined Mode */
            encParams->ResyncMarkerDisable = 1;     /* Disable Resync Marker */
            encParams->ReversibleVLC = 0;           /* No RVLC */
            break;
#endif
        default:
            goto CLEAN_UP;
    }
    /* Set the constraints (maximum values) according to the input profile and level */
    /* Note that profile_table_index is already figured out above */

    /* base layer */
    encParams->profile_table_index    = profile_table_index; /* Used to limit the profile and level in SetProfile_BufferSize() */

    /* check timeIncRes */
    timeIncRes = encOption->timeIncRes;
    timeInc = encOption->tickPerSrc;

    if ((timeIncRes >= 1) && (timeIncRes <= 65536) && (timeInc < timeIncRes) && (timeInc != 0))
    {
        if (!encParams->H263_Enabled)
        {
            encParams->TimeIncrementRes = timeIncRes;
        }
        else
        {
            encParams->TimeIncrementRes = 30000;
//          video->FrameRate = 30000/(float)1001; /* fix it to 29.97 fps */
        }
        video->FrameRate = timeIncRes / ((float)timeInc);
    }
    else
    {
        goto CLEAN_UP;
    }

    /* check frame dimension: H.263 allows only the five standard source formats */
    if (encParams->H263_Enabled)
    {
        switch (encOption->encWidth[0])
        {
            case 128:
                if (encOption->encHeight[0] != 96) /* source_format = 1 (sub-QCIF) */
                    goto CLEAN_UP;
                break;
            case 176:
                if (encOption->encHeight[0] != 144) /* source_format = 2 (QCIF) */
                    goto CLEAN_UP;
                break;
            case 352:
                if (encOption->encHeight[0] != 288) /* source_format = 3 (CIF) */
                    goto CLEAN_UP;
                break;
            case 704:
                if (encOption->encHeight[0] != 576) /* source_format = 4 (4CIF) */
                    goto CLEAN_UP;
                break;
            case 1408:
                if (encOption->encHeight[0] != 1152) /* source_format = 5 (16CIF) */
                    goto CLEAN_UP;
                break;
            default:
                goto CLEAN_UP;
        }
    }
    for (i = 0; i < encParams->nLayers; i++)
    {
        encParams->LayerHeight[i] = encOption->encHeight[i];
        encParams->LayerWidth[i] = encOption->encWidth[i];
    }

    /* check frame rate */
    for (i = 0; i < encParams->nLayers; i++)
    {
        encParams->LayerFrameRate[i] = encOption->encFrameRate[i];
    }

    if (encParams->nLayers > 1)
    {
        if (encOption->encFrameRate[0] == encOption->encFrameRate[1] ||
                encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) /* 7/31/03 */
            goto CLEAN_UP;
    }
    /* set max frame rate */
    for (i = 0; i < encParams->nLayers; i++)
    {

        /* Make sure the maximum framerate is consistent with the given profile and level */
        nTotalMB = ((encParams->LayerWidth[i] + 15) / 16) * ((encParams->LayerHeight[i] + 15) / 16);

        if (nTotalMB > 0)
            profile_max_framerate = (float)encParams->LayerMaxMbsPerSec[i] / (float)nTotalMB;

        else
            profile_max_framerate = (float)30.0;

        encParams->LayerMaxFrameRate[i] = PV_MIN(profile_max_framerate, encParams->LayerFrameRate[i]);
    }

    /* check bit rate */
    /* set max bit rate */
    for (i = 0; i < encParams->nLayers; i++)
    {
        encParams->LayerBitRate[i] = encOption->bitRate[i];
        encParams->LayerMaxBitRate[i] = encOption->bitRate[i];
    }
    if (encParams->nLayers > 1)
    {
        if (encOption->bitRate[0] == encOption->bitRate[1] ||
                encOption->bitRate[0] == 0 || encOption->bitRate[1] == 0) /* 7/31/03 */
            goto CLEAN_UP;
    }
    /* check rate control and vbv delay*/
    encParams->RC_Type = encOption->rcType;

    if (encOption->vbvDelay == 0.0) /* set to default */
    {
        switch (encOption->rcType)
        {
            case CBR_1:
            case CBR_2:
                encParams->VBV_delay = (float)2.0; /* default 2sec VBV buffer size */
                break;

            case CBR_LOWDELAY:
                encParams->VBV_delay = (float)0.5; /* default 0.5sec VBV buffer size */
                break;

            case VBR_1:
            case VBR_2:
                encParams->VBV_delay = (float)10.0; /* default 10sec VBV buffer size */
                break;

            default:
                break;
        }
    }
    else /* force this value */
    {
        encParams->VBV_delay = encOption->vbvDelay;
    }

    /* check search range: H.263 motion vectors are limited to +/-16 */
    if (encParams->H263_Enabled && encOption->searchRange > 16)
    {
        encParams->SearchRange = 16; /* 4/16/2001 */
    }

    /*****************************************/
    /* checking for conflict between options */
    /*****************************************/

    if (video->encParams->RC_Type == CBR_1 || video->encParams->RC_Type == CBR_2 || video->encParams->RC_Type == CBR_LOWDELAY)  /* if CBR */
    {
#ifdef _PRINT_STAT
        if (video->encParams->NoFrameSkip_Enabled == PV_ON ||
                video->encParams->NoPreSkip_Enabled == PV_ON) /* don't allow frame skip*/
            printf("WARNING!!!! CBR with NoFrameSkip\n");
#endif
    }
    else if (video->encParams->RC_Type == CONSTANT_Q)   /* constant_Q */
    {
        video->encParams->NoFrameSkip_Enabled = PV_ON;  /* no frame skip */
        video->encParams->NoPreSkip_Enabled = PV_ON;    /* no frame skip */
#ifdef _PRINT_STAT
        printf("Turn on NoFrameSkip\n");
#endif
    }

    if (video->encParams->NoFrameSkip_Enabled == PV_ON) /* if no frame skip */
    {
        video->encParams->FineFrameSkip_Enabled = PV_OFF;
#ifdef _PRINT_STAT
        printf("NoFrameSkip !!! may violate VBV_BUFFER constraint.\n");
        printf("Turn off FineFrameSkip\n");
#endif
    }

    /******************************************/
    /******************************************/

    nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */

    /* Find the maximum width*height for memory allocation of the VOPs */
    for (idx = 0; idx < nLayers; idx++)
    {
        temp_w = video->encParams->LayerWidth[idx];
        temp_h = video->encParams->LayerHeight[idx];

        if ((temp_w*temp_h) > max)
        {
            max = temp_w * temp_h;
            max_width = ((temp_w + 15) >> 4) << 4;   /* round up to multiple of 16 */
            max_height = ((temp_h + 15) >> 4) << 4;
            nTotalMB = ((max_width * max_height) >> 8);
        }

        /* Check if the video size and framerate (MBsPerSec) are valid */
        mbsPerSec = (Int)(nTotalMB * video->encParams->LayerFrameRate[idx]);
        /* NOTE(review): this PV_FALSE is overwritten by the SetProfile_BufferSize()
           call below, so an MBsPerSec violation here is silently ignored --
           confirm whether it should goto CLEAN_UP instead */
        if (mbsPerSec > video->encParams->LayerMaxMbsPerSec[idx]) status = PV_FALSE;
    }

    /****************************************************/
    /* Set Profile and Video Buffer Size for each layer */
    /****************************************************/
    if (video->encParams->RC_Type == CBR_LOWDELAY) video->encParams->VBV_delay = 0.5; /* For CBR_LOWDELAY, we set 0.5sec buffer */
    status = SetProfile_BufferSize(video, video->encParams->VBV_delay, 1);
    if (status != PV_TRUE)
        goto CLEAN_UP;

    /****************************************/
    /* memory allocation and initialization */
    /****************************************/

    /* NOTE(review): redundant -- video was already NULL-checked right after malloc */
    if (video == NULL)
        goto CLEAN_UP;

    /* cyclic reference for passing through both structures */
    video->videoEncControls = encoderControl;

    //video->currLayer = 0; /* Set current Layer to 0 */
    //video->currFrameNo = 0; /* Set current frame Number to 0 */
    video->nextModTime = 0;
    video->nextEncIVop = 0; /* Sets up very first frame to be I-VOP! */
    video->numVopsInGOP = 0; /* counter for Vops in Gop, 2/8/01 */

    //video->frameRate = video->encParams->LayerFrameRate[0]; /* Set current layer frame rate */

    video->QPMB = (UChar *) M4VENC_MALLOC(nTotalMB * sizeof(UChar)); /* Memory for MB quantizers */
    if (video->QPMB == NULL) goto CLEAN_UP;


    video->headerInfo.Mode = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB); /* Memory for MB Modes */
    if (video->headerInfo.Mode == NULL) goto CLEAN_UP;
    video->headerInfo.CBP = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB);   /* Memory for CBP (Y and C) of each MB */
    if (video->headerInfo.CBP == NULL) goto CLEAN_UP;

    /* Allocating motion vector space and interpolation memory*/
    video->mot = (MOT **)M4VENC_MALLOC(sizeof(MOT *) * nTotalMB);
    if (video->mot == NULL) goto CLEAN_UP;

    for (idx = 0; idx < nTotalMB; idx++)
    {
        /* 8 MVs per MB: 1 for 16x16 mode plus 4 for 8x8 mode, with spares */
        video->mot[idx] = (MOT *)M4VENC_MALLOC(sizeof(MOT) * 8);
        if (video->mot[idx] == NULL)
        {
            goto CLEAN_UP;
        }
    }

    video->intraArray = (UChar *)M4VENC_MALLOC(sizeof(UChar) * nTotalMB);
    if (video->intraArray == NULL) goto CLEAN_UP;

    video->sliceNo = (UChar *) M4VENC_MALLOC(nTotalMB); /* Memory for Slice Numbers */
    if (video->sliceNo == NULL) goto CLEAN_UP;
    /* Allocating space for predDCAC[][8][16], Not that I intentionally */
    /*    increase the dimension of predDCAC from [][6][15] to [][8][16] */
    /*    so that compilers can generate faster code to indexing the     */
    /*    data inside (by using << instead of *).   04/14/2000. */
    /* 5/29/01, use decoder lib ACDC prediction memory scheme.  */
    video->predDC = (typeDCStore *) M4VENC_MALLOC(nTotalMB * sizeof(typeDCStore));
    if (video->predDC == NULL) goto CLEAN_UP;

    if (!video->encParams->H263_Enabled)
    {
        video->predDCAC_col = (typeDCACStore *) M4VENC_MALLOC(((max_width >> 4) + 1) * sizeof(typeDCACStore));
        if (video->predDCAC_col == NULL) goto CLEAN_UP;

        /* element zero will be used for storing vertical (col) AC coefficients */
        /*  the rest will be used for storing horizontal (row) AC coefficients */
        video->predDCAC_row = video->predDCAC_col + 1;        /*  ACDC */

        video->acPredFlag = (Int *) M4VENC_MALLOC(nTotalMB * sizeof(Int)); /* Memory for acPredFlag */
        if (video->acPredFlag == NULL) goto CLEAN_UP;
    }

    video->outputMB = (MacroBlock *) M4VENC_MALLOC(sizeof(MacroBlock)); /* Allocating macroblock space */
    if (video->outputMB == NULL) goto CLEAN_UP;
    M4VENC_MEMSET(video->outputMB->block[0], 0, (sizeof(Short) << 6)*6);

    M4VENC_MEMSET(video->dataBlock, 0, sizeof(Short) << 7);

    /* Allocate (2*packetsize) working bitstreams */

    video->bitstream1 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 1*/
    if (video->bitstream1 == NULL) goto CLEAN_UP;

    video->bitstream2 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 2*/
    if (video->bitstream2 == NULL) goto CLEAN_UP;

    video->bitstream3 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 3*/
    if (video->bitstream3 == NULL) goto CLEAN_UP;

    /* allocate overrun buffer */
    // this buffer is used when user's buffer is too small to hold one frame.
    // It is not needed for slice-based encoding.
    if (nLayers == 1)
    {
        video->oBSize = encParams->BufferSize[0] >> 3;  /* bits -> bytes */
    }
    else
    {
        video->oBSize = PV_MAX((encParams->BufferSize[0] >> 3), (encParams->BufferSize[1] >> 3));
    }

    if (video->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE || encParams->RC_Type == CONSTANT_Q) // set limit
    {
        video->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;
    }
    video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * video->oBSize);
    if (video->overrunBuffer == NULL) goto CLEAN_UP;


    video->currVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Current VOP */
    if (video->currVop == NULL) goto CLEAN_UP;

    /* add padding, 09/19/05 */
    if (video->encParams->H263_Enabled) /* make it conditional  11/28/05 */
    {
        pitch = max_width;
        offset = 0;
    }
    else
    {
        /* 16-pixel border on every side: pitch grows by 32, and 'offset'
           points past one 16-row band plus a 16-column margin */
        pitch = max_width + 32;
        offset = (pitch << 4) + 16;
        max_height += 32;
    }
    size = pitch * max_height;

    video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */
    if (video->currVop->yChan == NULL) goto CLEAN_UP;
    video->currVop->uChan = video->currVop->yChan + size;/* Memory for currVop U */
    video->currVop->vChan = video->currVop->uChan + (size >> 2);/* Memory for currVop V */

    /* shift for the offset */
    if (offset)
    {
        video->currVop->yChan += offset; /* offset to the origin.*/
        video->currVop->uChan += (offset >> 2) + 4;
        video->currVop->vChan += (offset >> 2) + 4;
    }

    video->forwardRefVop = video->currVop;          /* Initialize forwardRefVop */
    video->backwardRefVop = video->currVop;         /* Initialize backwardRefVop */

    video->prevBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Previous Base Vop */
    if (video->prevBaseVop == NULL) goto CLEAN_UP;
    video->prevBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for prevBaseVop Y */
    if (video->prevBaseVop->yChan == NULL) goto CLEAN_UP;
    video->prevBaseVop->uChan = video->prevBaseVop->yChan + size; /* Memory for prevBaseVop U */
    video->prevBaseVop->vChan = video->prevBaseVop->uChan + (size >> 2); /* Memory for prevBaseVop V */

    if (offset)
    {
        video->prevBaseVop->yChan += offset; /* offset to the origin.*/
        video->prevBaseVop->uChan += (offset >> 2) + 4;
        video->prevBaseVop->vChan += (offset >> 2) + 4;
    }

    if (0) /* If B Frames */ /* dead branch: B-frame support not enabled in this encoder */
    {
        video->nextBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Next Base Vop */
        if (video->nextBaseVop == NULL) goto CLEAN_UP;
        video->nextBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for nextBaseVop Y */
        if (video->nextBaseVop->yChan == NULL) goto CLEAN_UP;
        video->nextBaseVop->uChan = video->nextBaseVop->yChan + size; /* Memory for nextBaseVop U */
        video->nextBaseVop->vChan = video->nextBaseVop->uChan + (size >> 2); /* Memory for nextBaseVop V */

        if (offset)
        {
            video->nextBaseVop->yChan += offset; /* offset to the origin.*/
            video->nextBaseVop->uChan += (offset >> 2) + 4;
            video->nextBaseVop->vChan += (offset >> 2) + 4;
        }
    }

    if (nLayers > 1)   /* If enhancement layers */
    {
        video->prevEnhanceVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Previous Enhancement Vop */
        if (video->prevEnhanceVop == NULL) goto CLEAN_UP;
        video->prevEnhanceVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for Previous Ehancement Y */
        if (video->prevEnhanceVop->yChan == NULL) goto CLEAN_UP;
        video->prevEnhanceVop->uChan = video->prevEnhanceVop->yChan + size; /* Memory for Previous Enhancement U */
        video->prevEnhanceVop->vChan = video->prevEnhanceVop->uChan + (size >> 2); /* Memory for Previous Enhancement V */

        if (offset)
        {
            video->prevEnhanceVop->yChan += offset; /* offset to the origin.*/
            video->prevEnhanceVop->uChan += (offset >> 2) + 4;
            video->prevEnhanceVop->vChan += (offset >> 2) + 4;
        }
    }

    video->numberOfLayers = nLayers; /* Number of Layers */
    video->sumMAD = 0;


    /* 04/09/01, for Vops in the use multipass processing */
    for (idx = 0; idx < nLayers; idx++)
    {
        video->pMP[idx] = (MultiPass *)M4VENC_MALLOC(sizeof(MultiPass));
        if (video->pMP[idx] == NULL)    goto CLEAN_UP;
        M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));

        video->pMP[idx]->encoded_frames = -1; /* forget about the very first I frame */

        /* RDInfo **pRDSamples */
        video->pMP[idx]->pRDSamples = (RDInfo **)M4VENC_MALLOC(30 * sizeof(RDInfo *));
        if (video->pMP[idx]->pRDSamples == NULL)    goto CLEAN_UP;
        for (i = 0; i < 30; i++)
        {
            video->pMP[idx]->pRDSamples[i] = (RDInfo *)M4VENC_MALLOC(32 * sizeof(RDInfo));
            if (video->pMP[idx]->pRDSamples[i] == NULL) goto CLEAN_UP;
            for (j = 0; j < 32; j++)    M4VENC_MEMSET(&(video->pMP[idx]->pRDSamples[i][j]), 0, sizeof(RDInfo));
        }

        video->pMP[idx]->frameRange = (Int)(video->encParams->LayerFrameRate[idx] * 1.0); /* 1.0s time frame*/
        video->pMP[idx]->frameRange = PV_MAX(video->pMP[idx]->frameRange, 5);
        video->pMP[idx]->frameRange = PV_MIN(video->pMP[idx]->frameRange, 30);

        video->pMP[idx]->framePos = -1;

    }
    /* /// End /////////////////////////////////////// */


    video->vol = (Vol **)M4VENC_MALLOC(nLayers * sizeof(Vol *)); /* Memory for VOL pointers */

    /* Memory allocation and Initialization of Vols and writing of headers */
    if (video->vol == NULL) goto CLEAN_UP;

    for (idx = 0; idx < nLayers; idx++)
    {
        video->volInitialize[idx] = 1;
        video->refTick[idx] = 0;
        video->relLayerCodeTime[idx] = 1000;
        video->vol[idx] = (Vol *)M4VENC_MALLOC(sizeof(Vol));
        if (video->vol[idx] == NULL)  goto CLEAN_UP;

        pVol = video->vol[idx];
        pEncParams = video->encParams;

        M4VENC_MEMSET(video->vol[idx], 0, sizeof(Vol));
        /* Initialize some VOL parameters */
        pVol->volID = idx;  /* Set VOL ID */
        pVol->shortVideoHeader = pEncParams->H263_Enabled; /*Short Header */
        pVol->GOVStart = pEncParams->GOV_Enabled; /* GOV Header */
        pVol->timeIncrementResolution = video->encParams->TimeIncrementRes;
        /* smallest bit count that can represent timeIncrementResolution */
        pVol->nbitsTimeIncRes = 1;
        while (pVol->timeIncrementResolution > (1 << pVol->nbitsTimeIncRes))
        {
            pVol->nbitsTimeIncRes++;
        }

        /* timing stuff */
        pVol->timeIncrement = 0;
        pVol->moduloTimeBase = 0;
        pVol->fixedVopRate = 0; /* No fixed VOP rate */
        pVol->stream = (BitstreamEncVideo *)M4VENC_MALLOC(sizeof(BitstreamEncVideo)); /* allocate BitstreamEncVideo Instance */
        if (pVol->stream == NULL)  goto CLEAN_UP;

        pVol->width = pEncParams->LayerWidth[idx];      /* Layer Width */
        pVol->height = pEncParams->LayerHeight[idx];    /* Layer Height */
        //  pVol->intra_acdcPredDisable = pEncParams->ACDCPrediction; /* ACDC Prediction */
        pVol->ResyncMarkerDisable = pEncParams->ResyncMarkerDisable; /* Resync Marker Mode */
        pVol->dataPartitioning = pEncParams->DataPartitioning; /* Data Partitioning */
        pVol->useReverseVLC = pEncParams->ReversibleVLC; /* RVLC */
        if (idx > 0) /* Scalability layers */
        {
            pVol->ResyncMarkerDisable = 1;
            pVol->dataPartitioning = 0;
            pVol->useReverseVLC = 0; /*  No RVLC */
        }
        pVol->quantType = pEncParams->QuantType[idx];           /* Quantizer Type */

        /* no need to init Quant Matrices */

        pVol->scalability = 0;  /* Vol Scalability */
        if (idx > 0)
            pVol->scalability = 1; /* Multiple layers => Scalability */

        /* Initialize Vol to Temporal scalability.  It can change during encoding */
        pVol->scalType = 1;
        /* Initialize reference Vol ID to the base layer = 0 */
        pVol->refVolID = 0;
        /* Initialize layer resolution to same as the reference */
        pVol->refSampDir = 0;
        pVol->horSamp_m = 1;
        pVol->horSamp_n = 1;
        pVol->verSamp_m = 1;
        pVol->verSamp_n = 1;
        pVol->enhancementType = 0; /* We always enhance the entire region */

        pVol->nMBPerRow = (pVol->width + 15) / 16;
        pVol->nMBPerCol = (pVol->height + 15) / 16;
        pVol->nTotalMB = pVol->nMBPerRow * pVol->nMBPerCol;

        /* nBitsForMBID = ceil(log2(nTotalMB)), computed by threshold ladder */
        if (pVol->nTotalMB >= 1)
            pVol->nBitsForMBID = 1;
        if (pVol->nTotalMB >= 3)
            pVol->nBitsForMBID = 2;
        if (pVol->nTotalMB >= 5)
            pVol->nBitsForMBID = 3;
        if (pVol->nTotalMB >= 9)
            pVol->nBitsForMBID = 4;
        if (pVol->nTotalMB >= 17)
            pVol->nBitsForMBID = 5;
        if (pVol->nTotalMB >= 33)
            pVol->nBitsForMBID = 6;
        if (pVol->nTotalMB >= 65)
            pVol->nBitsForMBID = 7;
        if (pVol->nTotalMB >= 129)
            pVol->nBitsForMBID = 8;
        if (pVol->nTotalMB >= 257)
            pVol->nBitsForMBID = 9;
        if (pVol->nTotalMB >= 513)
            pVol->nBitsForMBID = 10;
        if (pVol->nTotalMB >= 1025)
            pVol->nBitsForMBID = 11;
        if (pVol->nTotalMB >= 2049)
            pVol->nBitsForMBID = 12;
        if (pVol->nTotalMB >= 4097)
            pVol->nBitsForMBID = 13;
        if (pVol->nTotalMB >= 8193)
            pVol->nBitsForMBID = 14;
        if (pVol->nTotalMB >= 16385)
            pVol->nBitsForMBID = 15;
        if (pVol->nTotalMB >= 32769)
            pVol->nBitsForMBID = 16;
        if (pVol->nTotalMB >= 65537)
            pVol->nBitsForMBID = 17;
        if (pVol->nTotalMB >= 131073)
            pVol->nBitsForMBID = 18;

        /* H.263/short-header GOB geometry per source format */
        if (pVol->shortVideoHeader)
        {
            switch (pVol->width)
            {
                case 128:
                    if (pVol->height == 96)  /* source_format = 1 (sub-QCIF) */
                    {
                        pVol->nGOBinVop = 6;
                        pVol->nMBinGOB = 8;
                    }
                    else
                        status = PV_FALSE;
                    break;

                case 176:
                    if (pVol->height == 144)  /* source_format = 2 (QCIF) */
                    {
                        pVol->nGOBinVop = 9;
                        pVol->nMBinGOB = 11;
                    }
                    else
                        status = PV_FALSE;
                    break;
                case 352:
                    if (pVol->height == 288)  /* source_format = 3 (CIF) */
                    {
                        pVol->nGOBinVop = 18;
                        pVol->nMBinGOB = 22;
                    }
                    else
                        status = PV_FALSE;
                    break;

                case 704:
                    if (pVol->height == 576)  /* source_format = 4 (4CIF) */
                    {
                        pVol->nGOBinVop = 18;
                        pVol->nMBinGOB = 88;
                    }
                    else
                        status = PV_FALSE;
                    break;
                case 1408:
                    if (pVol->height == 1152)  /* source_format = 5 (16CIF) */
                    {
                        pVol->nGOBinVop = 18;
                        pVol->nMBinGOB = 352;
                    }
                    else
                        status = PV_FALSE;
                    break;

                default:
                    status = PV_FALSE;
                    break;
            }
        }
    }

    /***************************************************/
    /* allocate and initialize rate control parameters */
    /***************************************************/

    /* BEGIN INITIALIZATION OF ANNEX L RATE CONTROL */
    if (video->encParams->RC_Type != CONSTANT_Q)
    {
        for (idx = 0; idx < nLayers; idx++) /* 12/25/00 */
        {
            video->rc[idx] = (rateControl *)M4VENC_MALLOC(sizeof(rateControl));
            if (video->rc[idx] == NULL) goto CLEAN_UP;
            M4VENC_MEMSET(video->rc[idx], 0, sizeof(rateControl));
        }
        if (PV_SUCCESS != RC_Initialize(video))
        {
            goto CLEAN_UP;
        }
        /* initialization for 2-pass rate control */
    }
    /* END INITIALIZATION OF ANNEX L RATE CONTROL */

    /********** assign platform dependent functions ***********************/
    /* 1/23/01 */
    /* This must be done at run-time not a compile time */
    video->functionPointer = (FuncPtr*) M4VENC_MALLOC(sizeof(FuncPtr));
    if (video->functionPointer == NULL) goto CLEAN_UP;

    video->functionPointer->ComputeMBSum = &ComputeMBSum_C;
    video->functionPointer->SAD_MB_HalfPel[0] = NULL;
    video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HalfPel_Cxh;
    video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HalfPel_Cyh;
    video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HalfPel_Cxhyh;

#ifndef NO_INTER4V
    video->functionPointer->SAD_Blk_HalfPel = &SAD_Blk_HalfPel_C;
    video->functionPointer->SAD_Block = &SAD_Block_C;
#endif
    video->functionPointer->SAD_Macroblock = &SAD_Macroblock_C;
    video->functionPointer->ChooseMode = &ChooseMode_C;
    video->functionPointer->GetHalfPelMBRegion = &GetHalfPelMBRegion_C;
//  video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING; /* 4/21/01 */


    encoderControl->videoEncoderInit = 1;  /* init done! */

    return PV_TRUE;

CLEAN_UP:
    PVCleanUpVideoEncoder(encoderControl);

    return PV_FALSE;
}

/* ======================================================================== */
/*  Function : PVCleanUpVideoEncoder()                                      */
/*  Date     : 08/22/2000                                                   */
/*  Purpose  : Deallocates allocated memory from InitVideoEncoder()         */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if successful, PV_FALSE if failed.
*/
/* Modified : 5/21/01, free only yChan in Vop                               */
/*                                                                          */
/* ======================================================================== */
/* Frees every allocation made by PVInitVideoEncoder() and resets the       */
/* control structure.  Safe to call when init failed part-way: every free   */
/* is guarded by a NULL test.  NOTE(review): teardown order mirrors the     */
/* allocation order in init; do not reorder.                                */
OSCL_EXPORT_REF Bool PVCleanUpVideoEncoder(VideoEncControls *encoderControl)
{
    Int idx, i;
    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;
    int nTotalMB;
    int max_width, offset;

#ifdef PRINT_RC_INFO
    /* Dump final rate-control accounting before closing the stats file. */
    if (facct != NULL)
    {
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUM BITS GENERATED %d\n", tiTotalNumBitsGenerated);
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUMBER OF FRAMES CODED %d\n",
                video->encParams->rc[0]->totalFrameNumber);
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "Average BitRate %d\n",
                (tiTotalNumBitsGenerated / (90 / 30)));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUMBER OF STUFF BITS %d\n", (iStuffBits + 10740));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "TOTAL NUMBER OF BITS TO NETWORK %d\n", (35800*90 / 30));;
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "SUM OF STUFF BITS AND GENERATED BITS %d\n",
                (tiTotalNumBitsGenerated + iStuffBits + 10740));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fprintf(facct, "UNACCOUNTED DIFFERENCE %d\n",
                ((35800*90 / 30) - (tiTotalNumBitsGenerated + iStuffBits + 10740)));
        fprintf(facct, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
        fclose(facct);
    }
#endif

#ifdef PRINT_EC
    fclose(fec);
#endif

    if (video != NULL)
    {
        if (video->QPMB) M4VENC_FREE(video->QPMB);
        if (video->headerInfo.Mode)M4VENC_FREE(video->headerInfo.Mode);
        if (video->headerInfo.CBP)M4VENC_FREE(video->headerInfo.CBP);

        if (video->mot)
        {
            /* mot[] was sized to the largest layer's MB count; recompute   */
            /* that count here so every per-MB entry gets freed.            */
            /* NOTE(review): the scan uses video->currLayer as the bound —  */
            /* presumably equal to the layer count at this point; confirm   */
            /* against PVInitVideoEncoder.                                  */
            nTotalMB = video->vol[0]->nTotalMB;
            for (idx = 1; idx < video->currLayer; idx++)
                if (video->vol[idx]->nTotalMB > nTotalMB)
                    nTotalMB = video->vol[idx]->nTotalMB;
            for (idx = 0; idx < nTotalMB; idx++)
            {
                if (video->mot[idx])
                    M4VENC_FREE(video->mot[idx]);
            }
            M4VENC_FREE(video->mot);
        }

        if (video->intraArray) M4VENC_FREE(video->intraArray);

        if (video->sliceNo)M4VENC_FREE(video->sliceNo);
        if (video->acPredFlag)M4VENC_FREE(video->acPredFlag);
//        if(video->predDCAC)M4VENC_FREE(video->predDCAC);
        if (video->predDC) M4VENC_FREE(video->predDC);
        /* predDCAC_row aliases memory owned elsewhere — clear, don't free. */
        video->predDCAC_row = NULL;
        if (video->predDCAC_col) M4VENC_FREE(video->predDCAC_col);
        if (video->outputMB)M4VENC_FREE(video->outputMB);

        if (video->bitstream1)BitstreamCloseEnc(video->bitstream1);
        if (video->bitstream2)BitstreamCloseEnc(video->bitstream2);
        if (video->bitstream3)BitstreamCloseEnc(video->bitstream3);

        if (video->overrunBuffer) M4VENC_FREE(video->overrunBuffer);

        /* yChan pointers were advanced past the padding border at alloc    */
        /* time; rewind by the same offset before freeing.                  */
        max_width = video->encParams->LayerWidth[0];
        max_width = (((max_width + 15) >> 4) << 4);        /* 09/19/05 */
        if (video->encParams->H263_Enabled)
        {
            offset = 0;                 /* H.263 frames are not padded */
        }
        else
        {
            offset = ((max_width + 32) << 4) + 16;
        }

        if (video->currVop)
        {
            if (video->currVop->yChan)
            {
                video->currVop->yChan -= offset;
                M4VENC_FREE(video->currVop->yChan);
            }
            M4VENC_FREE(video->currVop);
        }

        if (video->nextBaseVop)
        {
            if (video->nextBaseVop->yChan)
            {
                video->nextBaseVop->yChan -= offset;
                M4VENC_FREE(video->nextBaseVop->yChan);
            }
            M4VENC_FREE(video->nextBaseVop);
        }

        if (video->prevBaseVop)
        {
            if (video->prevBaseVop->yChan)
            {
                video->prevBaseVop->yChan -= offset;
                M4VENC_FREE(video->prevBaseVop->yChan);
            }
            M4VENC_FREE(video->prevBaseVop);
        }

        if (video->prevEnhanceVop)
        {
            if (video->prevEnhanceVop->yChan)
            {
                video->prevEnhanceVop->yChan -= offset;
                M4VENC_FREE(video->prevEnhanceVop->yChan);
            }
            M4VENC_FREE(video->prevEnhanceVop);
        }

        /* 04/09/01, multipass-processing state: per-layer RD sample pool. */
        for (idx = 0; idx < video->encParams->nLayers; idx++)
        {
            if (video->pMP[idx])
            {
                if (video->pMP[idx]->pRDSamples)
                {
                    for (i = 0; i < 30; i++)
                    {
                        if (video->pMP[idx]->pRDSamples[i])
                            M4VENC_FREE(video->pMP[idx]->pRDSamples[i]);
                    }
                    M4VENC_FREE(video->pMP[idx]->pRDSamples);
                }
                M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));
                M4VENC_FREE(video->pMP[idx]);
            }
        }

        if (video->vol)
        {
            for (idx = 0; idx < video->encParams->nLayers; idx++)
            {
                if (video->vol[idx])
                {
                    if (video->vol[idx]->stream)
                        M4VENC_FREE(video->vol[idx]->stream);
                    M4VENC_FREE(video->vol[idx]);
                }
            }
            M4VENC_FREE(video->vol);
        }

        /***************************************************/
        /* stop rate control parameters                    */
        /***************************************************/
        /* ANNEX L RATE CONTROL: rc[] is only allocated when rate control  */
        /* is enabled, so only then does it need to be torn down.          */
        if (video->encParams->RC_Type != CONSTANT_Q)
        {
            RC_Cleanup(video->rc, video->encParams->nLayers);
            for (idx = 0; idx < video->encParams->nLayers; idx++)
            {
                if (video->rc[idx])
                    M4VENC_FREE(video->rc[idx]);
            }
        }

        if (video->functionPointer) M4VENC_FREE(video->functionPointer);

        /* If application has called PVCleanUpVideoEncoder then we deallocate */
        /* If PVInitVideoEncoder class it, then we DO NOT deallocate          */
        if (video->encParams)
        {
            M4VENC_FREE(video->encParams);
        }

        M4VENC_FREE(video);
        encoderControl->videoEncoderData = NULL;        /* video */
    }

    encoderControl->videoEncoderInit = 0;

    return PV_TRUE;
}

/* ======================================================================== */
/*  Function : PVGetVolHeader()                                             */
/*  Date     : 7/17/2001,                                                   */
/*  Purpose  :                                                              */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer) { VideoEncData *encData; PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl); encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; encData->currLayer = layer; /* Set Layer */ /*pv_status = */ EncodeVOS_Start(encCtrl); /* Encode VOL Header */ encData->encParams->GetVolHeader[layer] = 1; /* Set usage flag: Needed to support old method*/ /* Copy bitstream to buffer and set the size */ if (*size > encData->bitstream1->byteCount) { *size = encData->bitstream1->byteCount; M4VENC_MEMCPY(volHeader, encData->bitstream1->bitstreamBuffer, *size); } else return PV_FALSE; /* Reset bitstream1 buffer parameters */ BitstreamEncReset(encData->bitstream1); return PV_TRUE; } /* ======================================================================== */ /* Function : PVGetOverrunBuffer() */ /* Purpose : Get the overrun buffer ` */ /* In/out : */ /* Return : Pointer to overrun buffer. */ /* Modified : */ /* ======================================================================== */ OSCL_EXPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl) { VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData; Int currLayer = video->currLayer; Vol *currVol = video->vol[currLayer]; if (currVol->stream->bitstreamBuffer != video->overrunBuffer) // not used { return NULL; } return video->overrunBuffer; } /* ======================================================================== */ /* Function : EncodeVideoFrame() */ /* Date : 08/22/2000 */ /* Purpose : Encode video frame and return bitstream */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/*  Modified :                                                              */
/*             02.14.2001                                                   */
/*             Finishing new timestamp 32-bit input                         */
/*             Applications need to take care of wrap-around                */
/* ======================================================================== */
/* Frame-level entry point: decides which layer (if any) codes this input,  */
/* initializes the current Vop, encodes it, and swaps reference pointers.   */
/*   vid_in      : input frame + timestamp (ms)                             */
/*   vid_out     : [out] reconstructed frame                                */
/*   nextModTime : [out] earliest timestamp the encoder will accept next    */
/*   bstream/size: [in] output buffer and capacity; [out] bytes written     */
/*   nLayer      : [out] layer coded, or -1 when the frame was skipped      */
OSCL_EXPORT_REF Bool PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out,
                                        ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer)
{
    Bool status = PV_TRUE;
    PV_STATUS pv_status;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    Vop *tempForwRefVop = NULL;
    Int tempRefSelCode = 0;
    PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl);
    Int width_16, height_16;
    Int width, height;
    Vop *temp;
    Int encodeVop = 0;
    void PaddingEdge(Vop *padVop);
    Int currLayer = -1;
    //Int nLayers = encParams->nLayers;

    ULong modTime = vid_in->timestamp;

#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
    static Int rand_idx = 0;
#endif

    /*******************************************************/
    /* Determine Next Vop to encode, if any, and nLayer    */
    /*******************************************************/
    //i = nLayers-1;

    if (video->volInitialize[0]) /* first vol to code */
    {
        /* anchor the time reference on a whole second */
        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % 1000));
    }

    encodeVop = DetermineCodingLayer(video, nLayer, modTime);
    currLayer = *nLayer;
    if ((currLayer < 0) || (currLayer > encParams->nLayers - 1))
        return PV_FALSE;

    /******************************************/
    /* If post-skipping still effective --- return */
    /******************************************/
    if (!encodeVop) /* skip enh layer, no base layer coded --- return */
    {
#ifdef _PRINT_STAT
        printf("No frame coded. Continue to next frame.");
#endif
        /* expected next code time, convert back to millisec */
        *nextModTime = video->nextModTime;

#ifdef ALLOW_VOP_NOT_CODED
        if (video->vol[0]->shortVideoHeader) /* Short Video Header = 1 */
        {
            *size = 0;
            *nLayer = -1;
        }
        else
        {
            *nLayer = 0;
            EncodeVopNotCoded(video, bstream, size, modTime);
            *size = video->vol[0]->stream->byteCount;
        }
#else
        *size = 0;
        *nLayer = -1;
#endif
        return status;
    }

    //ENCODE_VOP_AGAIN:  /* 12/30/00 */

    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/
    currVol = video->vol[currLayer];
    currVol->stream->bitstreamBuffer = bstream;
    currVol->stream->bufferSize = *size;
    BitstreamEncReset(currVol->stream);
    BitstreamSetOverrunBuffer(currVol->stream, video->overrunBuffer, video->oBSize, video);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/
    if (video->volInitialize[currLayer])
    {
        video->currVop->timeInc = 0;
        video->prevBaseVop->timeInc = 0;
        /* skip the in-band header if the app already fetched it via PVGetVolHeader */
        if (!video->encParams->GetVolHeader[currLayer])
            pv_status = EncodeVOS_Start(encCtrl);
    }

    /***************************************************/
    /* Copy Input Video Frame to Internal Video Buffer */
    /***************************************************/
    /* Determine Width and Height of Vop Layer */
    width = encParams->LayerWidth[currLayer];    /* Get input width */
    height = encParams->LayerHeight[currLayer];  /* Get input height */
    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */
    width_16 = ((width + 15) / 16) * 16;
    height_16 = ((height + 15) / 16) * 16;

    video->input = vid_in;  /* point to the frame input */

    /*// End ////////////////////////////// */

    /**************************************/
    /* Determine VOP Type                 */
    /* 6/2/2001, separate function        */
    /**************************************/
    DetermineVopType(video, currLayer);

    /****************************/
    /*    Initialize VOP        */
    /****************************/
    video->currVop->volID = currVol->volID;
    video->currVop->width = width_16;
    video->currVop->height = height_16;
    if (video->encParams->H263_Enabled) /*  11/28/05 */
    {
        video->currVop->pitch = width_16;      /* no padding border in H.263 */
    }
    else
    {
        video->currVop->pitch = width_16 + 32;
    }
    video->currVop->timeInc = currVol->timeIncrement;
    video->currVop->vopCoded = 1;
    video->currVop->roundingType = 0;
    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;

    /* Select forward reference: base layer always predicts from prevBaseVop. */
    if (currLayer == 0
#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
            || random_val[rand_idx] || video->volInitialize[currLayer]
#endif
       )
    {
        tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevBaseVop;
        video->forwardRefVop->refSelectCode = 1;
    }
#ifdef RANDOM_REFSELCODE
    else
    {
        tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }
    rand_idx++;
    rand_idx %= 30;
#endif

    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;
    video->currVop->gobNumber = 0;
    video->currVop->gobFrameID = video->currVop->predictionType;
    video->currVop->temporalRef = (modTime * 30 / 1001) % 256;

    video->currVop->temporalInterval = 0;

    if (video->currVop->predictionType == I_VOP)
        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];
    else
        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];

    /****************/
    /*  Encode Vop  */
    /****************/
    video->slice_coding = 0;    /* whole-frame mode (cf. PVEncodeFrameSet) */

    pv_status = EncodeVop(video);
#ifdef _PRINT_STAT
    if (video->currVop->predictionType == I_VOP)
        printf(" I-VOP ");
    else
        printf(" P-VOP (ref.%d)", video->forwardRefVop->refSelectCode);
#endif

    /************************************/
    /*  Update Skip Next Frame          */
    /************************************/
    *nLayer = UpdateSkipNextFrame(video, nextModTime, size, pv_status);
    if (*nLayer == -1) /* skip current frame */
    {
        /* make sure that pointers are restored to the previous state */
        if (currLayer == 0)
        {
            video->forwardRefVop = tempForwRefVop; /* For P-Vop base only */
            video->forwardRefVop->refSelectCode = tempRefSelCode;
        }

        return status;
    }

    /* If I-VOP was encoded, reset IntraPeriod */
    if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))
        video->nextEncIVop = encParams->IntraPeriod;

    /* Set HintTrack Information */
    if (currLayer != -1)
    {
        if (currVol->prevModuloTimeBase)
            video->hintTrackInfo.MTB = 1;
        else
            video->hintTrackInfo.MTB = 0;
        video->hintTrackInfo.LayerID = (UChar)currVol->volID;
        video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;
        video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;
    }

    /************************************************/
    /* Determine nLayer and timeInc for next encode */
    /* 12/27/00 always go by the highest layer      */
    /************************************************/

    /**********************************************************/
    /* Copy Reconstructed Buffer to Output Video Frame Buffer */
    /**********************************************************/
    vid_out->yChan = video->currVop->yChan;
    vid_out->uChan = video->currVop->uChan;
    vid_out->vChan = video->currVop->vChan;
    if (video->encParams->H263_Enabled)
    {
        vid_out->height = video->currVop->height; /* padded height */
        vid_out->pitch = video->currVop->width;   /* padded width */
    }
    else
    {
        vid_out->height = video->currVop->height + 32; /* padded height */
        vid_out->pitch = video->currVop->width + 32;   /* padded width */
    }
    //video_out->timestamp = video->modTime;
    vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * 1000) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);

    /*// End /////////////////////// */

    /***********************************/
    /* Update Ouput bstream byte count */
    /***********************************/
    *size = currVol->stream->byteCount;

    /****************************************/
    /*  Swap Vop Pointers for Base Layer    */
    /****************************************/
    if (currLayer == 0)
    {
        temp = video->prevBaseVop;
        video->prevBaseVop = video->currVop;
        video->prevBaseVop->padded = 0; /* not padded */
        video->currVop = temp;
        video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */
        video->forwardRefVop->refSelectCode = 1;
    }
    else
    {
        temp = video->prevEnhanceVop;
        video->prevEnhanceVop = video->currVop;
        video->prevEnhanceVop->padded = 0; /* not padded */
        video->currVop = temp;
        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }

    /****************************************/
    /* Modify the intialize flag at the end.*/
    /****************************************/
    if (video->volInitialize[currLayer])
        video->volInitialize[currLayer] = 0;

    return status;
}
#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/*  Function : PVEncodeFrameSet()                                           */
/*  Date     : 04/18/2000                                                   */
/*  Purpose  : Enter a video frame and perform front-end time check plus ME */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if successed, PV_FALSE if failed.
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/* Slice-mode front end: accepts a frame, selects the coding layer, sets up */
/* the current Vop and runs EncodeVop() with slice_coding enabled.  The     */
/* bitstream itself is then emitted slice-by-slice via PVEncodeSlice().     */
OSCL_EXPORT_REF Bool PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer)
{
    Bool status = PV_TRUE;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl);
    Int width_16, height_16;
    Int width, height;
    Int encodeVop = 0;
    void PaddingEdge(Vop *padVop);
    Int currLayer = -1;
    //Int nLayers = encParams->nLayers;

    ULong modTime = vid_in->timestamp;

#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
    static Int rand_idx = 0;
#endif

    /*******************************************************/
    /* Determine Next Vop to encode, if any, and nLayer    */
    /*******************************************************/
    video->modTime = modTime;

    //i = nLayers-1;

    if (video->volInitialize[0]) /* first vol to code */
    {
        /* anchor the time reference on a whole second */
        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % 1000));
    }

    encodeVop = DetermineCodingLayer(video, nLayer, modTime);

    currLayer = *nLayer;

    /******************************************/
    /* If post-skipping still effective --- return */
    /******************************************/
    if (!encodeVop) /* skip enh layer, no base layer coded --- return */
    {
#ifdef _PRINT_STAT
        printf("No frame coded. Continue to next frame.");
#endif
        *nLayer = -1;

        /* expected next code time, convert back to millisec */
        *nextModTime = video->nextModTime;;
        return status;
    }

    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/
    currVol = video->vol[currLayer];
    currVol->stream->bufferSize = 0;   /* app buffer attached later, per slice */
    BitstreamEncReset(currVol->stream);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/
    if (video->volInitialize[currLayer])
    {
        video->currVop->timeInc = 0;
        video->prevBaseVop->timeInc = 0;
    }

    /***************************************************/
    /* Copy Input Video Frame to Internal Video Buffer */
    /***************************************************/
    /* Determine Width and Height of Vop Layer */
    width = encParams->LayerWidth[currLayer];    /* Get input width */
    height = encParams->LayerHeight[currLayer];  /* Get input height */
    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */
    width_16 = ((width + 15) / 16) * 16;
    height_16 = ((height + 15) / 16) * 16;

    video->input = vid_in;  /* point to the frame input */

    /*// End ////////////////////////////// */

    /**************************************/
    /* Determine VOP Type                 */
    /* 6/2/2001, separate function        */
    /**************************************/
    DetermineVopType(video, currLayer);

    /****************************/
    /*    Initialize VOP        */
    /****************************/
    video->currVop->volID = currVol->volID;
    video->currVop->width = width_16;
    video->currVop->height = height_16;
    if (video->encParams->H263_Enabled) /*  11/28/05 */
    {
        video->currVop->pitch = width_16;
    }
    else
    {
        video->currVop->pitch = width_16 + 32;
    }
    video->currVop->timeInc = currVol->timeIncrement;
    video->currVop->vopCoded = 1;
    video->currVop->roundingType = 0;
    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;

    /* Select forward reference; the saved state lives in the VideoEncData  */
    /* (not locals) because restoration happens later, in PVEncodeSlice().  */
    if (currLayer == 0
#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
            || random_val[rand_idx] || video->volInitialize[currLayer]
#endif
       )
    {
        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevBaseVop;
        video->forwardRefVop->refSelectCode = 1;
    }
#ifdef RANDOM_REFSELCODE
    else
    {
        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }
    rand_idx++;
    rand_idx %= 30;
#endif

    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;
    video->currVop->gobNumber = 0;
    video->currVop->gobFrameID = video->currVop->predictionType;
    video->currVop->temporalRef = ((modTime) * 30 / 1001) % 256;

    video->currVop->temporalInterval = 0;

    if (video->currVop->predictionType == I_VOP)
        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];
    else
        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];

    /****************/
    /*  Encode Vop  */
    /****************/
    video->slice_coding = 1;    /* slice mode: actual emission deferred */

    /*pv_status =*/
    EncodeVop(video);
#ifdef _PRINT_STAT
    if (video->currVop->predictionType == I_VOP)
        printf(" I-VOP ");
    else
        printf(" P-VOP (ref.%d)", video->forwardRefVop->refSelectCode);
#endif

    /* Set HintTrack Information */
    if (currVol->prevModuloTimeBase)
        video->hintTrackInfo.MTB = 1;
    else
        video->hintTrackInfo.MTB = 0;
    video->hintTrackInfo.LayerID = (UChar)currVol->volID;
    video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;
    video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;

    return status;
}
#endif /* NO_SLICE_ENCODE */
#ifndef NO_SLICE_ENCODE
/*
======================================================================== */ /* Function : PVEncodePacket() */ /* Date : 04/18/2002 */ /* Purpose : Encode one packet and return bitstream */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size, Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime) { PV_STATUS pv_status; VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData; VideoEncParams *encParams = video->encParams; Vol *currVol; PV_STATUS EncodeVOS_Start(VideoEncControls *encCtrl); Vop *temp; void PaddingEdge(Vop *padVop); Int currLayer = video->currLayer; Int pre_skip; Int pre_size; /**************************************************************/ /* Initialize Vol stream structure with application bitstream */ /**************************************************************/ currVol = video->vol[currLayer]; currVol->stream->bitstreamBuffer = bstream; pre_size = currVol->stream->byteCount; currVol->stream->bufferSize = pre_size + (*size); /***********************************************************/ /* Encode VOS and VOL Headers on first call for each layer */ /***********************************************************/ if (video->volInitialize[currLayer]) { if (!video->encParams->GetVolHeader[currLayer]) pv_status = EncodeVOS_Start(encCtrl); } /****************/ /* Encode Slice */ /****************/ pv_status = EncodeSlice(video); *endofFrame = 0; if (video->mbnum >= currVol->nTotalMB && !video->end_of_buf) { *endofFrame = 1; /************************************/ /* Update Skip Next Frame */ /************************************/ pre_skip = UpdateSkipNextFrame(video, nextModTime, size, pv_status); /* modified such that no pre-skipped */ if (pre_skip == -1) /* error */ { *endofFrame = -1; /* make sure that pointers are restored to the 
previous state */ if (currLayer == 0) { video->forwardRefVop = video->tempForwRefVop; /* For P-Vop base only */ video->forwardRefVop->refSelectCode = video->tempRefSelCode; } return pv_status; } /* If I-VOP was encoded, reset IntraPeriod */ if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP)) video->nextEncIVop = encParams->IntraPeriod; /**********************************************************/ /* Copy Reconstructed Buffer to Output Video Frame Buffer */ /**********************************************************/ vid_out->yChan = video->currVop->yChan; vid_out->uChan = video->currVop->uChan; vid_out->vChan = video->currVop->vChan; if (video->encParams->H263_Enabled) { vid_out->height = video->currVop->height; /* padded height */ vid_out->pitch = video->currVop->width; /* padded width */ } else { vid_out->height = video->currVop->height + 32; /* padded height */ vid_out->pitch = video->currVop->width + 32; /* padded width */ } //vid_out->timestamp = video->modTime; vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * 1000) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5); /*// End /////////////////////// */ /****************************************/ /* Swap Vop Pointers for Base Layer */ /****************************************/ if (currLayer == 0) { temp = video->prevBaseVop; video->prevBaseVop = video->currVop; video->prevBaseVop->padded = 0; /* not padded */ video->currVop = temp; video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */ video->forwardRefVop->refSelectCode = 1; } else { temp = video->prevEnhanceVop; video->prevEnhanceVop = video->currVop; video->prevEnhanceVop->padded = 0; /* not padded */ video->currVop = temp; video->forwardRefVop = video->prevEnhanceVop; video->forwardRefVop->refSelectCode = 0; } } /***********************************/ /* Update Ouput bstream byte count */ /***********************************/ *size = currVol->stream->byteCount - 
pre_size; /****************************************/ /* Modify the intialize flag at the end.*/ /****************************************/ if (video->volInitialize[currLayer]) video->volInitialize[currLayer] = 0; return pv_status; } #endif /* NO_SLICE_ENCODE */ /* ======================================================================== */ /* Function : PVGetH263ProfileLevelID() */ /* Date : 02/05/2003 */ /* Purpose : Get H.263 Profile ID and level ID for profile 0 */ /* In/out : Profile ID=0, levelID is what we want */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* Note : h263Level[8], rBR_bound[8], max_h263_framerate[2] */ /* max_h263_width[2], max_h263_height[2] are global */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID) { VideoEncData *encData; Int width, height; float bitrate_r, framerate; /* For this version, we only support H.263 profile 0 */ *profileID = 0; *levelID = 0; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; if (!encData->encParams->H263_Enabled) return PV_FALSE; /* get image width, height, bitrate and framerate */ width = encData->encParams->LayerWidth[0]; height = encData->encParams->LayerHeight[0]; bitrate_r = (float)(encData->encParams->LayerBitRate[0]) / (float)64000.0; framerate = encData->encParams->LayerFrameRate[0]; if (!width || !height || !(bitrate_r > 0 && framerate > 0)) return PV_FALSE; /* This is the most frequent case : level 10 */ if (bitrate_r <= rBR_bound[1] && framerate <= max_h263_framerate[0] && (width <= max_h263_width[0] && height <= max_h263_height[0])) { *levelID = h263Level[1]; return PV_TRUE; } else if (bitrate_r > rBR_bound[4] || (width > max_h263_width[1] || height > max_h263_height[1]) || framerate > max_h263_framerate[1]) /* check the highest level 70 */ { 
*levelID = h263Level[7]; return PV_TRUE; } else /* search level 20, 30, 40 */ { /* pick out level 20 */ if (bitrate_r <= rBR_bound[2] && ((width <= max_h263_width[0] && height <= max_h263_height[0] && framerate <= max_h263_framerate[1]) || (width <= max_h263_width[1] && height <= max_h263_height[1] && framerate <= max_h263_framerate[0]))) { *levelID = h263Level[2]; return PV_TRUE; } else /* width, height and framerate are ok, now choose level 30 or 40 */ { *levelID = (bitrate_r <= rBR_bound[3] ? h263Level[3] : h263Level[4]); return PV_TRUE; } } } /* ======================================================================== */ /* Function : PVGetMPEG4ProfileLevelID() */ /* Date : 26/06/2008 */ /* Purpose : Get MPEG4 Level after initialized */ /* In/out : profile_level according to interface */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer) { VideoEncData* video; Int i; video = (VideoEncData *)encCtrl->videoEncoderData; if (nLayer == 0) { for (i = 0; i < MAX_BASE_PROFILE + 1; i++) { if (video->encParams->ProfileLevel[0] == profile_level_code[i]) { break; } } *profile_level = i; } else { for (i = 0; i < MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE; i++) { if (video->encParams->ProfileLevel[1] == scalable_profile_level_code[i]) { break; } } *profile_level = i + MAX_BASE_PROFILE + 1; } return true; } #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVUpdateEncFrameRate */ /* Date : 04/08/2002 */ /* Purpose : Update target frame rates of the encoded base and enhance */ /* layer(if any) while encoding operation is ongoing */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate) { VideoEncData *encData; Int i;// nTotalMB, mbPerSec; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; /* Update the framerates for all the layers */ for (i = 0; i < encData->encParams->nLayers; i++) { /* New check: encoding framerate should be consistent with the given profile and level */ //nTotalMB = (((encData->encParams->LayerWidth[i]+15)/16)*16)*(((encData->encParams->LayerHeight[i]+15)/16)*16)/(16*16); //mbPerSec = (Int)(nTotalMB * frameRate[i]); //if(mbPerSec > encData->encParams->LayerMaxMbsPerSec[i]) return PV_FALSE; if (frameRate[i] > encData->encParams->LayerMaxFrameRate[i]) return PV_FALSE; /* set by users or profile */ encData->encParams->LayerFrameRate[i] = frameRate[i]; } RC_UpdateBXRCParams((void*) encData); return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVUpdateBitRate */ /* Date : 04/08/2002 */ /* Purpose : Update target bit rates of the encoded base and enhance */ /* layer(if any) while encoding operation is ongoing */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate) { VideoEncData *encData; Int i; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; /* Update the bitrates for all the layers */ for (i = 0; i < encData->encParams->nLayers; i++) { if (bitRate[i] > encData->encParams->LayerMaxBitRate[i]) /* set by users or profile */ { return PV_FALSE; } encData->encParams->LayerBitRate[i] = bitRate[i]; } RC_UpdateBXRCParams((void*) encData); return PV_TRUE; } #endif #ifndef LIMITED_API /* ============================================================================ */ /* Function : PVUpdateVBVDelay() */ /* Date : 4/23/2004 */ /* Purpose : Update VBV buffer size(in delay) */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ============================================================================ */ Bool PVUpdateVBVDelay(VideoEncControls *encCtrl, float delay) { VideoEncData *encData; Int total_bitrate, max_buffer_size; int index; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; /* Check whether the input delay is valid based on the given profile */ total_bitrate = (encData->encParams->nLayers == 1 ? encData->encParams->LayerBitRate[0] : encData->encParams->LayerBitRate[1]); index = encData->encParams->profile_table_index; max_buffer_size = (encData->encParams->nLayers == 1 ? 
profile_level_max_VBV_size[index] : scalable_profile_level_max_VBV_size[index]); if (total_bitrate*delay > (float)max_buffer_size) return PV_FALSE; encData->encParams->VBV_delay = delay; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVUpdateIFrameInterval() */ /* Date : 04/10/2002 */ /* Purpose : updates the INTRA frame refresh interval while encoding */ /* is ongoing */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; if (encData->encParams == NULL) return PV_FALSE; encData->encParams->IntraPeriod = aIFramePeriod; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVSetNumIntraMBRefresh() */ /* Date : 08/05/2003 */ /* Purpose : */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. */ /* Modified : */ /* */ /* ======================================================================== */ OSCL_EXPORT_REF Bool PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB) { VideoEncData *encData; encData = (VideoEncData *)encCtrl->videoEncoderData; if (encData == NULL) return PV_FALSE; encData->encParams->Refresh = numMB; return PV_TRUE; } #endif #ifndef LIMITED_API /* ======================================================================== */ /* Function : PVIFrameRequest() */ /* Date : 04/10/2002 */ /* Purpose : encodes the next base frame as an I-Vop */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/ /* Modified : */ /* */
/* ======================================================================== */
/* Forces the next encoded base-layer frame to be coded as an I-VOP by      */
/* arming nextEncIVop (consumed by the frame-type decision logic).          */
OSCL_EXPORT_REF Bool PVIFrameRequest(VideoEncControls *encCtrl)
{
    VideoEncData *encData;

    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL)
        return PV_FALSE;
    if (encData->encParams == NULL)
        return PV_FALSE;

    encData->nextEncIVop = 1;
    return PV_TRUE;
}
#endif
#ifndef LIMITED_API
/* ======================================================================== */
/* Function : PVGetEncMemoryUsage() */
/* Date : 10/17/2000 */
/* Purpose : */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed. */
/* Modified : */
/* */
/* ======================================================================== */
/* Returns the recorded memory usage of the encoder.  NOTE(review): returns */
/* PV_FALSE (an error flag) through an Int return on a NULL handle, which   */
/* is indistinguishable from a genuine usage value of the same number.      */
OSCL_EXPORT_REF Int PVGetEncMemoryUsage(VideoEncControls *encCtrl)
{
    VideoEncData *encData;

    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL)
        return PV_FALSE;
    if (encData->encParams == NULL)
        return PV_FALSE;

    return encData->encParams->MemoryUsage;
}
#endif
/* ======================================================================== */
/* Function : PVGetHintTrack() */
/* Date : 1/17/2001, */
/* Purpose : */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */
/* ======================================================================== */
/* Copies the MP4 hint-track information (MTB, layer ID, code type and      */
/* reference-selection code) of the most recent frame into *info.           */
OSCL_EXPORT_REF Bool PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info)
{
    VideoEncData *encData;

    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL)
        return PV_FALSE;
    if (encData->encParams == NULL)
        return PV_FALSE;

    info->MTB = encData->hintTrackInfo.MTB;
    info->LayerID = encData->hintTrackInfo.LayerID;
    info->CodeType = encData->hintTrackInfo.CodeType;
    info->RefSelCode = encData->hintTrackInfo.RefSelCode;

    return PV_TRUE;
}
/* ======================================================================== */
/* Function : PVGetMaxVideoFrameSize() */
/* Date : 7/17/2001, */
/* Purpose : Function merely returns the maximum buffer size */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed. */
/* Modified : */
/* */
/* ======================================================================== */
/* Reports the worst-case coded-frame size in bytes: the larger layer VBV   */
/* buffer size converted from bits, floored at 4000 bytes.                  */
OSCL_EXPORT_REF Bool PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize)
{
    VideoEncData *encData;

    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL)
        return PV_FALSE;
    if (encData->encParams == NULL)
        return PV_FALSE;

    *maxVideoFrameSize = encData->encParams->BufferSize[0];

    if (encData->encParams->nLayers == 2)
        if (*maxVideoFrameSize < encData->encParams->BufferSize[1])
            *maxVideoFrameSize = encData->encParams->BufferSize[1];
    *maxVideoFrameSize >>= 3; /* Convert to Bytes */

    if (*maxVideoFrameSize <= 4000)
        *maxVideoFrameSize = 4000;

    return PV_TRUE;
}
#ifndef LIMITED_API
/* ======================================================================== */
/* Function : PVGetVBVSize() */
/* Date : 4/15/2002 */
/* Purpose : Function merely returns the maximum buffer size */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */
/* ======================================================================== */
/* Returns the total VBV buffer size in bits (sum over both layers when     */
/* two-layer scalable coding is in use).                                    */
OSCL_EXPORT_REF Bool PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize)
{
    VideoEncData *encData;

    encData = (VideoEncData *)encCtrl->videoEncoderData;

    if (encData == NULL)
        return PV_FALSE;
    if (encData->encParams == NULL)
        return PV_FALSE;

    *VBVSize = encData->encParams->BufferSize[0];
    if (encData->encParams->nLayers == 2)
        *VBVSize += encData->encParams->BufferSize[1];

    return PV_TRUE;
}
#endif
/* ======================================================================== */
/* Function : EncodeVOS_Start() */
/* Date : 08/22/2000 */
/* Purpose : Encodes the VOS,VO, and VOL or Short Headers */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed. */
/* Modified : */
/* */
/* ======================================================================== */
/* Emits the MPEG-4 sequence-level headers (VisualObjectSequence,           */
/* VisualObject, VO and VOL) into bitstream1.  A no-op in short-video-      */
/* header (H.263) mode, where none of these headers exist.  The field order */
/* and widths below follow the VOL syntax and must not be reordered.        */
PV_STATUS EncodeVOS_Start(VideoEncControls *encoderControl)
{

    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;
    Vol *currVol = video->vol[video->currLayer];
    PV_STATUS status = PV_SUCCESS;
    //int profile_level=0x01;
    BitstreamEncVideo *stream = video->bitstream1;
    int i, j;

    /********************************/
    /* Check for short_video_header */
    /********************************/
    if (currVol->shortVideoHeader == 1)
        return status;
    else
    {
        /* Short Video Header or M4V */

        /**************************/
        /* VisualObjectSequence ()*/
        /**************************/
        status = BitstreamPutGT16Bits(stream, 32, SESSION_START_CODE);
        /* Determine profile_level */
        status = BitstreamPutBits(stream, 8, video->encParams->ProfileLevel[video->currLayer]);

        /******************/
        /* VisualObject() */
        /******************/

        status = BitstreamPutGT16Bits(stream, 32, VISUAL_OBJECT_START_CODE);
        status = BitstreamPut1Bits(stream, 0x00); /* visual object identifier */
        status = BitstreamPutBits(stream, 4, 0x01); /* visual object Type == "video ID" */
        status = BitstreamPut1Bits(stream, 0x00); /* no video signal type */

        /*temp = */
        BitstreamMpeg4ByteAlignStuffing(stream);

        status = BitstreamPutGT16Bits(stream, 27, VO_START_CODE);/* byte align: should be 2 bits */
        status = BitstreamPutBits(stream, 5, 0x00);/* Video ID = 0 */

        /**********************/
        /* VideoObjectLayer() */
        /**********************/
        if (currVol->shortVideoHeader == 0)
        {
            /* M4V else Short Video Header */
            status = BitstreamPutGT16Bits(stream, VOL_START_CODE_LENGTH, VOL_START_CODE);
            status = BitstreamPutBits(stream, 4, currVol->volID);/* video_object_layer_id */
            status = BitstreamPut1Bits(stream, 0x00);/* Random Access = 0 */

            if (video->currLayer == 0)
                status = BitstreamPutBits(stream, 8, 0x01);/* Video Object Type Indication = 1 ... Simple Object Type */
            else
                status = BitstreamPutBits(stream, 8, 0x02);/* Video Object Type Indication = 2 ... Simple Scalable Object Type */

            status = BitstreamPut1Bits(stream, 0x00);/* is_object_layer_identifer = 0 */

            status = BitstreamPutBits(stream, 4, 0x01); /* aspect_ratio_info = 1 ... 1:1(Square) */
            status = BitstreamPut1Bits(stream, 0x00);/* vol_control_parameters = 0 */
            status = BitstreamPutBits(stream, 2, 0x00);/* video_object_layer_shape = 00 ... rectangular */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 16, currVol->timeIncrementResolution);/* vop_time_increment_resolution */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPut1Bits(stream, currVol->fixedVopRate);/* fixed_vop_rate = 0 */

            /* For Rectangular VO layer shape */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->width);/* video_object_layer_width */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->height);/* video_object_layer_height */
            status = BitstreamPut1Bits(stream, 0x01);/*marker bit */

            status = BitstreamPut1Bits(stream, 0x00);/*interlaced = 0 */
            status = BitstreamPut1Bits(stream, 0x01);/* obmc_disable = 1 */
            status = BitstreamPut1Bits(stream, 0x00);/* sprite_enable = 0 */
            status = BitstreamPut1Bits(stream, 0x00);/* not_8_bit = 0 */
            status = BitstreamPut1Bits(stream, currVol->quantType);/* quant_type */

            if (currVol->quantType)
            {
                status = BitstreamPut1Bits(stream, currVol->loadIntraQuantMat); /* Intra quant matrix */
                if (currVol->loadIntraQuantMat)
                {
                    /* Write the intra matrix in zigzag order, truncated after
                       the last run of distinct values (trailing 0 terminates). */
                    for (j = 63; j >= 1; j--)
                        if (currVol->iqmat[*(zigzag_i+j)] != currVol->iqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->iqmat[*(zigzag_i+j)] == currVol->iqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->iqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    for (j = 0; j < 64; j++)
                        currVol->iqmat[j] = mpeg_iqmat_def[j];
                }
                status = BitstreamPut1Bits(stream, currVol->loadNonIntraQuantMat); /* Non-Intra quant matrix */
                if (currVol->loadNonIntraQuantMat)
                {
                    /* Same truncated zigzag emission for the non-intra matrix. */
                    for (j = 63; j >= 1; j--)
                        if (currVol->niqmat[*(zigzag_i+j)] != currVol->niqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->niqmat[*(zigzag_i+j)] == currVol->niqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->niqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    for (j = 0; j < 64; j++)
                        currVol->niqmat[j] = mpeg_nqmat_def[j];
                }
            }

            status = BitstreamPut1Bits(stream, 0x01); /* complexity_estimation_disable = 1 */
            status = BitstreamPut1Bits(stream, currVol->ResyncMarkerDisable);/* Resync_marker_disable */
            status = BitstreamPut1Bits(stream, currVol->dataPartitioning);/* Data partitioned */

            if (currVol->dataPartitioning)
                status = BitstreamPut1Bits(stream, currVol->useReverseVLC); /* Reversible_vlc */

            if (currVol->scalability) /* Scalability*/
            {
                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 1 */
                status = BitstreamPut1Bits(stream, currVol->scalType);/* hierarchy _type ... Spatial= 0 and Temporal = 1 */
                status = BitstreamPutBits(stream, 4, currVol->refVolID);/* ref_layer_id */
                status = BitstreamPut1Bits(stream, currVol->refSampDir);/* ref_layer_sampling_direc*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_n);/*hor_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_m);/*hor_sampling_factor_m*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_n);/*vert_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_m);/*vert_sampling_factor_m*/
                status = BitstreamPut1Bits(stream, currVol->enhancementType);/* enhancement_type*/
            }
            else /* No Scalability */
                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 0 */

            /*temp = */
            BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align Headers for VOP */
        }
    }

    return status;
}
/* ======================================================================== */
/* Function : VOS_End() */
/* Date : 08/22/2000 */
/* Purpose : Visual Object Sequence End */
/* In/out : */
/* Return : PV_TRUE if successed, PV_FALSE if failed.
*/ /* Modified : */ /* */ /* ======================================================================== */ PV_STATUS VOS_End(VideoEncControls *encoderControl) { PV_STATUS status = PV_SUCCESS; VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData; Vol *currVol = video->vol[video->currLayer]; BitstreamEncVideo *stream = currVol->stream; status = BitstreamPutBits(stream, SESSION_END_CODE, 32); return status; } /* ======================================================================== */ /* Function : DetermineCodingLayer */ /* Date : 06/02/2001 */ /* Purpose : Find layer to code based on current mod time, assuming that it's time to encode enhanced layer. */ /* In/out : */ /* Return : Number of layer to code. */ /* Modified : */ /* */ /* ======================================================================== */ Int DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime) { Vol **vol = video->vol; VideoEncParams *encParams = video->encParams; Int numLayers = encParams->nLayers; UInt modTimeRef = video->modTimeRef; float *LayerFrameRate = encParams->LayerFrameRate; UInt frameNum[4], frameTick; ULong frameModTime, nextFrmModTime; #ifdef REDUCE_FRAME_VARIANCE /* To limit how close 2 frames can be */ float frameInterval; #endif float srcFrameInterval; Int frameInc; Int i, extra_skip; Int encodeVop = 0; i = numLayers - 1; if (modTime - video->nextModTime > ((ULong)(-1)) >> 1) /* next time wrapped around */ return 0; /* not time to code it yet */ video->relLayerCodeTime[i] -= 1000; video->nextEncIVop--; /* number of Vops in highest layer resolution. 
*/
    video->numVopsInGOP++;

    /* from this point frameModTime and nextFrmModTime are internal */

    frameNum[i] = (UInt)((modTime - modTimeRef) * LayerFrameRate[i] + 500) / 1000;
    if (video->volInitialize[i])
    {
        video->prevFrameNum[i] = frameNum[i] - 1;
    }
    else if (frameNum[i] <= video->prevFrameNum[i])
    {
        return 0; /* do not encode this frame */
    }

    /**** this part computes expected next frame *******/
    frameModTime = (ULong)(((frameNum[i] * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */
    nextFrmModTime = (ULong)((((frameNum[i] + 1) * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */

    srcFrameInterval = 1000 / video->FrameRate;

    video->nextModTime = nextFrmModTime - (ULong)(srcFrameInterval / 2.) - 1; /* between current and next frame */

#ifdef REDUCE_FRAME_VARIANCE    /* To limit how close 2 frames can be */
    frameInterval = 1000 / LayerFrameRate[i]; /* next rec. time */
    delta = (Int)(frameInterval / 4); /* empirical number */
    if (video->nextModTime - modTime < (ULong)delta) /* need to move nextModTime further. */
    {
        video->nextModTime += ((delta - video->nextModTime + modTime)); /* empirical formula */
    }
#endif
    /****************************************************/

    /* map frame no.to tick from modTimeRef */
    /*frameTick = (frameNum[i]*vol[i]->timeIncrementResolution) ;
    frameTick = (UInt)((frameTick + (encParams->LayerFrameRate[i]/2))/encParams->LayerFrameRate[i]);*/
    /* 11/16/01, change frameTick to be the closest tick from the actual modTime */
    /* 12/12/02, add (double) to prevent large number wrap-around */
    frameTick = (Int)(((double)(modTime - modTimeRef) * vol[i]->timeIncrementResolution + 500) / 1000);

    /* find timeIncrement to be put in the bitstream */
    /* refTick is second boundary reference.
*/
    vol[i]->timeIncrement = frameTick - video->refTick[i];

    vol[i]->moduloTimeBase = 0;
    /* Reduce the increment modulo the resolution; each wrap is one
       modulo_time_base tick (one elapsed second) in the bitstream. */
    while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution)
    {
        vol[i]->timeIncrement -= vol[i]->timeIncrementResolution;
        vol[i]->moduloTimeBase++;
        /* do not update refTick and modTimeRef yet, do it after encoding!! */
    }

    if (video->relLayerCodeTime[i] <= 0) /* no skipping */
    {
        encodeVop = 1;
        video->currLayer = *nLayer = i;
        video->relLayerCodeTime[i] += 1000;

        /* takes care of more dropped frame than expected */
        extra_skip = -1;
        frameInc = (frameNum[i] - video->prevFrameNum[i]);
        extra_skip += frameInc;
        if (extra_skip > 0)
        {   /* update rc->Nr, rc->B, (rc->Rr)*/
            video->nextEncIVop -= extra_skip;
            video->numVopsInGOP += extra_skip;
            if (encParams->RC_Type != CONSTANT_Q)
            {
                RC_UpdateBuffer(video, i, extra_skip);
            }
        }
    }
    /* update frame no. */
    video->prevFrameNum[i] = frameNum[i];

    /* go through all lower layer */
    for (i = (numLayers - 2); i >= 0; i--)
    {
        video->relLayerCodeTime[i] -= 1000;

        /* find timeIncrement to be put in the bitstream */
        vol[i]->timeIncrement = frameTick - video->refTick[i];

        if (video->relLayerCodeTime[i] <= 0) /* time to encode base */
        {
            /* 12/27/00 */
            encodeVop = 1;
            video->currLayer = *nLayer = i;
            video->relLayerCodeTime[i] +=
                (Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]);

            vol[i]->moduloTimeBase = 0;
            while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution)
            {
                vol[i]->timeIncrement -= vol[i]->timeIncrementResolution;
                vol[i]->moduloTimeBase++;
                /* do not update refTick and modTimeRef yet, do it after encoding!!
*/
            }

            /* takes care of more dropped frame than expected */
            frameNum[i] = (UInt)((frameModTime - modTimeRef) * encParams->LayerFrameRate[i] + 500) / 1000;
            if (video->volInitialize[i])
                video->prevFrameNum[i] = frameNum[i] - 1;

            extra_skip = -1;
            frameInc = (frameNum[i] - video->prevFrameNum[i]);
            extra_skip += frameInc;
            if (extra_skip > 0)
            {   /* update rc->Nr, rc->B, (rc->Rr)*/
                if (encParams->RC_Type != CONSTANT_Q)
                {
                    RC_UpdateBuffer(video, i, extra_skip);
                }
            }
            /* update frame no. */
            video->prevFrameNum[i] = frameNum[i];
        }
    }

#ifdef _PRINT_STAT
    if (encodeVop)
        printf(" TI: %d ", vol[*nLayer]->timeIncrement);
#endif

    return encodeVop;
}

/* ======================================================================== */
/* Function : DetermineVopType */
/* Date : 06/02/2001 */
/* Purpose : The name says it all. */
/* In/out : */
/* Return : void . */
/* Modified : */
/* */
/* ======================================================================== */
/* Chooses I-VOP vs P-VOP for the current frame based on IntraPeriod:       */
/* 0 = all-intra base layer, -1 = one leading I then all P, >0 = periodic I.*/
void DetermineVopType(VideoEncData *video, Int currLayer)
{
    VideoEncParams *encParams = video->encParams;
//  Vol *currVol = video->vol[currLayer];

    if (encParams->IntraPeriod == 0) /* I-VOPs only */
    {
        if (video->currLayer > 0)
            video->currVop->predictionType = P_VOP;
        else
        {
            video->currVop->predictionType = I_VOP;
            if (video->numVopsInGOP >= 132)
                video->numVopsInGOP = 0;
        }
    }
    else if (encParams->IntraPeriod == -1)  /* IPPPPP... */
    {

        /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */
        if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1)
            video->currVop->predictionType = P_VOP;

        if (video->currLayer == 0)
        {
            if (/*video->numVopsInGOP>=132 || */video->volInitialize[currLayer])
            {
                video->currVop->predictionType = I_VOP;
                video->numVopsInGOP = 0; /* force INTRA update every 132 base frames*/
                video->nextEncIVop = 1;
            }
            else if (video->nextEncIVop == 0 || video->currVop->predictionType == I_VOP)
            {
                video->numVopsInGOP = 0;
                video->nextEncIVop = 1;
            }
        }
    }
    else /* IntraPeriod>0 : IPPPPPIPPPPPI...
*/
    {

        /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */
        if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1)
            video->currVop->predictionType = P_VOP;

        if (currLayer == 0)
        {
            if (video->nextEncIVop <= 0 || video->currVop->predictionType == I_VOP)
            {
                video->nextEncIVop = encParams->IntraPeriod;
                video->currVop->predictionType = I_VOP;
                video->numVopsInGOP = 0;
            }
        }
    }

    return ;
}

/* ======================================================================== */
/* Function : UpdateSkipNextFrame */
/* Date : 06/02/2001 */
/* Purpose : From rate control frame skipping decision, update timing related parameters. */
/* In/out : */
/* Return : Current coded layer. */
/* Modified : */
/* */
/* ======================================================================== */
/* Applies the rate controller's skip decision after a frame was coded:     */
/* returns -1 (and zeroes *size) when the current frame must be discarded,  */
/* otherwise updates refTick/modTimeRef bookkeeping and returns the layer.  */
Int UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status)
{
    Int currLayer = video->currLayer;
    Int nLayer = currLayer;
    VideoEncParams *encParams = video->encParams;
    Int numLayers = encParams->nLayers;
    Vol *currVol = video->vol[currLayer];
    Vol **vol = video->vol;
    Int num_skip, extra_skip;
    Int i;
    UInt newRefTick, deltaModTime;
    UInt temp;

    if (encParams->RC_Type != CONSTANT_Q)
    {
        if (video->volInitialize[0] && currLayer == 0)   /* always encode the first frame */
        {
            RC_ResetSkipNextFrame(video, currLayer);
            //return currLayer;  09/15/05
        }
        else
        {
            if (RC_GetSkipNextFrame(video, currLayer) < 0 || status == PV_END_OF_BUF)   /* Skip Current Frame */
            {
#ifdef _PRINT_STAT
                printf("Skip current frame");
#endif
                currVol->moduloTimeBase = currVol->prevModuloTimeBase;

                /*********************/
                /* prepare to return */
                /*********************/
                *size = 0;  /* Set Bitstream buffer to zero */

                /* Determine nLayer and modTime for next encode */
                *modTime = video->nextModTime;
                nLayer = -1;

                return nLayer; /* return immediately without updating RefTick & modTimeRef */
                /* If I-VOP was attempted, then ensure next base is I-VOP */
                /*if((encParams->IntraPeriod>0) && (video->currVop->predictionType == I_VOP))
                video->nextEncIVop = 0; commented out by 06/05/01 */
            }
            else if ((num_skip = RC_GetSkipNextFrame(video, currLayer)) > 0)
            {
#ifdef _PRINT_STAT
                printf("Skip next %d frames", num_skip);
#endif
                /* to keep the Nr of enh layer the same */
                /* adjust relLayerCodeTime only, do not adjust layerCodeTime[numLayers-1] */
                extra_skip = 0;
                for (i = 0; i < currLayer; i++)
                {
                    if (video->relLayerCodeTime[i] <= 1000)
                    {
                        extra_skip = 1;
                        break;
                    }
                }

                for (i = currLayer; i < numLayers; i++)
                {
                    video->relLayerCodeTime[i] += (num_skip + extra_skip) *
                                                  ((Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]));
                }
            }
        }/* first frame */
    }

    /***** current frame is encoded, now update refTick ******/
    video->refTick[currLayer] += vol[currLayer]->prevModuloTimeBase * vol[currLayer]->timeIncrementResolution;

    /* Reset layerCodeTime every I-VOP to prevent overflow */
    if (currLayer == 0)
    {
        /* 12/12/02, fix for weird targer frame rate of 9.99 fps or 3.33 fps */
        if (((encParams->IntraPeriod != 0) /*&& (video->currVop->predictionType==I_VOP)*/) ||
                ((encParams->IntraPeriod == 0) && (video->numVopsInGOP == 0)))
        {
            newRefTick = video->refTick[0];

            for (i = 1; i < numLayers; i++)
            {
                if (video->refTick[i] < newRefTick)
                    newRefTick = video->refTick[i];
            }

            /* check to make sure that the update is integer multiple of frame number */
            /* how many msec elapsed from last modTimeRef */
            deltaModTime = (newRefTick / vol[0]->timeIncrementResolution) * 1000;

            for (i = numLayers - 1; i >= 0; i--)
            {
                temp = (UInt)(deltaModTime * encParams->LayerFrameRate[i]); /* 12/12/02 */
                if (temp % 1000)
                    newRefTick = 0;

            }
            if (newRefTick > 0)
            {
                video->modTimeRef += deltaModTime;
                for (i = numLayers - 1; i >= 0; i--)
                {
                    video->prevFrameNum[i] -= (UInt)(deltaModTime * encParams->LayerFrameRate[i]) / 1000;
                    video->refTick[i] -= newRefTick;
                }
            }
        }
    }

    *modTime =  video->nextModTime;

    return nLayer;
}

#ifndef ORIGINAL_VERSION

/*
======================================================================== */
/* Function : SetProfile_BufferSize */
/* Date : 04/08/2002 */
/* Purpose : Set profile and video buffer size, copied from Jim's code */
/* in PVInitVideoEncoder(.), since we have different places */
/* to reset profile and video buffer size */
/* In/out : */
/* Return : */
/* Modified : */
/* */
/* ======================================================================== */
/* Derives the VBV buffer sizes and, unless bInitialized is false (buffer-  */
/* size-only update), searches the profile/level tables for the smallest    */
/* profile@level that fits the configured bitrate, packet size, MB/s and    */
/* VBV size of each layer.  Returns PV_FALSE when the configuration cannot  */
/* fit the profile/level at profile_table_index.                            */
Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized)
{
    Int i, j, start, end;
//  Int BaseMBsPerSec = 0, EnhMBsPerSec = 0;
    Int nTotalMB = 0;
    Int idx, temp_w, temp_h, max = 0, max_width, max_height;

    Int nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */

    Int total_bitrate = 0, base_bitrate;
    Int total_packet_size = 0, base_packet_size;
    Int total_MBsPerSec = 0, base_MBsPerSec;
    Int total_VBV_size = 0, base_VBV_size, enhance_VBV_size = 0;
    float total_framerate, base_framerate;
    float upper_bound_ratio;
    Int bFound = 0;
    Int k = 0, width16, height16, index;
    Int lowest_level;

#define MIN_BUFF    16000 /* 16k minimum buffer size */
#define BUFF_CONST  2.0   /* 2000ms */
#define UPPER_BOUND_RATIO 8.54 /* upper_bound = 1.4*(1.1+bound/10)*bitrate/framerate */

#define QCIF_WIDTH  176
#define QCIF_HEIGHT 144

    index = video->encParams->profile_table_index;

    /* Calculate "nTotalMB" */
    /* Find the maximum width*height for memory allocation of the VOPs */
    for (idx = 0; idx < nLayers; idx++)
    {
        temp_w = video->encParams->LayerWidth[idx];
        temp_h = video->encParams->LayerHeight[idx];

        if ((temp_w*temp_h) > max)
        {
            max = temp_w * temp_h;
            max_width = temp_w;
            max_height = temp_h;
            nTotalMB = ((max_width + 15) >> 4) * ((max_height + 15) >> 4);
        }
    }
    upper_bound_ratio = (video->encParams->RC_Type == CBR_LOWDELAY ? (float)5.0 : (float)UPPER_BOUND_RATIO);

    /* Get the basic information: bitrate, packet_size, MBs/s and VBV_size */
    base_bitrate        = video->encParams->LayerBitRate[0];
    if (video->encParams->LayerMaxBitRate[0] != 0) /* video->encParams->LayerMaxBitRate[0] == 0 means it has not been set */
    {
        base_bitrate    = PV_MAX(base_bitrate, video->encParams->LayerMaxBitRate[0]);
    }
    else /* if the max is not set, set it to the specified profile/level */
    {
        video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[index];
    }

    base_framerate      = video->encParams->LayerFrameRate[0];
    if (video->encParams->LayerMaxFrameRate[0] != 0)
    {
        base_framerate  = PV_MAX(base_framerate, video->encParams->LayerMaxFrameRate[0]);
    }
    else /* if the max is not set, set it to the specified profile/level */
    {
        video->encParams->LayerMaxFrameRate[0] = (float)profile_level_max_mbsPerSec[index] / nTotalMB;
    }

    base_packet_size    = video->encParams->ResyncPacketsize;
    base_MBsPerSec      = (Int)(base_framerate * nTotalMB);
    base_VBV_size       = PV_MAX((Int)(base_bitrate * delay),
                                 (Int)(upper_bound_ratio * base_bitrate / base_framerate));
    base_VBV_size       = PV_MAX(base_VBV_size, MIN_BUFF);

    /* if the buffer is larger than maximum buffer size, we'll clip it */
    if (base_VBV_size > profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5])
        base_VBV_size = profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5];


    /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */
    if (nLayers == 1 && base_VBV_size > profile_level_max_VBV_size[index])
        return FALSE;


    if (nLayers == 2) /* check both enhanced and base layer */
    {
        /* NOTE(review): the base layer clamps with PV_MAX above but the    */
        /* enhancement layer uses PV_MIN here -- asymmetry preserved as-is; */
        /* confirm against upstream intent before changing.                 */
        total_bitrate       = video->encParams->LayerBitRate[1];
        if (video->encParams->LayerMaxBitRate[1] != 0)
        {
            total_bitrate   = PV_MIN(total_bitrate, video->encParams->LayerMaxBitRate[1]);
        }
        else /* if the max is not set, set it to the specified profile/level */
        {
            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[index];
        }

        total_framerate     = video->encParams->LayerFrameRate[1];
        if (video->encParams->LayerMaxFrameRate[1] != 0)
        {
            total_framerate     = PV_MIN(total_framerate, video->encParams->LayerMaxFrameRate[1]);
        }
        else /* if the max is not set, set it to the specified profile/level */
        {
            video->encParams->LayerMaxFrameRate[1] = (float)scalable_profile_level_max_mbsPerSec[index] / nTotalMB;
        }

        total_packet_size   = video->encParams->ResyncPacketsize;
        total_MBsPerSec     = (Int)(total_framerate * nTotalMB);
        enhance_VBV_size    = PV_MAX((Int)((total_bitrate - base_bitrate) * delay),
                                     (Int)(upper_bound_ratio * (total_bitrate - base_bitrate) / (total_framerate - base_framerate)));
        enhance_VBV_size    = PV_MAX(enhance_VBV_size, MIN_BUFF);

        total_VBV_size      = base_VBV_size + enhance_VBV_size;

        /* if the buffer is larger than maximum buffer size, we'll clip it */
        if (total_VBV_size > scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1])
        {
            total_VBV_size = scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1];
            enhance_VBV_size = total_VBV_size - base_VBV_size;
        }

        /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */
        if (total_VBV_size > scalable_profile_level_max_VBV_size[index])
            return FALSE;
    }


    if (!bInitialized) /* Has been initialized --> profile @ level has been figured out! */
    {
        video->encParams->BufferSize[0] = base_VBV_size;
        if (nLayers > 1)
            video->encParams->BufferSize[1] = enhance_VBV_size;

        return PV_TRUE;
    }


    /* Profile @ level determination */
    if (nLayers == 1)
    {
        /* check other parameters */
        /* BASE ONLY : Simple Profile(SP) Or Core Profile(CP) */
        if (base_bitrate     > profile_level_max_bitrate[index]     ||
                base_packet_size > profile_level_max_packet_size[index] ||
                base_MBsPerSec   > profile_level_max_mbsPerSec[index]   ||
                base_VBV_size    > profile_level_max_VBV_size[index])
            return PV_FALSE; /* Beyond the bound of Core Profile @ Level2 */

        /* For H263/Short header, determine k*16384 */
        /* This part only applies to Short header mode, but not H.263 */
        width16  = ((video->encParams->LayerWidth[0] + 15) >> 4) << 4;
        height16 = ((video->encParams->LayerHeight[0] + 15) >> 4) << 4;
        if (video->encParams->H263_Enabled)
        {
            k = 4;
            if (width16  == 2*QCIF_WIDTH && height16 == 2*QCIF_HEIGHT)  /* CIF */
                k = 16;
            else if (width16  == 4*QCIF_WIDTH && height16 == 4*QCIF_HEIGHT)  /* 4CIF */
                k = 32;
            else if (width16  == 8*QCIF_WIDTH && height16 == 8*QCIF_HEIGHT)  /* 16CIF */
                k = 64;

            video->encParams->maxFrameSize  = k * 16384;

            /* Make sure the buffer size is limited to the top profile and level: the SPL5 */
            if (base_VBV_size > (Int)(k*16384 + 4*(float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5]*1001.0 / 30000.0))
                base_VBV_size = (Int)(k * 16384 + 4 * (float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5] * 1001.0 / 30000.0);

            if (base_VBV_size > (Int)(k*16384 + 4*(float)profile_level_max_bitrate[index]*1001.0 / 30000.0))
                return PV_FALSE;
        }

        /* Search the appropriate profile@level index */
        if (!video->encParams->H263_Enabled &&
                (video->encParams->IntraDCVlcThr != 0 || video->encParams->SearchRange > 16))
        {
            lowest_level = SIMPLE_PROFILE_LEVEL1; /* cannot allow SPL0 */
        }
        else
        {
            lowest_level = SIMPLE_PROFILE_LEVEL0; /* SPL0 */
        }

        for (i = lowest_level; i <= index; i++)
        {
            /* Since CPL1 is smaller than SPL4A, SPL5, this search favors Simple Profile. */
            if (base_bitrate     <= profile_level_max_bitrate[i]     &&
                    base_packet_size <= profile_level_max_packet_size[i] &&
                    base_MBsPerSec   <= profile_level_max_mbsPerSec[i]   &&
                    base_VBV_size    <= (video->encParams->H263_Enabled ? (Int)(k*16384 + 4*(float)profile_level_max_bitrate[i]*1001.0 / 30000.0) :
                                         profile_level_max_VBV_size[i]))
                break;
        }
        if (i > index)
            return PV_FALSE; /* Nothing found!! */

        /* Found out the actual profile @ level : index "i" */
        if (i == 0)
        {
            /* For Simple Profile @ Level 0, we need to do one more check: image size <= QCIF */
            if (width16 > QCIF_WIDTH || height16 > QCIF_HEIGHT)
                i = 1; /* image size > QCIF, then set SP level1 */
        }

        video->encParams->ProfileLevel[0] = profile_level_code[i];
        video->encParams->BufferSize[0]   = base_VBV_size;

        if (video->encParams->LayerMaxBitRate[0] == 0)
            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[i];

        if (video->encParams->LayerMaxFrameRate[0] == 0)
            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[i] / nTotalMB);

        /* For H263/Short header, one special constraint for VBV buffer size */
        if (video->encParams->H263_Enabled)
            video->encParams->BufferSize[0] = (Int)(k * 16384 + 4 * (float)profile_level_max_bitrate[i] * 1001.0 / 30000.0);

    }
    else
    {
        /* SCALABALE MODE: Simple Scalable Profile(SSP) Or Core Scalable Profile(CSP) */

        if (total_bitrate       > scalable_profile_level_max_bitrate[index]     ||
                total_packet_size   > scalable_profile_level_max_packet_size[index] ||
                total_MBsPerSec     > scalable_profile_level_max_mbsPerSec[index]   ||
                total_VBV_size      > scalable_profile_level_max_VBV_size[index])
            return PV_FALSE; /* Beyond given profile and level */

        /* One-time check: Simple Scalable Profile or Core Scalable Profile */
        if (total_bitrate           <= scalable_profile_level_max_bitrate[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]     &&
                total_packet_size   <= scalable_profile_level_max_packet_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1] &&
                total_MBsPerSec     <= scalable_profile_level_max_mbsPerSec[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]   &&
                total_VBV_size      <= scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1])
        {
            start = 0;
            end = index;
        }
        else
        {
            start = CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1;
            end = index;
        }

        /* Search the scalable profile */
        for (i = start; i <= end; i++)
        {
            if (total_bitrate       <= scalable_profile_level_max_bitrate[i]     &&
                    total_packet_size   <= scalable_profile_level_max_packet_size[i] &&
                    total_MBsPerSec     <= scalable_profile_level_max_mbsPerSec[i]   &&
                    total_VBV_size      <= scalable_profile_level_max_VBV_size[i])
                break;
        }
        if (i > end)
            return PV_FALSE;

        /* Search for matching base profile */
        if (i == 0)
        {
            j = 0;
            bFound = 1;
        }
        else
            bFound = 0;

        if (i >= CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1)
        {
            start = CORE_PROFILE_LEVEL1;    /* range for CORE PROFILE */
            end   = CORE_PROFILE_LEVEL2;
        }
        else
        {
            start = SIMPLE_PROFILE_LEVEL0;  /* range for SIMPLE PROFILE */
            end   = SIMPLE_PROFILE_LEVEL5;
        }

        for (j = start; !bFound && j <= end; j++)
        {
            if (base_bitrate        <= profile_level_max_bitrate[j]     &&
                    base_packet_size    <= profile_level_max_packet_size[j] &&
                    base_MBsPerSec      <= profile_level_max_mbsPerSec[j]   &&
                    base_VBV_size       <= profile_level_max_VBV_size[j])
            {
                bFound = 1;
                break;
            }
        }

        if (!bFound) // && start == 4)
            return PV_FALSE; /* mis-match in the profiles between base layer and enhancement layer */

        /* j for base layer, i for enhancement layer */
        video->encParams->ProfileLevel[0] = profile_level_code[j];
        video->encParams->ProfileLevel[1] = scalable_profile_level_code[i];
        video->encParams->BufferSize[0]   = base_VBV_size;
        video->encParams->BufferSize[1]   = enhance_VBV_size;

        if (video->encParams->LayerMaxBitRate[0] == 0)
            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[j];

        if (video->encParams->LayerMaxBitRate[1] == 0)
            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[i];

        if (video->encParams->LayerMaxFrameRate[0] == 0)
            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[j] / nTotalMB);

        if (video->encParams->LayerMaxFrameRate[1] == 0)
            video->encParams->LayerMaxFrameRate[1] = PV_MIN(30, (float)scalable_profile_level_max_mbsPerSec[i] / nTotalMB);

    } /* end of: if(nLayers == 1) */

    if (!video->encParams->H263_Enabled && (video->encParams->ProfileLevel[0] == 0x08)) /* SPL0 restriction*/
    {
        /* PV only allow frame-based rate control, no QP change from one MB to another
        if(video->encParams->ACDCPrediction == TRUE && MB-based rate control)
        return PV_FALSE */
    }

    return PV_TRUE;
}
#endif /* #ifndef ORIGINAL_VERSION */



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/mp4enc_lib.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */

/* mp4enc_lib.h -- internal function prototypes of the MPEG-4/H.263 encoder
 * core.  Each group of declarations below is implemented in the source file
 * named by the comment that introduces the group. */
#ifndef _MP4ENC_LIB_H_
#define _MP4ENC_LIB_H_

#include "mp4def.h" // typedef
#include "mp4lib_int.h" // main video structure

#ifdef __cplusplus
extern "C"
{
#endif

/* defined in vop.c */
PV_STATUS EncodeVop(VideoEncData *video);
PV_STATUS EncodeSlice(VideoEncData *video);
PV_STATUS EncodeVideoPacketHeader(VideoEncData *video, int MB_number,
                                  int quant_scale, Int insert);
#ifdef ALLOW_VOP_NOT_CODED
PV_STATUS EncodeVopNotCoded(VideoEncData *video, UChar *bstream, Int *size, ULong modTime);
#endif

/* defined in combined_decode.c */
PV_STATUS EncodeFrameCombinedMode(VideoEncData *video);
PV_STATUS EncodeSliceCombinedMode(VideoEncData *video);

/* defined in datapart_decode.c */
PV_STATUS EncodeFrameDataPartMode(VideoEncData *video);
PV_STATUS EncodeSliceDataPartMode(VideoEncData *video);

/* defined in fastcodeMB.c */
//void m4v_memset(void *adr_dst, uint8 value, uint32 size);
PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int offsetQP, Int ncoefblck[]);
#ifndef NO_MPEG_QUANT
PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int offsetQP, Int ncoefblck[]);
#endif
Int getBlockSAV(Short block[]);
Int Sad8x8(UChar *rec, UChar *prev, Int lx);
Int getBlockSum(UChar *rec, Int lx);

/* defined in dct.c */
void blockIdct(Short *block);
void blockIdct_SSE(Short *input);
void BlockDCTEnc(Short *blockData, Short *blockCoeff);

/*---- FastQuant.c -----*/
Int cal_dc_scalerENC(Int QP, Int type) ;
Int BlockQuantDequantH263Inter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int dctMode, Int comp, Int dummy, UChar shortHeader);
Int BlockQuantDequantH263Intra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int dctMode, Int comp, Int dc_scaler, UChar shortHeader);
Int BlockQuantDequantH263DCInter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam, UChar
                                 *bitmaprow, UInt *bitmapzz, Int dummy, UChar shortHeader);
Int BlockQuantDequantH263DCIntra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam, UChar
                                 *bitmaprow, UInt *bitmapzz, Int dc_scaler, UChar shortHeader);
#ifndef NO_MPEG_QUANT
Int BlockQuantDequantMPEGInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int DctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantMPEGIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
                               Int DctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantMPEGDCInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy);
Int BlockQuantDequantMPEGDCIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);
#endif

/*---- FastIDCT.c -----*/
void BlockIDCTMotionComp(Short *block, UChar *bitmapcol, UChar bitmaprow,
                         Int dctMode, UChar *rec, UChar *prev, Int lx_intra_zeroMV);

/* defined in motion_comp.c */
void getMotionCompensatedMB(VideoEncData *video, Int ind_x, Int ind_y, Int offset);
void EncPrediction_INTER(Int xpred, Int ypred, UChar *c_prev, UChar *c_rec, Int width, Int round1);
void EncPrediction_INTER4V(Int xpred, Int ypred, MOT *mot, UChar *c_prev, UChar *c_rec, Int width, Int round1);
void EncPrediction_Chrom(Int xpred, Int ypred, UChar *cu_prev, UChar *cv_prev, UChar *cu_rec, UChar *cv_rec,
                         Int pitch_uv, Int width_uv, Int height_uv, Int round1);
void get_MB(UChar *c_prev, UChar *c_prev_u , UChar *c_prev_v, Short mb[6][64], Int width, Int width_uv);
void PutSkippedBlock(UChar *rec, UChar *prev, Int lx);

/* defined in motion_est.c */
void MotionEstimation(VideoEncData *video);
#ifdef HTFM
void InitHTFM(VideoEncData *video, HTFM_Stat *htfm_stat, double *newvar, Int *collect);
void UpdateHTFM(VideoEncData *video, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat);
#endif

/* defined in ME_utils.c */
void ChooseMode_C(UChar *Mode, UChar *cur, Int lx, Int min_SAD);
void ChooseMode_MMX(UChar *Mode, UChar *cur, Int lx, Int min_SAD);
void GetHalfPelMBRegion_C(UChar *cand, UChar *hmem, Int lx);
void GetHalfPelMBRegion_SSE(UChar *cand, UChar *hmem, Int lx);
void GetHalfPelBlkRegion(UChar *cand, UChar *hmem, Int lx);
void PaddingEdge(Vop *padVop);
void ComputeMBSum_C(UChar *cur, Int lx, MOT *mot_mb);
void ComputeMBSum_MMX(UChar *cur, Int lx, MOT *mot_mb);
void ComputeMBSum_SSE(UChar *cur, Int lx, MOT *mot_mb);
void GetHalfPelMBRegionPadding(UChar *ncand, UChar *hmem, Int lx, Int *reptl);
void GetHalfPelBlkRegionPadding(UChar *ncand, UChar *hmem, Int lx, Int *reptl);

/* defined in findhalfpel.c */
void FindHalfPelMB(VideoEncData *video, UChar *cur, MOT *mot, UChar *ncand,
                   Int xpos, Int ypos, Int *xhmin, Int *yhmin, Int hp_guess);
Int FindHalfPelBlk(VideoEncData *video, UChar *cur, MOT *mot, Int sad16, UChar *ncand8[], UChar *mode,
                   Int xpos, Int ypos, Int *xhmin, Int *yhmin, UChar *hp_mem);

/* defined in sad.c */
Int SAD_MB_HalfPel_Cxhyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_Cyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_Cxh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_MMX(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_SSE(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Blk_HalfPel_C(UChar *ref, UChar *blk, Int dmin, Int lx, Int rx, Int xh, Int yh, void *extra_info);
Int SAD_Blk_HalfPel_MMX(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Blk_HalfPel_SSE(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Macroblock_C(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Macroblock_MMX(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Macroblock_SSE(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Block_C(UChar *ref, UChar *blk, Int dmin,
                Int lx, void *extra_info);
Int SAD_Block_MMX(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Block_SSE(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);

#ifdef HTFM /* Hypothesis Testing Fast Matching */
Int SAD_MB_HP_HTFM_Collectxhyh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);
Int SAD_MB_HP_HTFM_Collectyh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);
Int SAD_MB_HP_HTFM_Collectxh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);
Int SAD_MB_HP_HTFMxhyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HP_HTFMyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HP_HTFMxh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HTFM_Collect(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HTFM(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
#endif

/* on-the-fly padding */
Int SAD_Blk_PADDING(UChar *ref, UChar *cur, Int dmin, Int lx, void *extra_info);
Int SAD_MB_PADDING(UChar *ref, UChar *cur, Int dmin, Int lx, void *extra_info);
#ifdef HTFM
Int SAD_MB_PADDING_HTFM_Collect(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_MB_PADDING_HTFM(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
#endif

/* defined in rate_control.c */
/* These are APIs to rate control exposed to core encoder module.
*/ PV_STATUS RC_Initialize(void *video); PV_STATUS RC_VopQPSetting(VideoEncData *video, rateControl *rc[]); PV_STATUS RC_VopUpdateStat(VideoEncData *video, rateControl *rc); PV_STATUS RC_MBQPSetting(VideoEncData *video, rateControl *rc, Int start_packet_header); PV_STATUS RC_MBUpdateStat(VideoEncData *video, rateControl *rc, Int Bi, Int Hi); PV_STATUS RC_Cleanup(rateControl *rc[], Int numLayers); Int RC_GetSkipNextFrame(VideoEncData *video, Int currLayer); Int RC_GetRemainingVops(VideoEncData *video, Int currLayer); void RC_ResetSkipNextFrame(VideoEncData *video, Int currLayer); PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip); PV_STATUS RC_UpdateBXRCParams(void *input); /* defined in vlc_encode.c */ void MBVlcEncodeDataPar_I_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr); void MBVlcEncodeDataPar_P_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr); void MBVlcEncodeCombined_I_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr); void MBVlcEncodeCombined_P_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr); void BlockCodeCoeff_ShortHeader(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode); void BlockCodeCoeff_RVLC(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode); void BlockCodeCoeff_Normal(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode); #ifdef __cplusplus } #endif #endif /* _MP4ENC_LIB_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/mp4lib_int.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ #ifndef _MP4LIB_INT_H_ #define _MP4LIB_INT_H_ #include "oscl_types.h" #include "mp4def.h" #include "mp4enc_api.h" #include "rate_control.h" /* BitstreamEncVideo will be modified */ typedef struct tagBitstream { Int(*writeVideoPacket)(UChar *buf, Int nbytes_required); /*write video packet out */ UChar *bitstreamBuffer; /*buffer to hold one video packet*/ Int bufferSize; /*total bitstream buffer size in bytes */ Int byteCount; /*how many bytes already encoded*/ UInt word; /*hold one word temporarily */ Int bitLeft; /*number of bits left in "word" */ UChar* overrunBuffer; /* pointer to overrun buffer */ Int oBSize; /* length of overrun buffer */ struct tagVideoEncData *video; } BitstreamEncVideo; typedef struct tagVOP { PIXEL *yChan; /* The Y component */ PIXEL *uChan; /* The U component */ PIXEL *vChan; /* The V component */ Int frame; /* frame number */ Int volID; /* Layer number */ //Int timeStamp; /* Vop TimeStamp in msec */ /* Syntax elements copied from VOL (standard) */ Int width; /* Width (multiple of 16) */ Int height; /* Height (multiple of 16) */ Int pitch; /* Pitch (differs from width for UMV case) */ Int padded; /* flag whether this frame has been padded */ /* Actual syntax elements for VOP (standard) */ Int predictionType; /* VOP prediction type */ Int timeInc; /* VOP time increment (relative to last mtb) */ Int vopCoded; Int roundingType; Int intraDCVlcThr; Int quantizer; /* VOP quantizer */ Int fcodeForward; /* VOP dynamic range of motion vectors */ Int fcodeBackward; /* VOP 
dynamic range of motion vectors */ Int refSelectCode; /* enhancement layer reference select code */ /* H.263 parameters */ Int gobNumber; Int gobFrameID; Int temporalRef; /* temporal reference, roll over at 256 */ Int temporalInterval; /* increase every 256 temporalRef */ } Vop; typedef struct tagVol { Int volID; /* VOL identifier (for tracking) */ Int shortVideoHeader; /* shortVideoHeader mode */ Int GOVStart; /* Insert GOV Header */ Int timeIncrementResolution; /* VOL time increment */ Int nbitsTimeIncRes; /* number of bits for time increment */ Int timeIncrement; /* time increment */ Int moduloTimeBase; /* internal decoder clock */ Int prevModuloTimeBase; /* in case of pre-frameskip */ Int fixedVopRate; BitstreamEncVideo *stream; /* library bitstream buffer (input buffer) */ /* VOL Dimensions */ Int width; /* Width */ Int height; /* Height */ /* Error Resilience Flags */ Int ResyncMarkerDisable; /* VOL Disable Resynch Markers */ Int useReverseVLC; /* VOL reversible VLCs */ Int dataPartitioning; /* VOL data partitioning */ /* Quantization related parameters */ Int quantPrecision; /* Quantizer precision */ Int quantType; /* MPEG-4 or H.263 Quantization Type */ /* Added loaded quant mat, 05/22/2000 */ Int loadIntraQuantMat; /* Load intra quantization matrix */ Int loadNonIntraQuantMat; /* Load nonintra quantization matrix */ Int iqmat[64]; /* Intra quant.matrix */ Int niqmat[64]; /* Non-intra quant.matrix */ /* Parameters used for scalability */ Int scalability; /* VOL scalability (flag) */ Int scalType; /* temporal = 0, spatial = 1, both = 2 */ Int refVolID; /* VOL id of reference VOL */ Int refSampDir; /* VOL resol. of ref. VOL */ Int horSamp_n; /* VOL hor. resampling of ref. VOL given by */ Int horSamp_m; /* sampfac = hor_samp_n/hor_samp_m */ Int verSamp_n; /* VOL ver. resampling of ref. 
VOL given by */ Int verSamp_m; /* sampfac = ver_samp_n/ver_samp_m */ Int enhancementType; /* VOL type of enhancement layer */ /* These variables were added since they are used a lot. */ Int nMBPerRow, nMBPerCol; /* number of MBs in each row & column */ Int nTotalMB; Int nBitsForMBID; /* how many bits required for MB number? */ /* for short video header */ Int nMBinGOB; /* number of MBs in GOB, 05/22/00 */ Int nGOBinVop; /* number of GOB in Vop 05/22/00 */ } Vol; typedef struct tagMacroBlock { Int mb_x; /* X coordinate */ Int mb_y; /* Y coordinate */ Short block[9][64]; /* 4-Y, U and V blocks , and AAN Scale*/ } MacroBlock; typedef struct tagRunLevelBlock { Int run[64]; /* Runlength */ Int level[64]; /* Abs(level) */ Int s[64]; /* sign level */ } RunLevelBlock; typedef struct tagHeaderInfoDecVideo { UChar *Mode; /* Modes INTRA/INTER/etc. */ UChar *CBP; /* MCBPC/CBPY stuff */ } HeaderInfoEncVideo; typedef Short typeDCStore[6]; /* ACDC */ typedef Short typeDCACStore[4][8]; typedef struct tagMOT { Int x; /* half-pel resolution x component */ Int y; /* half-pel resolution y component */ Int sad; /* SAD */ } MOT; typedef struct tagHintTrackInfo { UChar MTB; UChar LayerID; UChar CodeType; UChar RefSelCode; } HintTrackInfo; typedef struct tagVideoEncParams { //Int Width; /* Input Width */ //Int Height; /* Input Height */ //float FrameRate; /* Input Frame Rate */ UInt TimeIncrementRes; /* timeIncrementRes */ /*VOL Parameters */ Int nLayers; Int LayerWidth[4]; /* Encoded Width */ Int LayerHeight[4]; /* Encoded Height */ float LayerFrameRate[4]; /* Encoded Frame Rate */ Int LayerBitRate[4]; /* Encoded BitRate */ Int LayerMaxBitRate[4]; /* Maximum Encoded BitRate */ float LayerMaxFrameRate[4]; /* Maximum Encoded Frame Rate */ Int LayerMaxMbsPerSec[4]; /* Maximum mbs per second, according to the specified profile and level */ Int LayerMaxBufferSize[4]; /* Maximum buffer size, according to the specified profile and level */ Bool ResyncMarkerDisable; /* Disable Resync Marker */ 
Bool DataPartitioning; /* Base Layer Data Partitioning */ Bool ReversibleVLC; /* RVLC when Data Partitioning */ Bool ACDCPrediction; /* AC/DC Prediction */ Int QuantType[4]; /* H263, MPEG2 */ Int InitQuantBvop[4]; Int InitQuantPvop[4]; Int InitQuantIvop[4]; Int ResyncPacketsize; Int RoundingType; Int IntraDCVlcThr; /* Rate Control Parameters */ MP4RateControlType RC_Type; /*Constant Q, M4 constantRate, VM5+, M4RC,MPEG2TM5 */ /* Intra Refresh Parameters */ Int IntraPeriod; /* Intra update period */ Int Refresh; /* Number of MBs refresh in each frame */ /* Other Parameters */ Bool SceneChange_Det; /* scene change detection */ Bool FineFrameSkip_Enabled; /* src rate resolution frame skipping */ Bool VBR_Enabled; /* VBR rate control */ Bool NoFrameSkip_Enabled; /* do not allow frame skip */ Bool NoPreSkip_Enabled; /* do not allow pre-skip */ Bool H263_Enabled; /* H263 Short Header */ Bool GOV_Enabled; /* GOV Header Enabled */ Bool SequenceStartCode; /* This probably should be removed */ Bool FullSearch_Enabled; /* full-pel exhaustive search motion estimation */ Bool HalfPel_Enabled; /* Turn Halfpel ME on or off */ Bool MV8x8_Enabled; /* Enable 8x8 motion vectors */ Bool RD_opt_Enabled; /* Enable operational R-D optimization */ Int GOB_Header_Interval; /* Enable encoding GOB header in H263_WITH_ERR_RES and SHORT_HERDER_WITH_ERR_RES */ Int SearchRange; /* Search range for 16x16 motion vector */ Int MemoryUsage; /* Amount of memory allocated */ Int GetVolHeader[2]; /* Flag to check if Vol Header has been retrieved */ Int BufferSize[2]; /* Buffer Size for Base and Enhance Layers */ Int ProfileLevel[2]; /* Profile and Level for encoding purposes */ float VBV_delay; /* VBV buffer size in the form of delay */ Int maxFrameSize; /* maximum frame size(bits) for H263/Short header mode, k*16384 */ Int profile_table_index; /* index for profile and level tables given the specified profile and level */ } VideoEncParams; /* platform dependent functions */ typedef struct tagFuncPtr { 
// Int (*SAD_MB_HalfPel)(UChar *ref,UChar *blk,Int dmin_lx,Int xh,Int yh,void *extra_info); Int(*SAD_MB_HalfPel[4])(UChar*, UChar*, Int, void *); Int(*SAD_Blk_HalfPel)(UChar *ref, UChar *blk, Int dmin, Int lx, Int rx, Int xh, Int yh, void *extra_info); Int(*SAD_Macroblock)(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info); Int(*SAD_Block)(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info); Int(*SAD_MB_PADDING)(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info); /*, 4/21/01 */ void (*ComputeMBSum)(UChar *cur, Int lx, MOT *mot_mb); void (*ChooseMode)(UChar *Mode, UChar *cur, Int lx, Int min_SAD); void (*GetHalfPelMBRegion)(UChar *cand, UChar *hmem, Int lx); void (*blockIdct)(Int *block); } FuncPtr; /* 04/09/01, for multipass rate control */ typedef struct tagRDInfo { Int QP; Int actual_bits; float mad; float R_D; } RDInfo; typedef struct tagMultiPass { /* multipass rate control data */ Int target_bits; /* target bits for current frame, = rc->T */ Int actual_bits; /* actual bits for current frame obtained after encoding, = rc->Rc*/ Int QP; /* quantization level for current frame, = rc->Qc*/ Int prev_QP; /* quantization level for previous frame */ Int prev_prev_QP; /* quantization level for previous frame before last*/ float mad; /* mad for current frame, = video->avgMAD*/ Int bitrate; /* bitrate for current frame */ float framerate; /* framerate for current frame*/ Int nRe_Quantized; /* control variable for multipass encoding, */ /* 0 : first pass */ /* 1 : intermediate pass(quantization and VLC loop only) */ /* 2 : final pass(de-quantization, idct, etc) */ /* 3 : macroblock level rate control */ Int encoded_frames; /* counter for all encoded frames */ Int re_encoded_frames; /* counter for all multipass encoded frames*/ Int re_encoded_times; /* counter for all times of multipass frame encoding */ /* Multiple frame prediction*/ RDInfo **pRDSamples; /* pRDSamples[30][32], 30->30fps, 32 -> 5 bit quantizer, 32 candidates*/ Int framePos; /* specific 
position in previous multiple frames*/ Int frameRange; /* number of overall previous multiple frames */ Int samplesPerFrame[30]; /* number of samples per frame, 30->30fps */ /* Bit allocation for scene change frames and high motion frames */ float sum_mad; Int counter_BTsrc; /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */ Int counter_BTdst; /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */ float sum_QP; Int diff_counter; /* diff_counter = -diff_counter_BTdst, or diff_counter_BTsrc */ /* For target bitrate or framerate update */ float target_bits_per_frame; /* = C = bitrate/framerate */ float target_bits_per_frame_prev; /* previous C */ float aver_mad; /* so-far average mad could replace sum_mad */ float aver_mad_prev; /* previous average mad */ Int overlapped_win_size; /* transition period of time */ Int encoded_frames_prev; /* previous encoded_frames */ } MultiPass; /* End */ #ifdef HTFM typedef struct tagHTFM_Stat { Int abs_dif_mad_avg; UInt countbreak; Int offsetArray[16]; Int offsetRef[16]; } HTFM_Stat; #endif /* Global structure that can be passed around */ typedef struct tagVideoEncData { /* VOL Header Initialization */ UChar volInitialize[4]; /* Used to Write VOL Headers */ /* Data For Layers (Scalability) */ Int numberOfLayers; /* Number of Layers */ Vol **vol; /* Data stored for each VOL */ /* Data used for encoding frames */ VideoEncFrameIO *input; /* original input frame */ Vop *currVop; /* Current reconstructed VOP */ Vop *prevBaseVop; /* Previous reference Base Vop */ Vop *nextBaseVop; /* Next reference Base Vop */ Vop *prevEnhanceVop;/* Previous Enhancement Layer Vop */ Vop *forwardRefVop; /* Forward Reference VOP */ Vop *backwardRefVop; /* Backward Reference VOP */ /* scratch memory */ BitstreamEncVideo *bitstream1; /* Used for data partitioning */ BitstreamEncVideo *bitstream2; /* and combined modes as */ BitstreamEncVideo 
*bitstream3; /* intermediate storages */ UChar *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/ Int oBSize; /* size of allocated overrun buffer */ Int dc_scalar_1; /*dc scalar for Y block */ Int dc_scalar_2; /*dc scalar for U, V block*/ /* Annex L Rate Control */ rateControl *rc[4]; /* Pointer to Rate Control structure*/ /* 12/25/00, each R.C. for each layer */ /********* motion compensation related variables ****************/ MOT **mot; /* Motion vectors */ /* where [mbnum][0] = 1MV. [mbnum][1...4] = 4MVs [mbnum][5] = backward MV. [mbnum][6] = delta MV for direct mode. [mbnum][7] = nothing yet. */ UChar *intraArray; /* Intra Update Arrary */ float sumMAD; /* SAD/MAD for frame */ /* to speedup the SAD calculation */ void *sad_extra_info; #ifdef HTFM Int nrmlz_th[48]; /* Threshold for fast SAD calculation using HTFM */ HTFM_Stat htfm_stat; /* For statistics collection */ #endif /*Tao 04/09/00 For DCT routine */ UChar currYMB[256]; /* interleaved current macroblock in HTFM order */ MacroBlock *outputMB; /* Output MB to VLC encode */ UChar predictedMB[384]; /* scrath memory for predicted value */ RunLevelBlock RLB[6]; /* Run and Level of coefficients! 
*/ Short dataBlock[128]; /* DCT block data before and after quant/dequant*/ UChar bitmaprow[8]; /* Need to keep it for ACDCPrediction, 8 bytes for alignment, need only 6 */ UChar bitmapcol[6][8]; UInt bitmapzz[6][2]; /* for zigzag bitmap */ Int zeroMV; /* flag for zero MV */ Int usePrevQP; /* flag for intraDCVlcThreshold switch decision */ Int QP_prev; /* use for DQUANT calculation */ Int *acPredFlag; /* */ typeDCStore *predDC; /* The DC coeffs for each MB */ typeDCACStore *predDCAC_row; typeDCACStore *predDCAC_col; UChar *sliceNo; /* Slice Number for each MB */ Int header_bits; /* header bits in frmae */ HeaderInfoEncVideo headerInfo; /* MB Header information */ UChar zz_direction; /* direction of zigzag scan */ UChar *QPMB; /* Quantizer value for each MB */ /* Miscellaneous data points to be passed */ float FrameRate; /* Src frame Rate */ ULong nextModTime; /* expected next frame time */ UInt prevFrameNum[4]; /* previous frame number starting from modTimeRef */ UInt modTimeRef; /* Reference modTime update every I-Vop*/ UInt refTick[4]; /* second aligned referenc tick */ Int relLayerCodeTime[4];/* Next coding time for each Layer relative to highest layer */ ULong modTime; /* Input frame modTime */ Int currLayer; /* Current frame layer */ Int mbnum; /* Macroblock number */ /* slice coding, state variables */ Vop *tempForwRefVop; Int tempRefSelCode; Int end_of_buf; /* end of bitstream buffer flag */ Int slice_coding; /* flag for slice based coding */ Int totalSAD; /* So far total SAD for a frame */ Int numIntra; /* So far number of Intra MB */ Int offset; /* So far MB offset */ Int ind_x, ind_y; /* So far MB coordinate */ Int collect; Int hp_guess; /*********************************/ HintTrackInfo hintTrackInfo; /* hintTrackInfo */ /* IntraPeriod, Timestamp, etc. 
*/ float nextEncIVop; /* counter til the next I-Vop */ float numVopsInGOP; /* value at the beginning of nextEncIVop */ /* platform dependent functions */ FuncPtr *functionPointer; /* structure containing platform dependent functions */ /* Application controls */ VideoEncControls *videoEncControls; VideoEncParams *encParams; MultiPass *pMP[4]; /* for multipass encoding, 4 represents 4 layer encoding */ } VideoEncData; /*************************************************************/ /* VLC structures */ /*************************************************************/ typedef struct tagVLCtable { unsigned int code; /* right justified */ int len; } VLCtable, *LPVLCtable; /*************************************************************/ /* Approx DCT */ /*************************************************************/ typedef struct struct_approxDCT approxDCT; struct struct_approxDCT { Void(*BlockDCT8x8)(Int *, Int *, UChar *, UChar *, Int, Int); Void(*BlockDCT8x8Intra)(Int *, Int *, UChar *, UChar *, Int, Int); Void(*BlockDCT8x8wSub)(Int *, Int *, UChar *, UChar *, Int, Int); }; /*************************************************************/ /* QP structure */ /*************************************************************/ struct QPstruct { Int QPx2 ; Int QP; Int QPdiv2; Int QPx2plus; Int Addition; }; #endif /* _MP4LIB_INT_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/pvm4vencoder.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2010 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
*
*      http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* ------------------------------------------------------------------- */
#include "pvm4vencoder.h"
#include "oscl_mem.h"
#include "oscl_dll.h"

OSCL_DLL_ENTRY_POINT_DEFAULT()

/* ///////////////////////////////////////////////////////////////////////// */
/* Constructor: only nulls the optional color-converter pointer; real
 * initialization happens in Construct(). */
CPVM4VEncoder::CPVM4VEncoder()
{
#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)
    ccRGBtoYUV = NULL;
#endif
    //iEncoderControl
}

/* ///////////////////////////////////////////////////////////////////////// */
OSCL_EXPORT_REF CPVM4VEncoder::~CPVM4VEncoder()
{
#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)
    OSCL_DELETE(ccRGBtoYUV);
#endif
    Cancel(); /* CTimer function */
    Terminate();
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Factory: two-phase construction; returns NULL on allocation or
 * Construct() failure (the partially built object is deleted). */
OSCL_EXPORT_REF CPVM4VEncoder* CPVM4VEncoder::New(int32 aThreadId)
{
    CPVM4VEncoder* self = new CPVM4VEncoder;
    if (self && self->Construct(aThreadId))
        return self;
    if (self)
        OSCL_DELETE(self);
    return NULL;
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Second-phase init: zero the encoder control block, reset output-buffer
 * bookkeeping, and join the scheduler when a valid thread id is given. */
bool CPVM4VEncoder::Construct(int32 aThreadId)
{
    oscl_memset((void *)&iEncoderControl, 0, sizeof(VideoEncControls));
    iInitialized = false;
    iObserver = NULL;
    iNumOutputData = 0;
    iYUVIn = NULL;
    for (int i = 0; i < KCVEIMaxOutputBuffer; i++)
    {
        iOutputData[i] = NULL;
    }
    iState = EIdle;
    if (aThreadId >= 0)
        AddToScheduler();
    return true;
}

/* ///////////////////////////////////////////////////////////////////////// */
void CPVM4VEncoder::DoCancel()
{
    /* called when Cancel() is called.*/
    // They use Stop for PVEngine.cpp in PVPlayer.
    return ;
}

/* ///////////////////////////////////////////////////////////////////////// */
OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::SetObserver(MPVCVEIObserver *aObserver)
{
    iObserver = aObserver;
    return ECVEI_SUCCESS;
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Queue an output buffer; fails when the fixed-size pool is full. */
OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::AddBuffer(TPVVideoOutputData *aVidOut)
{
    if (iNumOutputData >= KCVEIMaxOutputBuffer)
    {
        return ECVEI_FAIL;
    }
    iOutputData[iNumOutputData++] = aVidOut;
    return ECVEI_SUCCESS;
}

/* ///////////////////////////////////////////////////////////////////////// */
/* Submit one input frame.  Converts/copies the source into YUV420 as
 * needed, hands it to the core encoder (PVEncodeFrameSet) and schedules
 * Run() to drain slices.  Frames earlier than iNextModTime are skipped
 * (iNumLayer = -1) but still reported to the observer. */
OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::Encode(TPVVideoInputData *aVidIn)
{
    ULong modTime;
    VideoEncFrameIO vid_in;

    if (iState != EIdle || iObserver == NULL)
    {
        return ECVEI_FAIL;
    }

    if (aVidIn->iTimeStamp >= iNextModTime)
    {
        if (iVideoFormat == ECVEI_YUV420)
#ifdef YUV_INPUT
        {
            if (iYUVIn) /* iSrcWidth is not multiple of 4 or iSrcHeight is odd number */
            {
                CopyToYUVIn(aVidIn->iSource, iSrcWidth, iSrcHeight,
                            ((iSrcWidth + 15) >> 4) << 4, ((iSrcHeight + 15) >> 4) << 4);
                iVideoIn = iYUVIn;
            }
            else /* otherwise, we can just use aVidIn->iSource */
            {
                iVideoIn = aVidIn->iSource;
            }
        }
#else
            return ECVEI_FAIL;
#endif
        if ((iVideoFormat == ECVEI_RGB12) || (iVideoFormat == ECVEI_RGB24) || (iVideoFormat == ECVEI_YUV420SEMIPLANAR))
#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)
        {
            ccRGBtoYUV->Convert(aVidIn->iSource, iYUVIn);
            iVideoIn = iYUVIn;
        }
#else
            return ECVEI_FAIL;
#endif

        /* assign with backward-P or B-Vop this timestamp must be re-ordered */
        iTimeStamp = aVidIn->iTimeStamp;
        modTime = iTimeStamp;

#ifdef NO_SLICE_ENCODE
        return ECVEI_FAIL;
#else
        /* frame dimensions rounded up to multiples of 16 (macroblock size) */
        vid_in.height = ((iSrcHeight + 15) >> 4) << 4;
        vid_in.pitch = ((iSrcWidth + 15) >> 4) << 4;
        vid_in.timestamp = modTime;
        vid_in.yChan = (UChar*)iVideoIn;
        vid_in.uChan = (UChar*)(iVideoIn + vid_in.height * vid_in.pitch);
        vid_in.vChan = vid_in.uChan + ((vid_in.height * vid_in.pitch) >>
2); /*status = */ (int) PVEncodeFrameSet(&iEncoderControl, &vid_in, &modTime, &iNumLayer); #endif iState = EEncode; RunIfNotReady(); return ECVEI_SUCCESS; } else /* if(aVidIn->iTimeStamp >= iNextModTime) */ { iTimeStamp = aVidIn->iTimeStamp; iNumLayer = -1; iState = EEncode; RunIfNotReady(); return ECVEI_SUCCESS; } } /* ///////////////////////////////////////////////////////////////////////// */ void CPVM4VEncoder::Run() { #ifndef NO_SLICE_ENCODE //Bool status; Int Size, endOfFrame = 0; ULong modTime; int32 oindx; VideoEncFrameIO vid_out; switch (iState) { case EEncode: /* find available bitstream */ if (iNumOutputData <= 0) { iObserver->HandlePVCVEIEvent(iId, ECVEI_NO_BUFFERS); RunIfNotReady(50); break; } oindx = --iNumOutputData; /* last-in first-out */ Size = iOutputData[oindx]->iBitStreamSize; iOutputData[oindx]->iExternalTimeStamp = iTimeStamp; iOutputData[oindx]->iVideoTimeStamp = iTimeStamp; iOutputData[oindx]->iFrame = iVideoOut; if (iNumLayer == -1) { iOutputData[oindx]->iBitStreamSize = 0; iOutputData[oindx]->iLayerNumber = iNumLayer; iState = EIdle; iObserver->HandlePVCVEIEvent(iId, ECVEI_FRAME_DONE, (uint32)iOutputData[oindx]); break; } /*status = */ (int) PVEncodeSlice(&iEncoderControl, (UChar*)iOutputData[oindx]->iBitStream, &Size, &endOfFrame, &vid_out, &modTime); iOutputData[oindx]->iBitStreamSize = Size; iOutputData[oindx]->iLayerNumber = iNumLayer; if (endOfFrame != 0) /* done with this frame */ { iNextModTime = modTime; iOutputData[oindx]->iFrame = iVideoOut = (uint8*)vid_out.yChan; iOutputData[oindx]->iVideoTimeStamp = vid_out.timestamp; if (endOfFrame == -1) /* pre-skip */ { iOutputData[oindx]->iLayerNumber = -1; } else { PVGetHintTrack(&iEncoderControl, &(iOutputData[oindx]->iHintTrack)); } iState = EIdle; iObserver->HandlePVCVEIEvent(iId, ECVEI_FRAME_DONE, (uint32)iOutputData[oindx]); } else { RunIfNotReady(); iObserver->HandlePVCVEIEvent(iId, ECVEI_BUFFER_READY, (uint32)iOutputData[oindx]); } break; default: break; } #endif /* 
NO_SLICE_ENCODE */ return ; } TCVEI_RETVAL CPVM4VEncoder::ParseFSI(uint8* aFSIBuff, int FSILength, VideoEncOptions *aEncOption) { uint32 codeword; mp4StreamType *psBits; psBits = (mp4StreamType *) oscl_malloc(sizeof(mp4StreamType)); if (psBits == NULL) return ECVEI_FAIL; psBits->data = aFSIBuff; psBits->numBytes = FSILength; psBits->bitBuf = 0; psBits->bitPos = 32; psBits->bytePos = 0; psBits->dataBitPos = 0; //visual_object_sequence_start_code ShowBits(psBits, 32, &codeword); if (codeword == VISUAL_OBJECT_SEQUENCE_START_CODE) { psBits->dataBitPos += 32; psBits->bitPos += 32; ReadBits(psBits, 8, &codeword); /* profile_and_level_indication */ switch (codeword) { case 0x08: /* SP_LVL0 */ { aEncOption->profile_level = SIMPLE_PROFILE_LEVEL0; break; } case 0x01: /* SP_LVL1 */ { aEncOption->profile_level = SIMPLE_PROFILE_LEVEL1; break; } case 0x02: /* SP_LVL2 */ { aEncOption->profile_level = SIMPLE_PROFILE_LEVEL2; break; } case 0x03: /* SP_LVL3 */ { aEncOption->profile_level = SIMPLE_PROFILE_LEVEL3; break; } case 0x21: /* CP_LVL1 */ { aEncOption->profile_level = CORE_PROFILE_LEVEL1; break; } case 0x22: /* CP_LVL2 */ { aEncOption->profile_level = CORE_PROFILE_LEVEL2; break; } default: { goto FREE_PS_BITS_AND_FAIL; } } ShowBits(psBits, 32, &codeword); if (codeword == USER_DATA_START_CODE) { goto FREE_PS_BITS_AND_FAIL; } //visual_object_start_code ReadBits(psBits, 32, &codeword); if (codeword != VISUAL_OBJECT_START_CODE) goto FREE_PS_BITS_AND_FAIL; /* is_visual_object_identifier */ ReadBits(psBits, 1, &codeword); if (codeword) goto FREE_PS_BITS_AND_FAIL; /* visual_object_type */ ReadBits(psBits, 4, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; /* video_signal_type */ ReadBits(psBits, 1, &codeword); if (codeword) goto FREE_PS_BITS_AND_FAIL; /* next_start_code() */ ByteAlign(psBits); ShowBits(psBits, 32, &codeword); if (codeword == USER_DATA_START_CODE) { goto FREE_PS_BITS_AND_FAIL; } } ShowBits(psBits, 27, &codeword); if (codeword == VO_START_CODE) { 
ReadBits(psBits, 32, &codeword); /* video_object_layer_start_code */ ShowBits(psBits, 28, &codeword); if (codeword != VOL_START_CODE) { ShowBits(psBits, 22, &codeword); if (codeword == SHORT_VIDEO_START_MARKER) { iDecodeShortHeader(psBits, aEncOption); return ECVEI_SUCCESS; } else { goto FREE_PS_BITS_AND_FAIL; } } /* video_object_layer_start_code */ ReadBits(psBits, 28, &codeword); /* vol_id (4 bits) */ ReadBits(psBits, 4, & codeword); // RandomAccessibleVOLFlag ReadBits(psBits, 1, &codeword); //Video Object Type Indication ReadBits(psBits, 8, &codeword); if (codeword > 2) { goto FREE_PS_BITS_AND_FAIL; } // is_object_layer_identifier ReadBits(psBits, 1, &codeword); if (codeword) goto FREE_PS_BITS_AND_FAIL; // aspect ratio ReadBits(psBits, 4, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; //vol_control_parameters ReadBits(psBits, 1, &codeword); if (codeword != 0) goto FREE_PS_BITS_AND_FAIL; // video_object_layer_shape ReadBits(psBits, 2, &codeword); if (codeword != 0) goto FREE_PS_BITS_AND_FAIL; //Marker bit ReadBits(psBits, 1, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; // vop_time_increment_resolution ReadBits(psBits, 16, &codeword); aEncOption->timeIncRes = codeword; //Marker bit ReadBits(psBits, 1, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; // fixed_vop_rate ReadBits(psBits, 1, &codeword); if (codeword != 0) goto FREE_PS_BITS_AND_FAIL; /* video_object_layer_shape is RECTANGULAR */ //Marker bit ReadBits(psBits, 1, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; /* this should be 176 for QCIF */ ReadBits(psBits, 13, &codeword); aEncOption->encWidth[0] = codeword; //Marker bit ReadBits(psBits, 1, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; /* this should be 144 for QCIF */ ReadBits(psBits, 13, &codeword); aEncOption->encHeight[0] = codeword; //Marker bit ReadBits(psBits, 1, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; //Interlaced ReadBits(psBits, 1, &codeword); if (codeword != 0) 
goto FREE_PS_BITS_AND_FAIL; //obmc_disable ReadBits(psBits, 1, &codeword); if (codeword != 1) goto FREE_PS_BITS_AND_FAIL; //sprite_enable ReadBits(psBits, 1, &codeword); if (codeword != 0) goto FREE_PS_BITS_AND_FAIL; //not_8_bit ReadBits(psBits, 1, &codeword); if (codeword != 0) goto FREE_PS_BITS_AND_FAIL; /* video_object_layer_shape is not GRAY_SCALE */ //quant_type ReadBits(psBits, 1, &codeword); aEncOption->quantType[0] = codeword; if (codeword != 0) //quant_type = 1 { ReadBits(psBits, 1, &codeword); //load_intra_quant_mat if (codeword) goto FREE_PS_BITS_AND_FAIL; // No support for user defined matrix. ReadBits(psBits, 1, &codeword); //load_nonintra_quant_mat if (codeword) goto FREE_PS_BITS_AND_FAIL; // No support for user defined matrix. } //complexity_estimation_disable ReadBits(psBits, 1, &codeword); if (!codeword) { goto FREE_PS_BITS_AND_FAIL; } //resync_marker_disable ReadBits(psBits, 1, &codeword); if (codeword) { aEncOption->packetSize = 0; } //data_partitioned ReadBits(psBits, 1, &codeword); if (codeword) { aEncOption->encMode = DATA_PARTITIONING_MODE; //reversible_vlc ReadBits(psBits, 1, &codeword); aEncOption->rvlcEnable = (ParamEncMode) codeword; } else { // No data_partitioned if (aEncOption->packetSize > 0) { aEncOption->encMode = COMBINE_MODE_WITH_ERR_RES; } else { aEncOption->encMode = COMBINE_MODE_NO_ERR_RES; } } //scalability ReadBits(psBits, 1, &codeword); if (codeword) goto FREE_PS_BITS_AND_FAIL; } else { /* SHORT_HEADER */ ShowBits(psBits, SHORT_VIDEO_START_MARKER_LENGTH, &codeword); if (codeword == SHORT_VIDEO_START_MARKER) { iDecodeShortHeader(psBits, aEncOption); } else { goto FREE_PS_BITS_AND_FAIL; } } return ECVEI_SUCCESS; FREE_PS_BITS_AND_FAIL: oscl_free(psBits); return ECVEI_FAIL; } int16 CPVM4VEncoder::iDecodeShortHeader(mp4StreamType *psBits, VideoEncOptions *aEncOption) { uint32 codeword; int *width, *height; //Default values aEncOption->quantType[0] = 0; aEncOption->rvlcEnable = PV_OFF; aEncOption->packetSize = 0; // Since, by 
/* (review) Remainder of iDecodeShortHeader(): parses the H.263 short video
 * header (22-bit start code 0x20, temporal_reference, flag bits, then the
 * 3-bit source-format field that maps to the fixed picture sizes
 * sub-QCIF..16CIF).  It is followed by the big-endian bit-reader helpers on
 * mp4StreamType: ShowBits (peek without advancing), FlushBits (advance only),
 * ReadBits (peek + advance) and ByteAlign.  bitBuf caches up to 32 bits of
 * the stream; bitPos is the consumed-bit offset inside that cache. */
default resync_marker_disable = 1; aEncOption->encMode = SHORT_HEADER; // NO error resilience width = &(aEncOption->encWidth[0]); height = &(aEncOption->encHeight[0]); //short_video_start_marker ShowBits(psBits, 22, &codeword); if (codeword != 0x20) { return ECVEI_FAIL; } FlushBits(psBits, 22); //temporal_reference ReadBits(psBits, 8, &codeword); //marker_bit ReadBits(psBits, 1, &codeword); if (codeword == 0) return ECVEI_FAIL; //zero_bit ReadBits(psBits, 1, &codeword); if (codeword == 1) return ECVEI_FAIL; //split_screen_indicator ReadBits(psBits, 1, &codeword); if (codeword == 1) return ECVEI_FAIL; //document_camera_indicator ReadBits(psBits, 1, &codeword); if (codeword == 1) return ECVEI_FAIL; //full_picture_freeze_release ReadBits(psBits, 1, &codeword); if (codeword == 1) return ECVEI_FAIL; /* source format */ ReadBits(psBits, 3, &codeword); switch (codeword) { case 1: *width = 128; *height = 96; break; case 2: *width = 176; *height = 144; break; case 3: *width = 352; *height = 288; break; case 4: *width = 704; *height = 576; break; case 5: *width = 1408; *height = 1152; break; default: return ECVEI_FAIL; } return 0; } int16 CPVM4VEncoder::ShowBits( mp4StreamType *pStream, /* Input Stream */ uint8 ucNBits, /* nr of bits to read */ uint32 *pulOutData /* output target */ ) { uint8 *bits; uint32 dataBitPos = pStream->dataBitPos; uint32 bitPos = pStream->bitPos; uint32 dataBytePos; uint i; if (ucNBits > (32 - bitPos)) /* not enough bits */ { dataBytePos = dataBitPos >> 3; /* Byte Aligned Position */ bitPos = dataBitPos & 7; /* update bit position */ if (dataBytePos > pStream->numBytes - 4) { pStream->bitBuf = 0; for (i = 0; i < pStream->numBytes - dataBytePos; i++) { pStream->bitBuf |= pStream->data[dataBytePos+i]; pStream->bitBuf <<= 8; } pStream->bitBuf <<= 8 * (3 - i); } else { bits = &pStream->data[dataBytePos]; pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3]; } pStream->bitPos = bitPos; } bitPos += ucNBits; *pulOutData = 
/* (review) ShowBits result: bitBuf now holds the bits ending at bitPos;
 * extract ucNBits of (bitBuf >> (32 - bitPos)) via the MASK lookup table.
 * The cache refill above clamps to the remaining bytes when fewer than 4
 * bytes are left in the buffer. */
(pStream->bitBuf >> (32 - bitPos)) & MASK[(uint16)ucNBits]; return 0; } int16 CPVM4VEncoder::FlushBits( mp4StreamType *pStream, /* Input Stream */ uint8 ucNBits /* number of bits to flush */ ) { uint8 *bits; uint32 dataBitPos = pStream->dataBitPos; uint32 bitPos = pStream->bitPos; uint32 dataBytePos; if ((dataBitPos + ucNBits) > (uint32)(pStream->numBytes << 3)) return (-2); // Buffer over run dataBitPos += ucNBits; bitPos += ucNBits; if (bitPos > 32) { dataBytePos = dataBitPos >> 3; /* Byte Aligned Position */ bitPos = dataBitPos & 7; /* update bit position */ bits = &pStream->data[dataBytePos]; pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3]; } pStream->dataBitPos = dataBitPos; pStream->bitPos = bitPos; return 0; } int16 CPVM4VEncoder::ReadBits( mp4StreamType *pStream, /* Input Stream */ uint8 ucNBits, /* nr of bits to read */ uint32 *pulOutData /* output target */ ) { uint8 *bits; uint32 dataBitPos = pStream->dataBitPos; uint32 bitPos = pStream->bitPos; uint32 dataBytePos; if ((dataBitPos + ucNBits) > (pStream->numBytes << 3)) { *pulOutData = 0; return (-2); // Buffer over run } // dataBitPos += ucNBits; if (ucNBits > (32 - bitPos)) /* not enough bits */ { dataBytePos = dataBitPos >> 3; /* Byte Aligned Position */ bitPos = dataBitPos & 7; /* update bit position */ bits = &pStream->data[dataBytePos]; pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3]; } pStream->dataBitPos += ucNBits; pStream->bitPos = (unsigned char)(bitPos + ucNBits); *pulOutData = (pStream->bitBuf >> (32 - pStream->bitPos)) & MASK[(uint16)ucNBits]; return 0; } int16 CPVM4VEncoder::ByteAlign( mp4StreamType *pStream /* Input Stream */ ) { uint8 *bits; uint32 dataBitPos = pStream->dataBitPos; uint32 bitPos = pStream->bitPos; uint32 dataBytePos; uint32 leftBits; leftBits = 8 - (dataBitPos & 0x7); if (leftBits == 8) { if ((dataBitPos + 8) > (uint32)(pStream->numBytes << 3)) return (-2); // Buffer over run dataBitPos += 8; bitPos += 8; } 
/* NOTE(review): FlushBits/ReadBits/ByteAlign refill bitBuf with an
 * unconditional 4-byte read bits[0..3]; near the end of the buffer this can
 * read up to 3 bytes past data[numBytes-1].  ShowBits clamps to the
 * remaining bytes in that case — confirm whether callers over-allocate the
 * buffer, otherwise apply the same clamp here.  ByteAlign continues below. */
else { dataBytePos = dataBitPos >> 3; dataBitPos += leftBits; bitPos += leftBits; } if (bitPos > 32) { dataBytePos = dataBitPos >> 3; /* Byte Aligned Position */ bits = &pStream->data[dataBytePos]; pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3]; } pStream->dataBitPos = dataBitPos; pStream->bitPos = bitPos; return 0; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam) { int i; TCVEI_RETVAL status; MP4EncodingMode ENC_Mode ; ParamEncMode RvlcMode = PV_OFF; /* default no RVLC */ Int quantType[2] = {0, 0}; /* default H.263 quant*/ VideoEncOptions aEncOption; /* encoding options */ iState = EIdle ; // stop encoding iId = aEncParam->iEncodeID; iOverrunBuffer = NULL; iOBSize = 0; if (aEncParam->iContentType == ECVEI_STREAMING) { ENC_Mode = DATA_PARTITIONING_MODE; } else if (aEncParam->iContentType == ECVEI_DOWNLOAD) { if (aEncParam->iPacketSize > 0) { ENC_Mode = COMBINE_MODE_WITH_ERR_RES; } else { ENC_Mode = COMBINE_MODE_NO_ERR_RES; } } else if (aEncParam->iContentType == ECVEI_H263) { if (aEncParam->iPacketSize > 0) { ENC_Mode = H263_MODE_WITH_ERR_RES; } else { ENC_Mode = H263_MODE; } } else { return ECVEI_FAIL; } iSrcWidth = aVidInFormat->iFrameWidth; iSrcHeight = aVidInFormat->iFrameHeight; iSrcFrameRate = (int) aVidInFormat->iFrameRate; iVideoFormat = (TPVVideoFormat) aVidInFormat->iVideoFormat; iFrameOrientation = aVidInFormat->iFrameOrientation; if (iInitialized == true) /* clean up before re-initialized */ { /*status = */ (int) PVCleanUpVideoEncoder(&iEncoderControl); if (iYUVIn) { oscl_free(iYUVIn); iYUVIn = NULL; } } // allocate iYUVIn if (((iSrcWidth&0xF) || (iSrcHeight&0xF)) || iVideoFormat != ECVEI_YUV420) /* Not multiple of 16 */ { iYUVIn = (uint8*) oscl_malloc(((((iSrcWidth + 15) >> 4) * ((iSrcHeight + 15) >> 4)) * 3) << 7); if (iYUVIn == NULL) { return ECVEI_FAIL; } } // check 
the buffer delay according to the clip duration if (aEncParam->iClipDuration > 0 && aEncParam->iRateControlType == EVBR_1) { if (aEncParam->iBufferDelay > aEncParam->iClipDuration / 10000.0) //enforce 10% variation of the clip duration as the bound of buffer delay { aEncParam->iBufferDelay = aEncParam->iClipDuration / (float)10000.0; } } if (iVideoFormat == ECVEI_RGB24) { #ifdef RGB24_INPUT ccRGBtoYUV = CCRGB24toYUV420::New(); #else return ECVEI_FAIL; #endif } if (iVideoFormat == ECVEI_RGB12) { #ifdef RGB12_INPUT ccRGBtoYUV = CCRGB12toYUV420::New(); #else return ECVEI_FAIL; #endif } if (iVideoFormat == ECVEI_YUV420SEMIPLANAR) { #ifdef YUV420SEMIPLANAR_INPUT ccRGBtoYUV = CCYUV420SEMItoYUV420::New(); #else return ECVEI_FAIL; #endif } if ((iVideoFormat == ECVEI_RGB12) || (iVideoFormat == ECVEI_RGB24) || (iVideoFormat == ECVEI_YUV420SEMIPLANAR)) { #if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined (YUV420SEMIPLANAR_INPUT) ccRGBtoYUV->Init(iSrcWidth, iSrcHeight, iSrcWidth, iSrcWidth, iSrcHeight, ((iSrcWidth + 15) >> 4) << 4, (iFrameOrientation == 1 ? 
CCBOTTOM_UP : 0)); #endif } PVGetDefaultEncOption(&aEncOption, 0); /* iContentType is M4v && FSI Buffer is input parameter */ if ((aEncParam->iContentType != ECVEI_H263) && (aEncParam->iFSIBuffLength)) { aEncOption.encMode = ENC_Mode; aEncOption.packetSize = aEncParam->iPacketSize; aEncOption.numLayers = aEncParam->iNumLayer; status = ParseFSI(aEncParam->iFSIBuff, aEncParam->iFSIBuffLength, &aEncOption); if (ECVEI_FAIL == status) { return ECVEI_FAIL; } aEncOption.tickPerSrc = (int)(aEncOption.timeIncRes / aVidInFormat->iFrameRate + 0.5); for (i = 0; i < aEncParam->iNumLayer; i++) { aEncOption.encFrameRate[i] = iEncFrameRate[i] = aEncParam->iFrameRate[i]; aEncOption.bitRate[i] = aEncParam->iBitRate[i]; aEncOption.iQuant[i] = aEncParam->iIquant[i]; aEncOption.pQuant[i] = aEncParam->iPquant[i]; } if (aEncParam->iRateControlType == ECONSTANT_Q) aEncOption.rcType = CONSTANT_Q; else if (aEncParam->iRateControlType == ECBR_1) aEncOption.rcType = CBR_1; else if (aEncParam->iRateControlType == EVBR_1) aEncOption.rcType = VBR_1; else return ECVEI_FAIL; } else // All default Settings { aEncOption.encMode = ENC_Mode; aEncOption.packetSize = aEncParam->iPacketSize; Int profile_level = (Int)ECVEI_CORE_LEVEL2; if (aEncParam->iNumLayer > 1) profile_level = (Int)ECVEI_CORE_SCALABLE_LEVEL3; aEncOption.profile_level = (ProfileLevelType)profile_level; aEncOption.rvlcEnable = RvlcMode; aEncOption.numLayers = aEncParam->iNumLayer; aEncOption.timeIncRes = 1000; aEncOption.tickPerSrc = (int)(1000 / aVidInFormat->iFrameRate + 0.5); for (i = 0; i < aEncParam->iNumLayer; i++) { aEncOption.encWidth[i] = iEncWidth[i] = aEncParam->iFrameWidth[i]; aEncOption.encHeight[i] = iEncHeight[i] = aEncParam->iFrameHeight[i]; aEncOption.encFrameRate[i] = iEncFrameRate[i] = aEncParam->iFrameRate[i]; aEncOption.bitRate[i] = aEncParam->iBitRate[i]; aEncOption.iQuant[i] = aEncParam->iIquant[i]; aEncOption.pQuant[i] = aEncParam->iPquant[i]; aEncOption.quantType[i] = quantType[i]; /* default to H.263 */ } if 
(aEncParam->iRateControlType == ECONSTANT_Q) aEncOption.rcType = CONSTANT_Q; else if (aEncParam->iRateControlType == ECBR_1) aEncOption.rcType = CBR_1; else if (aEncParam->iRateControlType == EVBR_1) aEncOption.rcType = VBR_1; else return ECVEI_FAIL; // Check the bitrate, framerate, image size and buffer delay for 3GGP compliance #ifdef FOR_3GPP_COMPLIANCE Check3GPPCompliance(aEncParam, iEncWidth, iEncHeight); #endif } aEncOption.vbvDelay = (float)aEncParam->iBufferDelay; switch (aEncParam->iIFrameInterval) { case -1: aEncOption.intraPeriod = -1; break; case 0: aEncOption.intraPeriod = 0; break; default: aEncOption.intraPeriod = (int)(aEncParam->iIFrameInterval * aVidInFormat->iFrameRate); break; } aEncOption.numIntraMB = aEncParam->iNumIntraMBRefresh; aEncOption.sceneDetect = (aEncParam->iSceneDetection == true) ? PV_ON : PV_OFF; aEncOption.noFrameSkipped = (aEncParam->iNoFrameSkip == true) ? PV_ON : PV_OFF; aEncOption.searchRange = aEncParam->iSearchRange; aEncOption.mv8x8Enable = (aEncParam->iMV8x8 == true) ? 
PV_ON : PV_OFF; if (PV_FALSE == PVInitVideoEncoder(&iEncoderControl, &aEncOption)) { goto FAIL; } iNextModTime = 0; iInitialized = true; return ECVEI_SUCCESS; FAIL: iInitialized = false; return ECVEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF int32 CPVM4VEncoder::GetBufferSize() { Int bufSize = 0; //PVGetVBVSize(&iEncoderControl,&bufSize); PVGetMaxVideoFrameSize(&iEncoderControl, &bufSize); return (int32) bufSize; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF int32 CPVM4VEncoder::GetEncodeWidth(int32 aLayer) { return (int32)iEncWidth[aLayer]; } OSCL_EXPORT_REF int32 CPVM4VEncoder::GetEncodeHeight(int32 aLayer) { return (int32)iEncHeight[aLayer]; } OSCL_EXPORT_REF float CPVM4VEncoder::GetEncodeFrameRate(int32 aLayer) { return iEncFrameRate[aLayer]; } OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::GetVolHeader(uint8 *volHeader, int32 *size, int32 layer) { Int aSize, aLayer = layer; if (iInitialized == false) /* has to be initialized first */ return ECVEI_FAIL; aSize = *size; if (PVGetVolHeader(&iEncoderControl, (UChar*)volHeader, &aSize, aLayer) == PV_FALSE) return ECVEI_FAIL; *size = aSize; return ECVEI_SUCCESS; } #ifdef PVAUTHOR_PROFILING #include "pvauthorprofile.h" #endif /* ///////////////////////////////////////////////////////////////////////// */ // Value of aRemainingBytes is relevant when overrun buffer is used and return value is ECVEI_MORE_OUTPUT OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::EncodeFrame(TPVVideoInputData *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes #ifdef PVAUTHOR_PROFILING , void *aParam1 #endif ) { Bool status; Int Size; Int nLayer = 0; ULong modTime; VideoEncFrameIO vid_in, vid_out; *aRemainingBytes = 0; if (iState == EEncode && iOverrunBuffer) // more output buffer to be copied out. 
{ if (iOBSize > aVidOut->iBitStreamSize) { oscl_memcpy(aVidOut->iBitStream, iOverrunBuffer, aVidOut->iBitStreamSize); iOBSize -= aVidOut->iBitStreamSize; iOverrunBuffer += aVidOut->iBitStreamSize; *aRemainingBytes = iOBSize; return ECVEI_MORE_OUTPUT; } else { oscl_memcpy(aVidOut->iBitStream, iOverrunBuffer, iOBSize); aVidOut->iBitStreamSize = iOBSize; iOverrunBuffer = NULL; iOBSize = 0; iState = EIdle; *aRemainingBytes = 0; return ECVEI_SUCCESS; } } if (aVidIn->iSource == NULL) { return ECVEI_FAIL; } if (aVidIn->iTimeStamp >= iNextModTime) /* time to encode */ { iState = EIdle; /* stop current encoding */ Size = aVidOut->iBitStreamSize; #ifdef PVAUTHOR_PROFILING if (aParam1)((CPVAuthorProfile*)aParam1)->Start(); #endif if (iVideoFormat == ECVEI_YUV420) #ifdef YUV_INPUT { if (iYUVIn) /* iSrcWidth or iSrcHeight is not multiple of 16 */ { CopyToYUVIn(aVidIn->iSource, iSrcWidth, iSrcHeight, ((iSrcWidth + 15) >> 4) << 4, ((iSrcHeight + 15) >> 4) << 4); iVideoIn = iYUVIn; } else /* otherwise, we can just use aVidIn->iSource */ { iVideoIn = aVidIn->iSource; } } #else return ECVEI_FAIL; #endif else if ((iVideoFormat == ECVEI_RGB12) || (iVideoFormat == ECVEI_RGB24) || (iVideoFormat == ECVEI_YUV420SEMIPLANAR)) #if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT) { ccRGBtoYUV->Convert((uint8*)aVidIn->iSource, iYUVIn); iVideoIn = iYUVIn; } #else return ECVEI_FAIL; #endif #ifdef PVAUTHOR_PROFILING if (aParam1)((CPVAuthorProfile*)aParam1)->Stop(CPVAuthorProfile::EColorInput); #endif #ifdef PVAUTHOR_PROFILING if (aParam1)((CPVAuthorProfile*)aParam1)->Start(); #endif /* with backward-P or B-Vop this timestamp must be re-ordered */ aVidOut->iExternalTimeStamp = aVidIn->iTimeStamp; aVidOut->iVideoTimeStamp = aVidOut->iExternalTimeStamp; vid_in.height = ((iSrcHeight + 15) >> 4) << 4; vid_in.pitch = ((iSrcWidth + 15) >> 4) << 4; vid_in.timestamp = aVidIn->iTimeStamp; vid_in.yChan = (UChar*)iVideoIn; vid_in.uChan = (UChar*)(iVideoIn + vid_in.height * 
vid_in.pitch); vid_in.vChan = vid_in.uChan + ((vid_in.height * vid_in.pitch) >> 2); status = PVEncodeVideoFrame(&iEncoderControl, &vid_in, &vid_out, &modTime, (UChar*)aVidOut->iBitStream, &Size, &nLayer); if (status == PV_TRUE) { iNextModTime = modTime; aVidOut->iLayerNumber = nLayer; aVidOut->iFrame = iVideoOut = (uint8*)vid_out.yChan; aVidOut->iVideoTimeStamp = vid_out.timestamp; PVGetHintTrack(&iEncoderControl, &aVidOut->iHintTrack); #ifdef PVAUTHOR_PROFILING if (aParam1)((CPVAuthorProfile*)aParam1)->Stop(CPVAuthorProfile::EVideoEncode); #endif iOverrunBuffer = PVGetOverrunBuffer(&iEncoderControl); if (iOverrunBuffer != NULL && nLayer != -1) { oscl_memcpy(aVidOut->iBitStream, iOverrunBuffer, aVidOut->iBitStreamSize); iOBSize = Size - aVidOut->iBitStreamSize; iOverrunBuffer += aVidOut->iBitStreamSize; iState = EEncode; return ECVEI_MORE_OUTPUT; } else { aVidOut->iBitStreamSize = Size; return ECVEI_SUCCESS; } } else return ECVEI_FAIL; } else /* if(aVidIn->iTimeStamp >= iNextModTime) */ { aVidOut->iLayerNumber = -1; aVidOut->iBitStreamSize = 0; #ifdef PVAUTHOR_PROFILING if (aParam1)((CPVAuthorProfile*)aParam1)->AddVal (CPVAuthorProfile::EVidSkip, iNextModTime - aVidIn->iTimeStamp); #endif return ECVEI_SUCCESS; } } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::FlushOutput(TPVVideoOutputData *aVidOut) { OSCL_UNUSED_ARG(aVidOut); return ECVEI_SUCCESS; } /* ///////////////////////////////////////////////////////////////////////// */ TCVEI_RETVAL CPVM4VEncoder::Terminate() { iState = EIdle; /* stop current encoding */ if (iInitialized == true) { PVCleanUpVideoEncoder(&iEncoderControl); iInitialized = false; } if (iYUVIn) { oscl_free(iYUVIn); iYUVIn = NULL; } return ECVEI_SUCCESS; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::UpdateBitRate(int32 aNumLayer, int32 *aBitRate) { #ifndef LIMITED_API Int i, bitRate[2] = 
{0, 0}; for (i = 0; i < aNumLayer; i++) { bitRate[i] = aBitRate[i]; } if (PVUpdateBitRate(&iEncoderControl, &bitRate[0]) == PV_TRUE) return ECVEI_SUCCESS; else #endif return ECVEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::UpdateFrameRate(int32 aNumLayer, float *aFrameRate) { OSCL_UNUSED_ARG(aNumLayer); #ifndef LIMITED_API if (PVUpdateEncFrameRate(&iEncoderControl, aFrameRate) == PV_TRUE) return ECVEI_SUCCESS; else #else OSCL_UNUSED_ARG(aFrameRate); #endif return ECVEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::UpdateIFrameInterval(int32 aIFrameInterval) { #ifndef LIMITED_API if (PVUpdateIFrameInterval(&iEncoderControl, (Int)aIFrameInterval) == PV_TRUE) return ECVEI_SUCCESS; else #endif return ECVEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::IFrameRequest() { #ifndef LIMITED_API if (PVIFrameRequest(&iEncoderControl) == PV_TRUE) return ECVEI_SUCCESS; else #endif return ECVEI_FAIL; } /* ///////////////////////////////////////////////////////////////////////// */ OSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::SetIntraMBRefresh(int32 aNumMBRefresh) { #ifndef LIMITED_API if (PVUpdateNumIntraMBRefresh(&iEncoderControl, aNumMBRefresh) == PV_TRUE) return ECVEI_SUCCESS; else #endif return ECVEI_FAIL; } #ifdef YUV_INPUT /* ///////////////////////////////////////////////////////////////////////// */ /* Copy from YUV input to YUV frame inside M4VEnc lib */ /* When input is not YUV, the color conv will write it directly to iVideoInOut. 
*/ /* ///////////////////////////////////////////////////////////////////////// */ void CPVM4VEncoder::CopyToYUVIn(uint8 *YUV, Int width, Int height, Int width_16, Int height_16) { UChar *y, *u, *v, *yChan, *uChan, *vChan; Int y_ind, ilimit, jlimit, i, j, ioffset; Int size = width * height; Int size16 = width_16 * height_16; /* do padding at the bottom first */ /* do padding if input RGB size(height) is different from the output YUV size(height_16) */ if (height < height_16 || width < width_16) /* if padding */ { Int offset = (height < height_16) ? height : height_16; offset = (offset * width_16); if (width < width_16) { offset -= (width_16 - width); } yChan = (UChar*)(iYUVIn + offset); oscl_memset(yChan, 16, size16 - offset); /* pad with zeros */ uChan = (UChar*)(iYUVIn + size16 + (offset >> 2)); oscl_memset(uChan, 128, (size16 - offset) >> 2); vChan = (UChar*)(iYUVIn + size16 + (size16 >> 2) + (offset >> 2)); oscl_memset(vChan, 128, (size16 - offset) >> 2); } /* then do padding on the top */ yChan = (UChar*)iYUVIn; /* Normal order */ uChan = (UChar*)(iYUVIn + size16); vChan = (UChar*)(uChan + (size16 >> 2)); u = (UChar*)(&(YUV[size])); v = (UChar*)(&(YUV[size*5/4])); /* To center the output */ if (height_16 > height) /* output taller than input */ { if (width_16 >= width) /* output wider than or equal input */ { i = ((height_16 - height) >> 1) * width_16 + (((width_16 - width) >> 3) << 2); /* make sure that (width_16-width)>>1 is divisible by 4 */ j = ((height_16 - height) >> 2) * (width_16 >> 1) + (((width_16 - width) >> 4) << 2); /* make sure that (width_16-width)>>2 is divisible by 4 */ } else /* output narrower than input */ { i = ((height_16 - height) >> 1) * width_16; j = ((height_16 - height) >> 2) * (width_16 >> 1); YUV += ((width - width_16) >> 1); u += ((width - width_16) >> 2); v += ((width - width_16) >> 2); } oscl_memset((uint8 *)yChan, 16, i); yChan += i; oscl_memset((uint8 *)uChan, 128, j); uChan += j; oscl_memset((uint8 *)vChan, 128, j); vChan += 
j; } else /* output shorter or equal input */ { if (width_16 >= width) /* output wider or equal input */ { i = (((width_16 - width) >> 3) << 2); /* make sure that (width_16-width)>>1 is divisible by 4 */ j = (((width_16 - width) >> 4) << 2); /* make sure that (width_16-width)>>2 is divisible by 4 */ YUV += (((height - height_16) >> 1) * width); u += (((height - height_16) >> 1) * width) >> 2; v += (((height - height_16) >> 1) * width) >> 2; } else /* output narrower than input */ { i = 0; j = 0; YUV += (((height - height_16) >> 1) * width + ((width - width_16) >> 1)); u += (((height - height_16) >> 1) * width + ((width - width_16) >> 1)) >> 2; v += (((height - height_16) >> 1) * width + ((width - width_16) >> 1)) >> 2; } oscl_memset((uint8 *)yChan, 16, i); yChan += i; oscl_memset((uint8 *)uChan, 128, j); uChan += j; oscl_memset((uint8 *)vChan, 128, j); vChan += j; } /* Copy with cropping or zero-padding */ if (height < height_16) jlimit = height; else jlimit = height_16; if (width < width_16) { ilimit = width; ioffset = width_16 - width; } else { ilimit = width_16; ioffset = 0; } /* Copy Y */ /* Set up pointer for fast looping */ y = (UChar*)YUV; if (width == width_16 && height == height_16) /* no need to pad */ { oscl_memcpy(yChan, y, size); } else { for (y_ind = 0; y_ind < (jlimit - 1) ; y_ind++) { oscl_memcpy(yChan, y, ilimit); oscl_memset(yChan + ilimit, 16, ioffset); /* pad with zero */ yChan += width_16; y += width; } oscl_memcpy(yChan, y, ilimit); /* last line no padding */ } /* Copy U and V */ /* Set up pointers for fast looping */ if (width == width_16 && height == height_16) /* no need to pad */ { oscl_memcpy(uChan, u, size >> 2); oscl_memcpy(vChan, v, size >> 2); } else { for (y_ind = 0; y_ind < (jlimit >> 1) - 1; y_ind++) { oscl_memcpy(uChan, u, ilimit >> 1); oscl_memcpy(vChan, v, ilimit >> 1); oscl_memset(uChan + (ilimit >> 1), 128, ioffset >> 1); oscl_memset(vChan + (ilimit >> 1), 128, ioffset >> 1); uChan += (width_16 >> 1); u += (width >> 1); vChan 
+= (width_16 >> 1); v += (width >> 1); } oscl_memcpy(uChan, u, ilimit >> 1); /* last line no padding */ oscl_memcpy(vChan, v, ilimit >> 1); } return ; } #endif #ifdef FOR_3GPP_COMPLIANCE void CPVM4VEncoder::Check3GPPCompliance(TPVVideoEncodeParam *aEncParam, Int *aEncWidth, Int *aEncHeight) { //MPEG-4 Simple profile and level 0 #define MAX_BITRATE 64000 #define MAX_FRAMERATE 15 #define MAX_WIDTH 176 #define MAX_HEIGHT 144 #define MAX_BUFFERSIZE 163840 // check bitrate, framerate, video size and vbv buffer if (aEncParam->iBitRate[0] > MAX_BITRATE) aEncParam->iBitRate[0] = MAX_BITRATE; if (aEncParam->iFrameRate[0] > MAX_FRAMERATE) aEncParam->iFrameRate[0] = MAX_FRAMERATE; if (aEncWidth[0] > MAX_WIDTH) aEncWidth[0] = MAX_WIDTH; if (aEncHeight[0] > MAX_HEIGHT) aEncHeight[0] = MAX_HEIGHT; if (aEncParam->iBitRate[0]*aEncParam->iBufferDelay > MAX_BUFFERSIZE) aEncParam->iBufferDelay = (float)MAX_BUFFERSIZE / aEncParam->iBitRate[0]; } #endif ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/rate_control.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
 * ------------------------------------------------------------------- */

/*
 * Rate control for the MPEG-4 / H.263 video encoder ("BX" rate control).
 * Each layer keeps a virtual buffer (VBV) model in `rateControl` plus
 * multi-pass statistics in `MultiPass`, used to pick per-frame bit
 * targets and quantizers.
 */
#include "mp4def.h"
#include "mp4lib_int.h"
#include "rate_control.h"
#include "mp4enc_lib.h"
#include "bitstream_io.h"
#include "m4venc_oscl.h"

/* forward declarations of the internal helpers defined later in this file */
void targetBitCalculation(void *input);
void calculateQuantizer_Multipass(void *video);
void updateRateControl(rateControl *rc, VideoEncData *video);
void updateRC_PostProc(rateControl *rc, VideoEncData *video);

/***************************************************************************
 **************   RC APIs to core encoding modules   ***********************

 PV_STATUS RC_Initialize(void *video);
 PV_STATUS RC_Cleanup(rateControl *rc[],Int numLayers);
 PV_STATUS RC_VopQPSetting(VideoEncData *video,rateControl *rc[]);
 PV_STATUS RC_VopUpdateStat(VideoEncData *video,rateControl *rc[]);
 PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip);
 Int       RC_GetSkipNextFrame(VideoEncData *video,Int currLayer);
 void      RC_ResetSkipNextFrame(void *video,Int currLayer);
 PV_STATUS RC_UpdateBXRCParams(void *input);
           Parameters update for target bitrate or framerate change
 ****************************************************************************/


/************************************************************************/
/************ API part **************************************************/
/* must be called before each sequence */
/* Initializes the per-layer rate-control state (buffer size, per-frame
 * bit budget TMN_TH, allowed bit variance, initial VBV fullness) and the
 * matching MultiPass bookkeeping.  Layer 0 carries the full bitrate;
 * enhancement layers carry the delta to the layer below. */
PV_STATUS RC_Initialize(void *input)
{
    VideoEncData *video = (VideoEncData *) input;
    VideoEncParams *encParams = video->encParams;
    rateControl **rc = video->rc;
    Int numLayers = encParams->nLayers;
    Int *LayerBitRate = encParams->LayerBitRate;
    float *LayerFrameRate = encParams->LayerFrameRate;
    MultiPass **pMP = video->pMP;
    Int n;

    for (n = 0; n < numLayers; n++)
    {
        /* rate control */
        rc[n]->fine_frame_skip = encParams->FineFrameSkip_Enabled;
        rc[n]->no_frame_skip = encParams->NoFrameSkip_Enabled;
        rc[n]->no_pre_skip = encParams->NoPreSkip_Enabled;
        rc[n]->skip_next_frame = 0; /* must be initialized */

        //rc[n]->TMN_TH = (Int)((float)LayerBitRate[n]/LayerFrameRate[n]);
        rc[n]->Bs = video->encParams->BufferSize[n];
        rc[n]->TMN_W = 0;
        rc[n]->VBV_fullness = (Int)(rc[n]->Bs * 0.5); /* rc[n]->Bs */
        rc[n]->encoded_frames = 0;
        rc[n]->framerate = LayerFrameRate[n];
        /* NOTE(review): divisions by LayerFrameRate[n] below assume a
           non-zero frame rate from the caller — TODO confirm upstream
           validation. */
        if (n == 0)
        {
            rc[n]->TMN_TH = (Int)((float)LayerBitRate[n] / LayerFrameRate[n]);
            rc[n]->bitrate = LayerBitRate[n];
            rc[n]->framerate = LayerFrameRate[n];

            // For h263 or short header mode, the bit variation is within (-2*Rmax*1001/3000, 2*Rmax*1001/3000)
            if (video->encParams->H263_Enabled)
            {
                rc[n]->max_BitVariance_num = (Int)((rc[n]->Bs - video->encParams->maxFrameSize) / 2 / (rc[n]->bitrate / rc[n]->framerate / 10.0)) - 5;
                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
            }
            else // MPEG-4 normal modes
            {
                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness) / ((float)LayerBitRate[n] / LayerFrameRate[n] / 10.0)) - 5;
                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
            }
        }
        else
        {
            /* enhancement layer: rate/framerate are deltas to layer n-1 */
            if (LayerFrameRate[n] - LayerFrameRate[n-1] > 0) /* 7/31/03 */
            {
                rc[n]->TMN_TH = (Int)((float)(LayerBitRate[n] - LayerBitRate[n-1]) / (LayerFrameRate[n] - LayerFrameRate[n-1]));
                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness) * 10 / ((float)rc[n]->TMN_TH)) - 5;
                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
            }
            else /* 7/31/03 */
            {
                /* no extra frames in this layer: effectively disable it */
                rc[n]->TMN_TH = 1 << 30;
                rc[n]->max_BitVariance_num = 0;
            }
            rc[n]->bitrate = LayerBitRate[n] - LayerBitRate[n-1];
            rc[n]->framerate = LayerFrameRate[n] - LayerFrameRate[n-1];
        }

        // Set the initial buffer fullness
        if (1) //!video->encParams->H263_Enabled) { // MPEG-4
        {
            /* According to the spec, the initial buffer fullness needs to be set to 1/3 */
            /* fullness is tracked relative to the buffer midpoint, hence Bs/3 - Bs/2 */
            rc[n]->VBV_fullness = (Int)(rc[n]->Bs / 3.0 - rc[n]->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */
            pMP[n]->counter_BTsrc = (Int)((rc[n]->Bs / 2.0 - rc[n]->Bs / 3.0) / (rc[n]->bitrate / rc[n]->framerate / 10.0));
            rc[n]->TMN_W = (Int)(rc[n]->VBV_fullness + pMP[n]->counter_BTsrc * (rc[n]->bitrate / rc[n]->framerate / 10.0));
            rc[n]->low_bound = -rc[n]->Bs / 2;
            rc[n]->VBV_fullness_offset = 0;
        }
        else /* this part doesn't work in some cases, the low_bound is too high, Jan 4,2006 */
        {
            /* dead branch, kept for reference (see note above) */
            rc[n]->VBV_fullness = rc[n]->Bs - (Int)(video->encParams->VBV_delay * rc[n]->bitrate);
            if (rc[n]->VBV_fullness < 0) rc[n]->VBV_fullness = 0;
            //rc[n]->VBV_fullness = (rc[n]->Bs-video->encParams->maxFrameSize)/2 + video->encParams->maxFrameSize;
            rc[n]->VBV_fullness -= rc[n]->Bs / 2; /* the buffer range is [-Bs/2, Bs/2] */
            rc[n]->low_bound = -rc[n]->Bs / 2 + video->encParams->maxFrameSize; /* too high */
            rc[n]->VBV_fullness_offset = video->encParams->maxFrameSize / 2; /* don't understand the meaning of this */
            pMP[n]->counter_BTdst = pMP[n]->counter_BTsrc = 0;
        }

        /* Setting the bitrate and framerate */
        pMP[n]->bitrate = rc[n]->bitrate;
        pMP[n]->framerate = rc[n]->framerate;
        pMP[n]->target_bits_per_frame = pMP[n]->bitrate / pMP[n]->framerate;
    }

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : RC_Cleanup                                                   */
/*  Date     : 12/20/2000                                                   */
/*  Purpose  : free Rate Control memory                                     */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
/* This module allocates nothing itself, so cleanup is a no-op. */
PV_STATUS RC_Cleanup(rateControl *rc[], Int numLayers)
{
    OSCL_UNUSED_ARG(rc);
    OSCL_UNUSED_ARG(numLayers);

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : RC_VopQPSetting                                              */
/*  Date     : 4/11/2001                                                    */
/*  Purpose  : Reset rate control before coding VOP, moved from vop.c       */
/*             Compute QP for the whole VOP and initialize MB-based RC      */
/*             reset QPMB[], currVop->quantizer, rc->Ec, video->header_bits */
/*             In order to work RC_VopQPSetting has to do the followings:   */
/*             1. Set video->QPMB of all macroblocks.                       */
/*             2. Set currVop->quantizer                                    */
/*             3. Reset video->header_bits to zero.                         */
/*             4.
Initialize internal RC parameters for Vop coding                 */
/*  In/out   :                                                              */
/*  Return   : PV_STATUS                                                    */
/*  Modified :                                                              */
/* ======================================================================== */
/* To be moved to rate_control.c and separate between BX_RC and ANNEX_L */

PV_STATUS RC_VopQPSetting(VideoEncData *video, rateControl *prc[])
{
    Int currLayer = video->currLayer;
    Vol *currVol = video->vol[currLayer];
    Vop *currVop = video->currVop;
#ifdef TEST_MBBASED_QP
    int i;
#endif
    rateControl *rc = video->rc[currLayer];
    MultiPass *pMP = video->pMP[currLayer];

    OSCL_UNUSED_ARG(prc);

    if (video->encParams->RC_Type == CONSTANT_Q)
    {
        /* fixed-QP mode: just stamp the current quantizer over all MBs */
        M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);
        return PV_SUCCESS;
    }
    else
    {
        if (video->rc[currLayer]->encoded_frames == 0) /* rc[currLayer]->totalFrameNumber*/
        {
            /* first frame of the layer: use the configured I-VOP quantizer */
            M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);
            video->rc[currLayer]->Qc = video->encParams->InitQuantIvop[currLayer];
        }
        else
        {
            /* derive Qc from the multi-pass model, then apply it frame-wide */
            calculateQuantizer_Multipass((void*) video);
            currVop->quantizer = video->rc[currLayer]->Qc;
#ifdef TEST_MBBASED_QP
            i = currVol->nTotalMB;  /* testing changing QP at MB level */
            while (i)
            {
                i--;
                video->QPMB[i] = (i & 1) ? currVop->quantizer - 1 : currVop->quantizer + 1;
            }
#else
            M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);
#endif
        }
        video->header_bits = 0;
    }

    /* update pMP->framePos (circular index into the RD-sample history) */
    if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0;

    if (rc->T == 0)
    {
        /* no target yet (first rate-controlled frame): seed the bit-transfer
           counter from ~0.75s worth of frames, clamped to the VBV budget */
        pMP->counter_BTdst = (Int)(video->encParams->LayerFrameRate[video->currLayer] * 7.5 + 0.5); /* 0.75s time frame */
        pMP->counter_BTdst = PV_MIN(pMP->counter_BTdst, (Int)(rc->max_BitVariance_num / 2 * 0.40)); /* 0.75s time frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */
        pMP->counter_BTdst = PV_MAX(pMP->counter_BTdst, (Int)((rc->Bs / 2 - rc->VBV_fullness) * 0.30 / (rc->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */
        pMP->counter_BTdst = PV_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */

        pMP->target_bits = rc->T = rc->TMN_TH = (Int)(rc->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1));
        pMP->diff_counter = pMP->counter_BTdst;
    }

    /* collect the necessary data: target bits, actual bits, mad and QP */
    pMP->target_bits = rc->T;
    pMP->QP = currVop->quantizer;

    pMP->mad = video->sumMAD / (float)currVol->nTotalMB;
    if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */

    pMP->bitrate = rc->bitrate; /* calculated in RCVopQPSetting */
    pMP->framerate = rc->framerate;

    /* first pass encoding */
    pMP->nRe_Quantized = 0;

    return PV_SUCCESS;
}

/* ======================================================================== */
/*  Function : SaveRDSamples()                                              */
/*  Date     : 08/29/2001                                                   */
/*  History  :                                                              */
/*  Purpose  : Save QP, actual_bits, mad and R_D of the current iteration   */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/* Records one (QP, bits, MAD, rate/distortion) sample for the current
 * frame position; used later by calculateQuantizer_Multipass. */
Void SaveRDSamples(MultiPass *pMP, Int counter_samples)
{
    /* for pMP->pRDSamples */
    pMP->pRDSamples[pMP->framePos][counter_samples].QP = pMP->QP;
    pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits;
    pMP->pRDSamples[pMP->framePos][counter_samples].mad = pMP->mad;
    pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (float)(pMP->actual_bits / (pMP->mad + 0.0001)); /* +0.0001 guards div-by-zero */

    return ;
}

/* ======================================================================== */
/*  Function : RC_VopUpdateStat                                             */
/*  Date     : 12/20/2000                                                   */
/*  Purpose  : Update statistics for rate control after encoding each VOP.  */
/*             No need to change anything in VideoEncData structure.        */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */
/* Feeds the actual bit count of the just-encoded VOP back into the
 * multi-pass model and the VBV bookkeeping (via updateRateControl). */
PV_STATUS RC_VopUpdateStat(VideoEncData *video, rateControl *rc)
{
    Int currLayer = video->currLayer;
    Vol *currVol = video->vol[currLayer];
    MultiPass *pMP = video->pMP[currLayer];
    Int diff_BTCounter;

    switch (video->encParams->RC_Type)
    {
        case CONSTANT_Q:
            break;

        case CBR_1:
        case CBR_2:
        case VBR_1:
        case VBR_2:
        case CBR_LOWDELAY:
            pMP->actual_bits = currVol->stream->byteCount << 3; /* bytes -> bits */
            SaveRDSamples(pMP, 0);

            pMP->encoded_frames++;

            /* for pMP->samplesPerFrame */
            pMP->samplesPerFrame[pMP->framePos] = 0;

            pMP->sum_QP += pMP->QP;

            /* update pMP->counter_BTsrc, pMP->counter_BTdst */
            /* re-allocate the target bit again and then stop encoding */
            /* diff is measured in units of 10% of bits-per-frame */
            diff_BTCounter = (Int)((float)(rc->TMN_TH - rc->TMN_W - pMP->actual_bits) /
                                   (pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1);
            if (diff_BTCounter >= 0)
                pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */
            else
                pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */

            rc->TMN_TH -= (Int)((float)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1));
            rc->T = pMP->target_bits = rc->TMN_TH - rc->TMN_W;
            pMP->diff_counter -= diff_BTCounter;

            rc->Rc = currVol->stream->byteCount << 3; /* Total Bits for current frame */
            rc->Hc = video->header_bits; /* Total Bits in Header and Motion Vector */

            /* BX_RC */
            updateRateControl(rc, video);
            break;

        default: /* unrecognized RC_Type */
            return PV_FAIL;
    }

    return PV_SUCCESS;
}

/*
======================================================================== */ /* Function : RC_GetSkipNextFrame, RC_GetRemainingVops */ /* Date : 2/20/2001 */ /* Purpose : To access RC parameters from other parts of the code. */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ Int RC_GetSkipNextFrame(VideoEncData *video, Int currLayer) { return video->rc[currLayer]->skip_next_frame; } void RC_ResetSkipNextFrame(VideoEncData *video, Int currLayer) { video->rc[currLayer]->skip_next_frame = 0; return ; } /* ======================================================================== */ /* Function : RC_UpdateBuffer */ /* Date : 2/20/2001 */ /* Purpose : Update RC in case of there are frames skipped (camera freeze)*/ /* from the application level in addition to what RC requested */ /* In/out : Nr, B, Rr */ /* Return : Void */ /* Modified : */ /* ======================================================================== */ PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip) { rateControl *rc = video->rc[currLayer]; MultiPass *pMP = video->pMP[currLayer]; if (video == NULL || rc == NULL || pMP == NULL) return PV_FAIL; rc->VBV_fullness -= (Int)(rc->bitrate / rc->framerate * num_skip); //rc[currLayer]->Rp; pMP->counter_BTsrc += 10 * num_skip; /* Check buffer underflow */ if (rc->VBV_fullness < rc->low_bound) { rc->VBV_fullness = rc->low_bound; // -rc->Bs/2; rc->TMN_W = rc->VBV_fullness - rc->low_bound; pMP->counter_BTsrc = pMP->counter_BTdst + (Int)((float)(rc->Bs / 2 - rc->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10)); } return PV_SUCCESS; } /* ======================================================================== */ /* Function : RC_UpdateBXRCParams */ /* Date : 4/08/2002 */ /* Purpose : Update RC parameters specifically for target bitrate or */ /* framerate update during an encoding session */ /* In/out : */ /* Return : PV_TRUE if successed, PV_FALSE if failed. 
*/
/*  Modified :                                                              */
/* ======================================================================== */

/* Re-derives buffer size, per-frame target, allowed bit variance and the
 * bit-transfer counters for every layer after the application changed the
 * target bitrate and/or framerate mid-session.  Mirrors RC_Initialize but
 * preserves the current VBV fullness. */
PV_STATUS RC_UpdateBXRCParams(void *input)
{
    VideoEncData *video = (VideoEncData *) input;
    VideoEncParams *encParams = video->encParams;
    rateControl **rc = video->rc;
    Int numLayers = encParams->nLayers;
    Int *LayerBitRate = encParams->LayerBitRate;
    float *LayerFrameRate = encParams->LayerFrameRate;
    MultiPass **pMP = video->pMP;
    Int n, VBV_fullness;
    Int diff_counter;

    extern Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized);

    /* Reset video buffer size due to target bitrate change */
    SetProfile_BufferSize(video, video->encParams->VBV_delay, 0); /* output: video->encParams->BufferSize[] */

    for (n = 0; n < numLayers; n++)
    {
        /* Remaining stuff about frame dropping and underflow check in update RC */
        updateRC_PostProc(rc[n], video);

        rc[n]->skip_next_frame = 0; /* must be initialized */

        /* New changes: bitrate and framerate, Bs, max_BitVariance_num, TMN_TH(optional), encoded_frames(optional) */
        rc[n]->Bs = video->encParams->BufferSize[n];
        VBV_fullness = (Int)(rc[n]->Bs * 0.5);

        if (n == 0)
        {
            rc[n]->TMN_TH = (Int)((float)LayerBitRate[n] / LayerFrameRate[n]);
            rc[n]->bitrate = pMP[n]->bitrate = LayerBitRate[n];
            rc[n]->framerate = pMP[n]->framerate = LayerFrameRate[n];

            // For h263 or short header mode, the bit variation is within (-2*Rmax*1001/3000, 2*Rmax*1001/3000)
            if (video->encParams->H263_Enabled)
            {
                rc[n]->max_BitVariance_num = (Int)((rc[n]->Bs - video->encParams->maxFrameSize) / 2 / (rc[n]->bitrate / rc[n]->framerate / 10.0)) - 5;
                //rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness)/((float)LayerBitRate[n]/LayerFrameRate[n]/10.0))-5;
            }
            else // MPEG-4 normal modes
            {
                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - VBV_fullness) * 10 / ((float)LayerBitRate[n] / LayerFrameRate[n])) - 5;
            }
        }
        else
        {
            /* enhancement layer: delta bitrate / framerate relative to n-1 */
            if (LayerFrameRate[n] - LayerFrameRate[n-1] > 0) /* 7/31/03 */
            {
                rc[n]->TMN_TH = (Int)((float)(LayerBitRate[n] - LayerBitRate[n-1]) / (LayerFrameRate[n] - LayerFrameRate[n-1]));
                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - VBV_fullness) * 10 / ((float)rc[n]->TMN_TH)) - 5;
                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
            }
            else /* 7/31/03 */
            {
                rc[n]->TMN_TH = 1 << 30;
                rc[n]->max_BitVariance_num = 0;
            }
            rc[n]->bitrate = pMP[n]->bitrate = LayerBitRate[n] - LayerBitRate[n-1];
            rc[n]->framerate = pMP[n]->framerate = LayerFrameRate[n] - LayerFrameRate[n-1];
        }

        pMP[n]->target_bits_per_frame_prev = pMP[n]->target_bits_per_frame;
        pMP[n]->target_bits_per_frame = pMP[n]->bitrate / (float)(pMP[n]->framerate + 0.0001); /* 7/31/03 */

        /* rc[n]->VBV_fullness and rc[n]->TMN_W should be kept same */
        /* update pMP[n]->counter_BTdst and pMP[n]->counter_BTsrc */
        diff_counter = (Int)((float)(rc[n]->VBV_fullness - rc[n]->TMN_W) /
                             (pMP[n]->target_bits_per_frame / 10 + 0.0001)); /* 7/31/03 */

        pMP[n]->counter_BTdst = pMP[n]->counter_BTsrc = 0;
        if (diff_counter > 0) pMP[n]->counter_BTdst = diff_counter;
        else if (diff_counter < 0) pMP[n]->counter_BTsrc = -diff_counter;

        rc[n]->TMN_W = (Int)(rc[n]->VBV_fullness - /* re-calculate rc[n]->TMN_W in order for higher accuracy */
                             (pMP[n]->target_bits_per_frame / 10) * (pMP[n]->counter_BTdst - pMP[n]->counter_BTsrc));

        /* Keep the current average mad */
        if (pMP[n]->aver_mad != 0)
        {
            pMP[n]->aver_mad_prev = pMP[n]->aver_mad;
            pMP[n]->encoded_frames_prev = pMP[n]->encoded_frames;
        }

        pMP[n]->aver_mad = 0;
        pMP[n]->overlapped_win_size = 4;

        /* Misc */
        pMP[n]->sum_mad = pMP[n]->sum_QP = 0;
        //pMP[n]->encoded_frames_prev = pMP[n]->encoded_frames;
        pMP[n]->encoded_frames = pMP[n]->re_encoded_frames = pMP[n]->re_encoded_times = 0;

    } /* end of: for(n=0; n<numLayers; n++) */
    /* NOTE(review): the return statement and closing brace below were lost
       to `<...>`-stripping in the source dump and are reconstructed here;
       they match the only well-formed completion of this function. */

    return PV_SUCCESS;
}

/* ================================================================================ */
/*  Function : targetBitCalculation                                                  */
/*  Date     : 10/01/2001                                                            */
/*  Purpose  : target bit allocation for the current frame                           */
/*  In/out   : rc->T                                                                 */
/*  Return   : Void                                                                  */
/*  Modified :                                                                       */
/* ================================================================================ */

/* Computes the bit target rc->T for the next frame from the frame's MAD
 * relative to the running average MAD, moving "bit transfer" counters
 * (BTsrc = bits lent out, BTdst = bits borrowed) within VBV limits. */
void targetBitCalculation(void *input)
{
    VideoEncData *video = (VideoEncData *) input;
    MultiPass *pMP = video->pMP[video->currLayer];
    Vol *currVol =
video->vol[video->currLayer];
    rateControl *rc = video->rc[video->currLayer];
    float curr_mad;//, average_mad;
    Int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;
    /* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */

    /* NOTE(review): `video` (and video->pMP / video->vol / video->rc) were
       already dereferenced above, so this guard cannot catch a NULL video;
       it only covers NULL table entries. */
    if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)
        return;

    /* some stuff about frame dropping remained here to be done because pMP cannot be inserted into updateRateControl()*/
    updateRC_PostProc(rc, video);

    /* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid integer overflow */
    if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 1000)
    {
        pMP->counter_BTsrc -= 1000;
        pMP->counter_BTdst -= 1000;
    }

    /* ---------------------------------------------------------------------------------------------------*/
    /* target calculation */
    curr_mad = video->sumMAD / (float)currVol->nTotalMB;
    if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */
    diff_counter_BTsrc = diff_counter_BTdst = 0;
    pMP->diff_counter = 0;

    /*1.calculate average mad */
    pMP->sum_mad += curr_mad;
    //average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(float)(pMP->encoded_frames+1)); /* this function is called from the scond encoded frame*/
    //pMP->aver_mad = average_mad;
    if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */
        pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1);

    if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0) /* 7/31/03 */
        pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1);

    /*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */
    if (pMP->overlapped_win_size == 0)
    {
        /* original verison */
        /* harder-than-average frame: borrow bits (BTdst); easier: lend (BTsrc) */
        if (curr_mad > pMP->aver_mad*1.1)
        {
            if (curr_mad / (pMP->aver_mad + 0.0001) > 2)
                diff_counter_BTdst = (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10;
            //diff_counter_BTdst = (Int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10;
            else
                diff_counter_BTdst = (Int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10;
        }
        else /* curr_mad <= average_mad*1.1 */
            //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4);
            diff_counter_BTsrc = 10 - (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5);
        //diff_counter_BTsrc = 10 - (Int)(curr_mad/pMP->aver_mad/0.1 + 0.5)

        /* actively fill in the possible gap */
        if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
                curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
            diff_counter_BTsrc = 1;

    }
    else if (pMP->overlapped_win_size > 0)
    {
        /* transition time: use previous average mad "pMP->aver_mad_prev" instead of the current average mad "pMP->aver_mad" */
        if (curr_mad > pMP->aver_mad_prev*1.1)
        {
            if (curr_mad / pMP->aver_mad_prev > 2)
                diff_counter_BTdst = (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10;
            //diff_counter_BTdst = (Int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10;
            else
                diff_counter_BTdst = (Int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10;
        }
        else /* curr_mad <= average_mad*1.1 */
            //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4);
            diff_counter_BTsrc = 10 - (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5);
        //diff_counter_BTsrc = 10 - (Int)(curr_mad/pMP->aver_mad_prev/0.1 + 0.5)

        /* actively fill in the possible gap */
        if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
                curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
            diff_counter_BTsrc = 1;

        if (--pMP->overlapped_win_size <= 0) pMP->overlapped_win_size = 0;
    }

    /* if difference is too much, do clipping */
    /* First, set the upper bound for current bit allocation variance: 80% of available buffer */
    bound = (Int)((rc->Bs / 2 - rc->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rc->Bs */
    diff_counter_BTsrc = PV_MIN(diff_counter_BTsrc, bound);
    diff_counter_BTdst = PV_MIN(diff_counter_BTdst, bound);

    /* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */
    bound = 50;
    // if(video->encParams->RC_Type == CBR_LOWDELAY)
    // not necessary bound = 10; /* 1/17/02 -- For Low delay */

    diff_counter_BTsrc = PV_MIN(diff_counter_BTsrc, bound);
    diff_counter_BTdst = PV_MIN(diff_counter_BTdst, bound);

    /* Third, check the buffer */
    prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc;
    curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc);

    if (PV_ABS(prev_counter_diff) >= rc->max_BitVariance_num || PV_ABS(curr_counter_diff) >= rc->max_BitVariance_num)
        // PV_ABS(curr_counter_diff) >= PV_ABS(prev_counter_diff) )
    {
        //diff_counter_BTsrc = diff_counter_BTdst = 0;

        if (curr_counter_diff > rc->max_BitVariance_num && diff_counter_BTdst)
        {
            diff_counter_BTdst = (rc->max_BitVariance_num -
prev_counter_diff) + diff_counter_BTsrc;
            if (diff_counter_BTdst < 0) diff_counter_BTdst = 0;
        }
        else if (curr_counter_diff < -rc->max_BitVariance_num && diff_counter_BTsrc)
        {
            diff_counter_BTsrc = diff_counter_BTdst - (-rc->max_BitVariance_num - prev_counter_diff);
            if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0;
        }
    }

    /*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */
    //rc->TMN_TH = (Int)((float)pMP->bitrate/pMP->framerate);
    rc->TMN_TH = (Int)(pMP->target_bits_per_frame);
    pMP->diff_counter = 0;

    if (diff_counter_BTsrc)
    {
        /* lend bits: shrink the target by 10% per counter unit */
        rc->TMN_TH -= (Int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1);
        pMP->diff_counter = -diff_counter_BTsrc;
    }
    else if (diff_counter_BTdst)
    {
        /* borrow bits: grow the target by 10% per counter unit */
        rc->TMN_TH += (Int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1);
        pMP->diff_counter = diff_counter_BTdst;
    }

    /*4.update pMP->counter_BTsrc, pMP->counter_BTdst */
    pMP->counter_BTsrc += diff_counter_BTsrc;
    pMP->counter_BTdst += diff_counter_BTdst;

    /*5.target bit calculation */
    rc->T = rc->TMN_TH - rc->TMN_W;
    //rc->T = rc->TMN_TH - (Int)((float)rc->TMN_W/rc->frameRate);

    if (video->encParams->H263_Enabled && rc->T > video->encParams->maxFrameSize)
    {
        rc->T = video->encParams->maxFrameSize; // added this 11/07/05
    }
}

/* ================================================================================ */
/*  Function : calculateQuantizer_Multipass                                          */
/*  Date     : 10/01/2001                                                            */
/*  Purpose  : variable rate bit allocation + new QP determination scheme            */
/*                                                                                   */
/*  In/out   : rc->T and rc->Qc                                                      */
/*  Return   : Void                                                                  */
/*  Modified :                                                                       */
/* ================================================================================ */

/* Mad based variable bit allocation + QP calculation with a new quadratic method */
/* Picks rc->Qc (1..31) by matching the current (target, MAD) against the
 * closest recorded RD sample and scaling its QP via sqrt/pow models. */
void calculateQuantizer_Multipass(void *input)
{
    VideoEncData *video = (VideoEncData *) input;
    MultiPass *pMP = video->pMP[video->currLayer];
    Vol *currVol = video->vol[video->currLayer];
    rateControl *rc = video->rc[video->currLayer];

    Int prev_QP, prev_actual_bits, curr_target, i, j;
    float curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;

    /* NOTE(review): as in targetBitCalculation, `video` is dereferenced
       before this guard, so it only covers NULL table entries. */
    if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)
        return;

    /* Mad based variable bit allocation */
    targetBitCalculation((void*) video);

    if (rc->T <= 0 || video->sumMAD == 0)
    {
        if (rc->T < 0) rc->Qc = 31; /* no bit budget: coarsest quantizer */
        return;
    }

    /* ---------------------------------------------------------------------------------------------------*/
    /* current frame QP estimation */
    curr_target = rc->T;
    curr_mad = video->sumMAD / (float)currVol->nTotalMB;
    if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */
    curr_RD = (float)curr_target / curr_mad;

    /* Another version of search the optimal point */
    /* find the history frame whose MAD is closest to the current MAD */
    prev_actual_bits = pMP->pRDSamples[0][0].actual_bits;
    prev_mad = pMP->pRDSamples[0][0].mad;

    for (i = 0, j = 0; i < pMP->frameRange; i++)
    {
        if (pMP->pRDSamples[i][0].mad != 0 && prev_mad != 0 &&
                PV_ABS(prev_mad - curr_mad) > PV_ABS(pMP->pRDSamples[i][0].mad - curr_mad))
        {
            prev_mad = pMP->pRDSamples[i][0].mad;
            prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;
            j = i;
        }
    }
    prev_QP = pMP->pRDSamples[j][0].QP;

    /* within that frame, pick the sample whose bit count is closest to target */
    for (i = 1; i < pMP->samplesPerFrame[j]; i++)
    {
        if (PV_ABS(prev_actual_bits - curr_target) > PV_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target))
        {
            prev_actual_bits = pMP->pRDSamples[j][i].actual_bits;
            prev_QP = pMP->pRDSamples[j][i].QP;
        }
    }

    // quadratic approximation
    prev_RD = (float)prev_actual_bits / prev_mad;
    //rc->Qc = (Int)(prev_QP * sqrt(prev_actual_bits/curr_target) + 0.4);
    if (prev_QP == 1) // 11/14/05, added this to allow getting out of QP = 1 easily
    {
        rc->Qc = (Int)(prev_RD / curr_RD + 0.5);
    }
    else
    {
        rc->Qc = (Int)(prev_QP * M4VENC_SQRT(prev_RD / curr_RD) + 0.9);

        if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0)
            rc->Qc = (Int)(prev_QP * (M4VENC_SQRT(prev_RD / curr_RD) + prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */
        else
            rc->Qc = (Int)(prev_QP * (M4VENC_SQRT(prev_RD / curr_RD) + M4VENC_POW(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9);
    }
    //rc->Qc =(Int)(prev_QP * sqrt(prev_RD/curr_RD) + 0.4); // 11/08/05

    // lower bound on Qc should be a function of curr_mad
    // When mad is already low, lower bound on Qc doesn't have to be small.
    // Note, this doesn't work well for low complexity clip encoded at high bit rate
    // it doesn't hit the target bit rate due to this QP lower bound.
    /// if((curr_mad < 8) && (rc->Qc < 12)) rc->Qc = 12;
    // else if((curr_mad < 128) && (rc->Qc < 3)) rc->Qc = 3;

    /* clamp to the legal MPEG-4/H.263 quantizer range */
    if (rc->Qc < 1) rc->Qc = 1;
    if (rc->Qc > 31) rc->Qc = 31;

    /* active bit resource protection */
    aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (float)pMP->encoded_frames);
    average_mad = (pMP->encoded_frames == 0 ? 0 : pMP->sum_mad / (float)pMP->encoded_frames); /* this function is called from the scond encoded frame*/
    if (pMP->diff_counter == 0 &&
            ((float)rc->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) &&
            pMP->counter_BTsrc <= (pMP->counter_BTdst + (Int)(pMP->framerate*1.0 + 0.5)))
    {
        rc->TMN_TH -= (Int)(pMP->target_bits_per_frame / 10.0);
        rc->T = rc->TMN_TH - rc->TMN_W;
        pMP->counter_BTsrc++;
        pMP->diff_counter--;
    }
}

/* ======================================================================== */
/*  Function : updateRateControl                                            */
/*  Date     : 11/17/2000                                                   */
/*  Purpose  : Update the RD Modal (After Encoding the Current Frame)       */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/* ======================================================================== */

/* Folds the actual bit count (rc->Rc) into the VBV model and decides
 * whether frames must be skipped: skip_next_frame = -1 drops the frame
 * just coded, >= 1 skips that many upcoming frames. */
void updateRateControl(rateControl *rc, VideoEncData *video)
{
    Int frame_bits;

    /* rate control */
    frame_bits = (Int)(rc->bitrate / rc->framerate);
    rc->TMN_W += (rc->Rc - rc->TMN_TH);
    rc->VBV_fullness += (rc->Rc - frame_bits); //rc->Rp);
    //if(rc->VBV_fullness < 0) rc->VBV_fullness = -1;

    rc->encoded_frames++;

    /* frame dropping */
    rc->skip_next_frame = 0;

    if ((video->encParams->H263_Enabled && rc->Rc > video->encParams->maxFrameSize) || /* For H263/short header mode, drop the frame if the actual frame size exceeds the bound */
            (rc->VBV_fullness > rc->Bs / 2 && !rc->no_pre_skip)) /* skip the
current frame */ /* rc->Bs */ { rc->TMN_W -= (rc->Rc - rc->TMN_TH); rc->VBV_fullness -= rc->Rc; rc->skip_next_frame = -1; } else if ((float)(rc->VBV_fullness - rc->VBV_fullness_offset) > (rc->Bs / 2 - rc->VBV_fullness_offset)*0.95 && !rc->no_frame_skip) /* skip next frame */ { rc->VBV_fullness -= frame_bits; //rc->Rp; rc->skip_next_frame = 1; /* skip more than 1 frames */ //while(rc->VBV_fullness > rc->Bs*0.475) while ((rc->VBV_fullness - rc->VBV_fullness_offset) > (rc->Bs / 2 - rc->VBV_fullness_offset)*0.95) { rc->VBV_fullness -= frame_bits; //rc->Rp; rc->skip_next_frame++; } /* END */ } } /* ======================================================================== */ /* Function : updateRC_PostProc */ /* Date : 04/08/2002 */ /* Purpose : Remaing RC update stuff for frame skip and buffer underflow */ /* check */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ void updateRC_PostProc(rateControl *rc, VideoEncData *video) { MultiPass *pMP = video->pMP[video->currLayer]; if (rc->skip_next_frame == 1 && !rc->no_frame_skip) /* skip next frame */ { pMP->counter_BTsrc += 10 * rc->skip_next_frame; } else if (rc->skip_next_frame == -1 && !rc->no_pre_skip) /* skip current frame */ { pMP->counter_BTdst -= pMP->diff_counter; pMP->counter_BTsrc += 10; pMP->sum_mad -= pMP->mad; pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (float)(pMP->encoded_frames - 1 + 0.0001); pMP->sum_QP -= pMP->QP; pMP->encoded_frames --; } /* some stuff in update VBV_fullness remains here */ //if(rc->VBV_fullness < -rc->Bs/2) /* rc->Bs */ if (rc->VBV_fullness < rc->low_bound) { rc->VBV_fullness = rc->low_bound; // -rc->Bs/2; rc->TMN_W = rc->VBV_fullness - rc->low_bound; pMP->counter_BTsrc = pMP->counter_BTdst + (Int)((float)(rc->Bs / 2 - rc->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10)); } } ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/rate_control.h 
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */
#ifndef _RATE_CONTROL_H_
#define _RATE_CONTROL_H_

#include "mp4def.h"

/* One node of the per-frame history list (QP, bits, MAD). */
typedef struct tagdataPointArray
{
    Int Qp;
    Int Rp;
    float Mp; /* for MB-based RC, 3/14/01 */
    struct tagdataPointArray *next;
    struct tagdataPointArray *prev;
} dataPointArray;

/* Per-layer rate-control state; see rate_control.cpp for usage. */
typedef struct
{
    Int alpha;  /* weight for I frame */
    Int Rs;     /* bit rate for the sequence (or segment) e.g., 24000 bits/sec */
    Int Rc;     /* bits used for the current frame. It is the bit count obtained after encoding. */
    Int Rp;     /* bits to be removed from the buffer per picture. */
    /* ? is this the average one, or just the bits coded for the previous frame */
    Int Rps;    /* bit to be removed from buffer per src frame */
    float Ts;   /* number of seconds for the sequence (or segment). e.g., 10 sec */
    float Ep;
    float Ec;   /* mean absolute difference for the current frame after motion compensation. */
    /* If the macroblock is intra coded, the original spatial pixel values are summed. */
    Int Qc;     /* quantization level used for the current frame. */
    Int Nr;     /* number of P frames remaining for encoding. */
    Int Rr;     /* number of bits remaining for encoding this sequence (or segment). */
    Int Rr_Old; /* 12/24/00 */
    Int T;      /* target bit to be used for the current frame. */
    Int S;      /* number of bits used for encoding the previous frame. */
    Int Hc;     /* header and motion vector bits used in the current frame. It includes all the information except to the residual information. */
    Int Hp;     /* header and motion vector bits used in the previous frame. It includes all the information except to the residual information. */
    Int Ql;     /* quantization level used in the previous frame */
    Int Bs;     /* buffer size e.g., R/2 */
    Int B;      /* current buffer level e.g., R/4 - start from the middle of the buffer */
    float X1;
    float X2;
    float X11;
    float M;    /* safe margin for the buffer */
    float smTick;    /* ratio of src versus enc frame rate */
    double remnant;  /* remainder frame of src/enc frame for fine frame skipping */
    Int timeIncRes;  /* vol->timeIncrementResolution */

    dataPointArray *end; /* quantization levels for the past (20) frames */

    Int frameNumber; /* ranging from 0 to 20 nodes*/
    Int w;
    Int Nr_Original;
    Int Nr_Old, Nr_Old2;
    Int skip_next_frame;
    Int Qdep;       /* smooth Q adjustment */
    Int fine_frame_skip;
    Int VBR_Enabled;
    Int no_frame_skip;
    Int no_pre_skip;

    Int totalFrameNumber; /* total coded frames, for debugging!!*/

    char oFirstTime;

    /* BX rate control */
    Int TMN_W;
    Int TMN_TH;
    Int VBV_fullness;
    Int max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/
    Int encoded_frames;      /* counter for all encoded frames */
    float framerate;
    Int bitrate;
    Int low_bound;           /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */
    Int VBV_fullness_offset; /* offset of VBV_fullness, usually is zero, but can be changed in H.263 mode*/
    /* End BX */

} rateControl;

#endif /* _RATE_CONTROL_H_ */



================================================
FILE:
RtspCamera/jni/m4v_h263/enc/src/sad.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * ------------------------------------------------------------------- */

/* Sum-of-absolute-differences kernels used by motion estimation. */
#include "oscl_base_macros.h" // for OSCL_UNUSED_ARG
#include "mp4def.h"
#include "mp4lib_int.h"
#include "sad_inline.h"

#define Cached_lx 176

/* optional instrumentation counters, compiled in only with _SAD_STAT */
#ifdef _SAD_STAT
ULong num_sad_MB = 0;
ULong num_sad_Blk = 0;
ULong num_sad_MB_call = 0;
ULong num_sad_Blk_call = 0;
#define NUM_SAD_MB_CALL()   num_sad_MB_call++
#define NUM_SAD_MB()        num_sad_MB++
#define NUM_SAD_BLK_CALL()  num_sad_Blk_call++
#define NUM_SAD_BLK()       num_sad_Blk++
#else
#define NUM_SAD_MB_CALL()
#define NUM_SAD_MB()
#define NUM_SAD_BLK_CALL()
#define NUM_SAD_BLK()
#endif

/* consist of

Int SAD_Macroblock_C(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_MB_HTFM_Collect(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_MB_HTFM(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_Block_C(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_Blk_PADDING(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)
Int SAD_MB_PADDING(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)
Int SAD_MB_PAD1(UChar *ref,UChar *cur,Int dmin,Int lx,Int *rep);
Int SAD_MB_PADDING_HTFM_Collect(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)
Int SAD_MB_PADDING_HTFM(UChar *ref,UChar *cur,Int dmin,Int lx,void *vptr)

*/

#ifdef __cplusplus
extern "C"
{
#endif

    Int SAD_MB_PAD1(UChar *ref, UChar *cur, Int dmin, Int lx, Int *rep);

/*==================================================================
    Function:   SAD_Macroblock
    Date:       09/07/2000
    Purpose:    Compute SAD 16x16 between blk and ref.
    To do:      Uniform subsampling will be inserted later!
                Hypothesis Testing Fast Matching to be used later!
    Changes:
    11/7/00:    implemented MMX
    1/24/01:    implemented SSE
==================================================================*/
/********** C ************/
    /* dmin_lx packs the early-out threshold (high 16 bits) and the row
       stride lx (low 16 bits); the work is delegated to simd_sad_mb()
       from sad_inline.h. */
    Int SAD_Macroblock_C(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)
    {
        int32 x10;
        Int dmin = (ULong)dmin_lx >> 16;
        Int lx = dmin_lx & 0xFFFF;

        OSCL_UNUSED_ARG(extra_info);

        NUM_SAD_MB_CALL();

        x10 = simd_sad_mb(ref, blk, dmin, lx);

        return x10;
    }

#ifdef HTFM /* HTFM with uniform subsampling implementation, 2/28/01 */
/*===============================================================
    Function:   SAD_MB_HTFM_Collect and SAD_MB_HTFM
    Date:       3/2/1
    Purpose:    Compute the SAD on a 16x16 block using
                uniform subsampling and hypothesis testing fast matching
                for early dropout. SAD_MB_HP_HTFM_Collect is to collect
                the statistics to compute the thresholds to be used in
                SAD_MB_HP_HTFM.
        Input/Output:
        Changes:
    ===============================================================*/
    /* Training pass for HTFM: accumulates the 16x16 SAD in 16 "stages"
       (the visiting order is given by htfm_stat->offsetRef) and records
       per-stage partial sums so the caller can derive dropout thresholds.
       dmin_lx packs current-best SAD (high 16 bits) and stride lx (low 16).
       NOTE(review): each stage reads the current block linearly one word at
       a time while the reference pixels are sampled 4 apart — this assumes
       blk has been pre-arranged by the caller into HTFM subsampled order;
       confirm against the caller before changing anything here. */
    Int SAD_MB_HTFM_Collect(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)
    {
        Int i;
        Int sad = 0;
        UChar *p1;
        Int lx4 = (dmin_lx << 2) & 0x3FFFC;  /* 4*lx: row step of one stage */
        ULong cur_word;
        Int saddata[16], tmp, tmp2;          /* used when collecting flag (global) is on */
        Int difmad;
        HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
        Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
        UInt *countbreak = &(htfm_stat->countbreak);
        Int *offsetRef = htfm_stat->offsetRef;

        NUM_SAD_MB_CALL();

        blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
        for (i = 0; i < 16; i++)
        {
            p1 = ref + offsetRef[i];

            /* 4 unrolled groups per stage; each ULong read covers 4 current
               pixels (blk assumed word-aligned), compared against reference
               pixels spaced 4 apart (uniform subsampling). */
            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            NUM_SAD_MB();

            saddata[i] = sad;   /* record partial SAD after this stage */

            if (i > 0)
            {
                /* early dropout once the partial SAD already exceeds the
                   current best; update the MAD-difference statistics first */
                if ((ULong)sad > ((ULong)dmin_lx >> 16))
                {
                    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                    (*countbreak)++;
                    return sad;
                }
            }
        }

        difmad = saddata[0] - ((saddata[1] + 1) >> 1);
        (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
        (*countbreak)++;
        return sad;
    }

    /* Production HTFM SAD: same 16-stage subsampled accumulation as the
       Collect variant, but drops out via the precomputed per-stage
       thresholds nrmlz_th[] (extra_info points at 32 thresholds followed
       by 16 stage offsets).  Returns 65536 (worse than any true 16x16 SAD)
       on dropout so the caller discards this candidate.
       madstar = dmin/16, the per-stage budget derived from the best MAD. */
    Int SAD_MB_HTFM(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)
    {
        Int sad = 0;
        UChar *p1;
        Int i;
        Int tmp, tmp2;
        Int lx4 = (dmin_lx << 2) & 0x3FFFC;  /* 4*lx: row step of one stage */
        Int sadstar = 0, madstar;
        Int *nrmlz_th = (Int*) extra_info;
        Int *offsetRef = (Int*) extra_info + 32;
        ULong cur_word;

        madstar = (ULong)dmin_lx >> 20;  /* (dmin >> 4): per-stage MAD budget */

        NUM_SAD_MB_CALL();

        blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
        for (i = 0; i < 16; i++)
        {
            p1 = ref + offsetRef[i];

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = (cur_word >> 24) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[8];
            tmp2 = (cur_word >> 16) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[4];
            tmp2 = (cur_word >> 8) & 0xFF;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = p1[0];
            p1 += lx4;
            tmp2 = (cur_word & 0xFF);
            sad = SUB_SAD(sad, tmp, tmp2);

            NUM_SAD_MB();

            sadstar += madstar;
            /* keep going only while the partial SAD is within both the
               current best and the HTFM statistical threshold */
            if (((ULong)sad <= ((ULong)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))
                ;
            else
                return 65536;  /* dropout: worse than any real 16x16 SAD */
        }

        return sad;
    }
#endif /* HTFM */

#ifndef NO_INTER4V
    /*==================================================================
        Function:   SAD_Block
        Date:       09/07/2000
        Purpose:    Compute SAD 8x8 between blk and ref (the original
                    header said 16x16, but the loop below covers an
                    8x8 block).
        To do:      Uniform subsampling will be inserted later!
                    Hypothesis Testing Fast Matching to be used later!
        Changes:
        11/7/00:    implemented MMX
        1/24/01:    implemented SSE
    ==================================================================*/
    /********** C ************/
    /* 8x8 SAD with early exit once the running sum exceeds dmin.
       Reads the current block one word (4 pixels) at a time.
       NOTE(review): the row advance kk += ((lx - 32) >> 2) implies the
       current-block rows are (lx - 32) bytes apart, i.e. blk lives in a
       cached buffer rather than the full frame — verify against caller. */
    Int SAD_Block_C(UChar *ref, UChar *blk, Int dmin, Int lx, void *)
    {
        Int sad = 0;

        Int i;
        UChar *ii;
        Int *kk;
        Int tmp, tmp2, tmp3, mask = 0xFF;
        Int width = (lx - 32);

        NUM_SAD_BLK_CALL();

        ii = ref;
        kk = (Int*)blk;  /* assuming word-align for blk */
        for (i = 0; i < 8; i++)
        {
            tmp3 = kk[1];  /* current pixels 4..7 of this row */

            tmp = ii[7];
            tmp2 = (UInt)tmp3 >> 24;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = ii[6];
            tmp2 = (tmp3 >> 16) & mask;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = ii[5];
            tmp2 = (tmp3 >> 8) & mask;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = ii[4];
            tmp2 = tmp3 & mask;
            sad = SUB_SAD(sad, tmp, tmp2);

            tmp3 = *kk;  /* current pixels 0..3 of this row */
            kk += (width >> 2);  /* advance to next current row */

            tmp = ii[3];
            tmp2 = (UInt)tmp3 >> 24;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = ii[2];
            tmp2 = (tmp3 >> 16) & mask;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = ii[1];
            tmp2 = (tmp3 >> 8) & mask;
            sad = SUB_SAD(sad, tmp, tmp2);
            tmp = *ii;
            ii += lx;  /* advance to next reference row */
            tmp2 = tmp3 & mask;
            sad = SUB_SAD(sad, tmp, tmp2);

            NUM_SAD_BLK();

            if (sad > dmin)  /* early dropout against current best */
                return sad;
        }

        return sad;
    }
#endif /* NO_INTER4V */

#ifdef __cplusplus
}
#endif



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/sad_halfpel.cpp
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License,
Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /* contains Int HalfPel1_SAD_MB(UChar *ref,UChar *blk,Int dmin,Int width,Int ih,Int jh) Int HalfPel2_SAD_MB(UChar *ref,UChar *blk,Int dmin,Int width) Int HalfPel1_SAD_Blk(UChar *ref,UChar *blk,Int dmin,Int width,Int ih,Int jh) Int HalfPel2_SAD_Blk(UChar *ref,UChar *blk,Int dmin,Int width) Int SAD_MB_HalfPel_C(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info) Int SAD_MB_HP_HTFM_Collect(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info) Int SAD_MB_HP_HTFM(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info) Int SAD_Blk_HalfPel_C(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info) */ //#include /* for RAND_MAX */ #include "oscl_base_macros.h" // for OSCL_UNUSED_ARG #include "mp4def.h" #include "mp4lib_int.h" #include "sad_halfpel_inline.h" #ifdef _SAD_STAT ULong num_sad_HP_MB = 0; ULong num_sad_HP_Blk = 0; ULong num_sad_HP_MB_call = 0; ULong num_sad_HP_Blk_call = 0; #define NUM_SAD_HP_MB_CALL() num_sad_HP_MB_call++ #define NUM_SAD_HP_MB() num_sad_HP_MB++ #define NUM_SAD_HP_BLK_CALL() num_sad_HP_Blk_call++ #define NUM_SAD_HP_BLK() num_sad_HP_Blk++ #else #define NUM_SAD_HP_MB_CALL() #define NUM_SAD_HP_MB() #define NUM_SAD_HP_BLK_CALL() #define NUM_SAD_HP_BLK() #endif #ifdef __cplusplus extern "C" { #endif /*================================================================== Function: 
HalfPel1_SAD_MB Date: 03/27/2001 Purpose: Compute SAD 16x16 between blk and ref in halfpel resolution, Changes: ==================================================================*/ /* One component is half-pel */ Int HalfPel1_SAD_MB(UChar *ref, UChar *blk, Int dmin, Int width, Int ih, Int jh) { Int i, j; Int sad = 0; UChar *kk, *p1, *p2; Int temp; OSCL_UNUSED_ARG(jh); p1 = ref; if (ih) p2 = ref + 1; else p2 = ref + width; kk = blk; for (i = 0; i < 16; i++) { for (j = 0; j < 16; j++) { temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++; sad += PV_ABS(temp); } if (sad > dmin) return sad; p1 += width; p2 += width; } return sad; } /* Two components need half-pel */ Int HalfPel2_SAD_MB(UChar *ref, UChar *blk, Int dmin, Int width) { Int i, j; Int sad = 0; UChar *kk, *p1, *p2, *p3, *p4; Int temp; p1 = ref; p2 = ref + 1; p3 = ref + width; p4 = ref + width + 1; kk = blk; for (i = 0; i < 16; i++) { for (j = 0; j < 16; j++) { temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++; sad += PV_ABS(temp); } if (sad > dmin) return sad; p1 += width; p3 += width; p2 += width; p4 += width; } return sad; } #ifndef NO_INTER4V /*================================================================== Function: HalfPel1_SAD_Blk Date: 03/27/2001 Purpose: Compute SAD 8x8 between blk and ref in halfpel resolution. 
    Changes:
==================================================================*/
/* One component needs half-pel */
/* 8x8 SAD against a one-axis half-pel candidate (ih selects horizontal
   vs vertical tap), early dropout past dmin.  kk += 8 after each row:
   the current 8x8 block sits inside a 16-byte-stride macroblock copy. */
Int HalfPel1_SAD_Blk(UChar *ref, UChar *blk, Int dmin, Int width, Int ih, Int jh)
{
    Int i, j;
    Int sad = 0;
    UChar *kk, *p1, *p2;
    Int temp;

    OSCL_UNUSED_ARG(jh);

    p1 = ref;
    if (ih)
        p2 = ref + 1;       /* horizontal half-pel: right neighbour */
    else
        p2 = ref + width;   /* vertical half-pel: row below */
    kk = blk;
    for (i = 0; i < 8; i++)
    {
        for (j = 0; j < 8; j++)
        {
            temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
            sad += PV_ABS(temp);
        }
        if (sad > dmin)     /* early dropout against current best */
            return sad;
        p1 += width;
        p2 += width;
        kk += 8;            /* skip other half of the 16-wide MB row */
    }
    return sad;
}

/* Two components need half-pel */
/* 8x8 SAD against a diagonal half-pel candidate (4-tap rounded average),
   same early-dropout and block-stride conventions as above. */
Int HalfPel2_SAD_Blk(UChar *ref, UChar *blk, Int dmin, Int width)
{
    Int i, j;
    Int sad = 0;
    UChar *kk, *p1, *p2, *p3, *p4;
    Int temp;

    p1 = ref;
    p2 = ref + 1;
    p3 = ref + width;
    p4 = ref + width + 1;
    kk = blk;
    for (i = 0; i < 8; i++)
    {
        for (j = 0; j < 8; j++)
        {
            temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
            sad += PV_ABS(temp);
        }
        if (sad > dmin)     /* early dropout against current best */
            return sad;
        p1 += width;
        p3 += width;
        p2 += width;
        p4 += width;
        kk += 8;            /* skip other half of the 16-wide MB row */
    }
    return sad;
}
#endif // NO_INTER4V

/*===============================================================
    Function:   SAD_MB_HalfPel
    Date:       09/17/2000
    Purpose:    Compute the SAD on the half-pel resolution
    Input/Output:   hmem is assumed to be a pointer to the starting
                point of the search in the 33x33 matrix search region
    Changes:
    11/7/00:    implemented MMX
===============================================================*/
/*==================================================================
    Function:   SAD_MB_HalfPel_C
    Date:       04/30/2001
    Purpose:    Compute SAD 16x16 between blk and ref in halfpel
                resolution.
    Changes:
==================================================================*/
/* One component is half-pel */
/* 16x16 SAD, both components half-pel (diagonal 4-tap average).
   dmin_rx packs current-best SAD (high 16 bits) and stride rx (low 16). */
Int SAD_MB_HalfPel_Cxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *kk, *p1, *p2, *p3, *p4;
//  Int sumref=0;
    Int temp;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */

    OSCL_UNUSED_ARG(extra_info);

    NUM_SAD_HP_MB_CALL();

    p1 = ref;
    p2 = ref + 1;
    p3 = ref + rx;
    p4 = ref + rx + 1;
    kk = blk;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
            sad += PV_ABS(temp);
        }
        NUM_SAD_HP_MB();
        if (sad > (Int)((ULong)dmin_rx >> 16))  /* early dropout */
            return sad;
        p1 += rx;
        p3 += rx;
        p2 += rx;
        p4 += rx;
    }
    return sad;
}

/* 16x16 SAD, vertical-only half-pel (2-tap average with the row below). */
Int SAD_MB_HalfPel_Cyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *kk, *p1, *p2;
//  Int sumref=0;
    Int temp;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */

    OSCL_UNUSED_ARG(extra_info);

    NUM_SAD_HP_MB_CALL();

    p1 = ref;
    p2 = ref + rx; /* either left/right or top/bottom pixel */
    kk = blk;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
            sad += PV_ABS(temp);
        }
        NUM_SAD_HP_MB();
        if (sad > (Int)((ULong)dmin_rx >> 16))  /* early dropout */
            return sad;
        p1 += rx;
        p2 += rx;
    }
    return sad;
}

/* 16x16 SAD, horizontal-only half-pel (2-tap average with right pixel). */
Int SAD_MB_HalfPel_Cxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *kk, *p1;
//  Int sumref=0;
    Int temp;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */

    OSCL_UNUSED_ARG(extra_info);

    NUM_SAD_HP_MB_CALL();

    p1 = ref;
    kk = blk;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;
            sad += PV_ABS(temp);
        }
        NUM_SAD_HP_MB();
        if (sad > (Int)((ULong)dmin_rx >> 16))  /* early dropout */
            return sad;
        p1 += rx;
    }
    return sad;
}

#ifdef HTFM  /* HTFM with uniform subsampling implementation, 2/28/01 */
//Checheck here
/* HTFM training pass, diagonal half-pel: 16 subsampled stages of 4 lines
   each, recording per-stage partial SADs into htfm_stat for threshold
   estimation.  NOTE(review): blk is assumed pre-arranged in HTFM
   subsampled order by the caller — confirm before changing. */
Int SAD_MB_HP_HTFM_Collectxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *p1, *p2;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */
    Int refwx4 = rx << 2;        /* 4*rx: row step of one stage */
    Int saddata[16];             /* used when collecting flag (global) is on */
    Int difmad, tmp, tmp2;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    UInt *countbreak = &(htfm_stat->countbreak);
    Int *offsetRef = htfm_stat->offsetRef;
    ULong cur_word;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;

        j = 4;/* 4 lines */
        do
        {
            cur_word = *((ULong*)(blk += 4));
            /* tmp accumulates the 4-tap sum + 2 (rounding); INTERP2_SUB_SAD
               does (tmp >> 2) - tmp2 and adds the absolute value */
            tmp = p1[12] + p2[12];
            tmp2 = p1[13] + p2[13];
            tmp += tmp2;
            tmp2 = (cur_word >> 24) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8] + p2[8];
            tmp2 = p1[9] + p2[9];
            tmp += tmp2;
            tmp2 = (cur_word >> 16) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4] + p2[4];
            tmp2 = p1[5] + p2[5];
            tmp += tmp2;
            tmp2 = (cur_word >> 8) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp2 = p1[1] + p2[1];
            tmp = p1[0] + p2[0];
            p1 += refwx4;
            p2 += refwx4;
            tmp += tmp2;
            tmp2 = (cur_word & 0xFF);
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if (sad > (Int)((ULong)dmin_rx >> 16))
            {
                /* early dropout: record MAD-difference stats and bail */
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* HTFM training pass, vertical-only half-pel (2-tap average). */
Int SAD_MB_HP_HTFM_Collectyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *p1, *p2;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */
    Int refwx4 = rx << 2;        /* 4*rx: row step of one stage */
    Int saddata[16];             /* used when collecting flag (global) is on */
    Int difmad, tmp, tmp2;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    UInt *countbreak = &(htfm_stat->countbreak);
    Int *offsetRef = htfm_stat->offsetRef;
    ULong cur_word;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;
        j = 4;
        do
        {
            cur_word = *((ULong*)(blk += 4));
            /* tmp2 accumulates a + b + 1 (rounding); INTERP1_SUB_SAD does
               (tmp2 >> 1) - tmp and adds the absolute value */
            tmp = p1[12];
            tmp2 = p2[12];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p2[8];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p2[4];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            p1 += refwx4;
            tmp2 = p2[0];
            p2 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if (sad > (Int)((ULong)dmin_rx >> 16))
            {
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* HTFM training pass, horizontal-only half-pel (2-tap average). */
Int SAD_MB_HP_HTFM_Collectxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *p1;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */
    Int refwx4 = rx << 2;        /* 4*rx: row step of one stage */
    Int saddata[16];             /* used when collecting flag (global) is on */
    Int difmad, tmp, tmp2;
    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
    Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
    UInt *countbreak = &(htfm_stat->countbreak);
    Int *offsetRef = htfm_stat->offsetRef;
    ULong cur_word;

    NUM_SAD_HP_MB_CALL();

    blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        j = 4; /* 4 lines */
        do
        {
            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = p1[13];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p1[9];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p1[5];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            tmp2 = p1[1];
            p1 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        saddata[i] = sad;

        if (i > 0)
        {
            if (sad > (Int)((ULong)dmin_rx >> 16))
            {
                difmad = saddata[0] - ((saddata[1] + 1) >> 1);
                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
                (*countbreak)++;
                return sad;
            }
        }
    }

    difmad = saddata[0] - ((saddata[1] + 1) >> 1);
    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
    (*countbreak)++;
    return sad;
}

/* Production HTFM halfpel SAD, diagonal: drops out via per-stage
   thresholds (extra_info: 32 thresholds then 16 stage offsets);
   returns 65536 on dropout.  madstar = dmin/16. */
Int SAD_MB_HP_HTFMxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0, tmp, tmp2;
    UChar *p1, *p2;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */
    Int refwx4 = rx << 2;        /* 4*rx: row step of one stage */
    Int sadstar = 0, madstar;
    Int *nrmlz_th = (Int*) extra_info;
    Int *offsetRef = nrmlz_th + 32;
    ULong cur_word;

    madstar = (ULong)dmin_rx >> 20;  /* per-stage budget */

    NUM_SAD_HP_MB_CALL();

    blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;
        j = 4; /* 4 lines */
        do
        {
            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12] + p2[12];
            tmp2 = p1[13] + p2[13];
            tmp += tmp2;
            tmp2 = (cur_word >> 24) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8] + p2[8];
            tmp2 = p1[9] + p2[9];
            tmp += tmp2;
            tmp2 = (cur_word >> 16) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4] + p2[4];
            tmp2 = p1[5] + p2[5];
            tmp += tmp2;
            tmp2 = (cur_word >> 8) & 0xFF;
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
            tmp2 = p1[1] + p2[1];
            tmp = p1[0] + p2[0];
            p1 += refwx4;
            p2 += refwx4;
            tmp += tmp2;
            tmp2 = (cur_word & 0xFF);
            tmp += 2;
            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        sadstar += madstar;
        if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))
        {
            return 65536;  /* dropout: worse than any real 16x16 SAD */
        }
    }

    return sad;
}

/* Production HTFM halfpel SAD, vertical-only variant. */
Int SAD_MB_HP_HTFMyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0, tmp, tmp2;
    UChar *p1, *p2;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */
    Int refwx4 = rx << 2;        /* 4*rx: row step of one stage */
    Int sadstar = 0, madstar;
    Int *nrmlz_th = (Int*) extra_info;
    Int *offsetRef = nrmlz_th + 32;
    ULong cur_word;

    madstar = (ULong)dmin_rx >> 20;  /* per-stage budget */

    NUM_SAD_HP_MB_CALL();

    blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        p2 = p1 + rx;
        j = 4;
        do
        {
            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = p2[12];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p2[8];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p2[4];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            p1 += refwx4;
            tmp2 = p2[0];
            p2 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        sadstar += madstar;
        if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))
        {
            return 65536;  /* dropout: worse than any real 16x16 SAD */
        }
    }

    return sad;
}

/* Production HTFM halfpel SAD, horizontal-only variant. */
Int SAD_MB_HP_HTFMxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
    Int i, j;
    Int sad = 0, tmp, tmp2;
    UChar *p1;
    Int rx = dmin_rx & 0xFFFF;   /* unpack reference line stride */
    Int refwx4 = rx << 2;        /* 4*rx: row step of one stage */
    Int sadstar = 0, madstar;
    Int *nrmlz_th = (Int*) extra_info;
    Int *offsetRef = nrmlz_th + 32;
    ULong cur_word;

    madstar = (ULong)dmin_rx >> 20;  /* per-stage budget */

    NUM_SAD_HP_MB_CALL();

    blk -= 4;  /* pre-decrement so each (blk += 4) read starts at the block */
    for (i = 0; i < 16; i++) /* 16 stages */
    {
        p1 = ref + offsetRef[i];
        j = 4;/* 4 lines */
        do
        {
            cur_word = *((ULong*)(blk += 4));
            tmp = p1[12];
            tmp2 = p1[13];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 24) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[8];
            tmp2 = p1[9];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 16) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[4];
            tmp2 = p1[5];
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word >> 8) & 0xFF;
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
            tmp = p1[0];
            tmp2 = p1[1];
            p1 += refwx4;
            tmp++;
            tmp2 += tmp;
            tmp = (cur_word & 0xFF);
            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
        }
        while (--j);

        NUM_SAD_HP_MB();

        sadstar += madstar;
        if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))
        {
            return 65536;  /* dropout: worse than any real 16x16 SAD */
        }
    }

    return sad;
}
#endif /* HTFM */

#ifndef NO_INTER4V
/*==================================================================
    Function:   SAD_Blk_HalfPel_C
    Date:       04/30/2001
    Purpose:    Compute SAD 8x8 between blk and ref in halfpel
                resolution.
    Changes:
==================================================================*/
/* One component is half-pel */
/* 8x8 half-pel SAD: 4-tap diagonal average when both xh and yh are set,
   otherwise a 2-tap average along the axis selected by xh/yh.
   width is the current-block row stride, rx the reference row stride. */
Int SAD_Blk_HalfPel_C(UChar *ref, UChar *blk, Int dmin, Int width, Int rx, Int xh, Int yh, void *extra_info)
{
    Int i, j;
    Int sad = 0;
    UChar *kk, *p1, *p2, *p3, *p4;
    Int temp;

    OSCL_UNUSED_ARG(extra_info);

    NUM_SAD_HP_BLK_CALL();

    if (xh && yh)
    {
        p1 = ref;
        p2 = ref + xh;
        p3 = ref + yh * rx;
        p4 = ref + yh * rx + xh;
        kk = blk;
        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - kk[j];
                sad += PV_ABS(temp);
            }
            NUM_SAD_HP_BLK();
            if (sad > dmin)  /* early dropout against current best */
                return sad;
            p1 += rx;
            p3 += rx;
            p2 += rx;
            p4 += rx;
            kk += width;
        }
        return sad;
    }
    else
    {
        p1 = ref;
        p2 = ref + xh + yh * rx; /* either left/right or top/bottom pixel */
        kk = blk;
        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                temp = ((p1[j] + p2[j] + 1) >> 1) - kk[j];
                sad += PV_ABS(temp);
            }
            NUM_SAD_HP_BLK();
            if (sad > dmin)  /* early dropout against current best */
                return sad;
            p1 += rx;
            p2 += rx;
            kk += width;
        }
        return sad;
    }
}
#endif /* NO_INTER4V */

#ifdef __cplusplus
}
#endif



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/sad_halfpel_inline.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*********************************************************************************/
/*  Filename: sad_halfpel_inline.h                                               */
/*  Description: Implementation for in-line functions used in dct.cpp            */
/*  Modified:                                                                    */
/*********************************************************************************/
#ifndef _SAD_HALFPEL_INLINE_H_
#define _SAD_HALFPEL_INLINE_H_

#ifdef __cplusplus
extern "C"
{
#endif

#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* ARM GNU COMPILER */

    /* Portable C fallback.  tmp2 carries (a + b + 1), so (tmp2 >> 1) is the
       rounded 2-tap half-pel average; tmp is the current pixel.  Adds the
       absolute difference to sad. */
    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        tmp = (tmp2 >> 1) - tmp;
        if (tmp > 0) sad += tmp;
        else sad -= tmp;
        return sad;
    }

    /* tmp carries (a + b + c + d + 2), so (tmp >> 2) is the rounded 4-tap
       diagonal half-pel average; tmp2 is the current pixel.  Adds the
       absolute difference to sad. */
    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        tmp = (tmp >> 2) - tmp2;
        if (tmp > 0) sad += tmp;
        else sad -= tmp;
        return sad;
    }

#elif defined(__CC_ARM)  /* only work with arm v5 */

    /* ARM RVCT inline asm: rsbs computes the signed difference with the
       shift folded in; rsbmi negates it when negative (absolute value). */
    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm
        {
            rsbs tmp, tmp, tmp2, asr #1 ;
            rsbmi tmp, tmp, #0 ;
            add sad, sad, tmp ;
        }
        return sad;
    }

    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm
        {
            rsbs tmp, tmp2, tmp, asr #2 ;
            rsbmi tmp, tmp, #0 ;
            add sad, sad, tmp ;
        }
        return sad;
    }

#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */

    /* GCC extended-asm equivalents of the RVCT versions above. */
    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        register int32 out;
        register int32 temp1;
        register int32 ss = sad;
        register int32 tt = tmp;
        register int32 uu = tmp2;

        asm volatile("rsbs %1, %3, %4, asr #1\n\t"
                     "rsbmi %1, %1, #0\n\t"
                     "add %0, %2, %1"
                     : "=&r"(out), "=&r"(temp1)
                     : "r"(ss), "r"(tt), "r"(uu));
        return out;
    }

    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        register int32 out;
        register int32 temp1;
        register int32 ss = sad;
        register int32 tt = tmp;
        register int32 uu = tmp2;

        asm volatile("rsbs %1, %4, %3, asr #2\n\t"
                     "rsbmi %1, %1, #0\n\t"
                     "add %0, %2, %1"
                     : "=&r"(out), "=&r"(temp1)
                     : "r"(ss), "r"(tt), "r"(uu));
        return out;
    }

#endif // Diff OS
#ifdef __cplusplus
}
#endif

#endif //_SAD_HALFPEL_INLINE_H_
================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/sad_inline.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*********************************************************************************/
/*  Filename: sad_inline.h                                                       */
/*  Description: Implementation for in-line functions used in dct.cpp            */
/*  Modified:                                                                    */
/*********************************************************************************/
#ifndef _SAD_INLINE_H_
#define _SAD_INLINE_H_

#ifdef __cplusplus
extern "C"
{
#endif

#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* ARM GNU COMPILER */

    /* Portable C fallback: adds |tmp - tmp2| to sad. */
    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        tmp = tmp - tmp2;
        if (tmp > 0) sad += tmp;
        else sad -= tmp;
        return sad;
    }

    /* SWAR trick: computes the per-byte absolute difference of two words
       holding 4 packed pixels each.  mask is 0x80808080.  The x7 carry
       bookkeeping fixes up bytes whose subtraction borrowed from the
       neighbouring byte. */
    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
    {
        int32 x7;

        x7 = src2 ^ src1;       /* check odd/even combination */
        if ((uint32)src2 >= (uint32)src1)
        {
            src1 = src2 - src1;     /* subs */
        }
        else
        {
            src1 = src1 - src2;
        }
        x7 = x7 ^ src1;     /* only odd bytes need to add carry */
        x7 = mask & ((uint32)x7 >> 1);
        x7 = (x7 << 8) - x7;
        src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */
        src1 = src1 ^(x7 >> 7);  /* take absolute value of negative byte */

        return src1;
    }

/* Stamp out sad_mb_offset1/2/3 for the three unaligned ref cases by
   re-including sad_mb_offset.h with different NUMBER/SHIFT settings. */
#define NUMBER 3
#define SHIFT 24
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#include "sad_mb_offset.h"

    /* Word-at-a-time 16x16 SAD with early exit against dmin.  Dispatches
       to sad_mb_offset1/2/3 when ref is not word-aligned.  Low/high bytes
       of the partial sums are accumulated separately in x5/x4 and folded
       together at the end of each row pass. */
    __inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)
    {
        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

        x9 = 0x80808080; /* const. */

        x8 = (uint32)ref & 0x3;  /* alignment of ref selects the variant */
        if (x8 == 3)
            goto SadMBOffset3;
        if (x8 == 2)
            goto SadMBOffset2;
        if (x8 == 1)
            goto SadMBOffset1;

//  x5 = (x4<<8)-x4; /* x5 = x4*255; */
        x4 = x5 = 0;

        x6 = 0xFFFF00FF;

        ref -= lx;   /* pre-decrement; restored by (ref += lx) below */
        blk -= 16;

        x8 = 16;     /* 16 rows */

LOOP_SAD0:
        /****** process 8 pixels ******/
        x10 = *((uint32*)(ref += lx));
        x11 = *((uint32*)(ref + 4));
        x12 = *((uint32*)(blk += 16));
        x14 = *((uint32*)(blk + 4));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10; /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

        /****** process 8 pixels ******/
        x10 = *((uint32*)(ref + 8));
        x11 = *((uint32*)(ref + 12));
        x12 = *((uint32*)(blk + 8));
        x14 = *((uint32*)(blk + 12));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10; /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

        /****************/
        x10 = x5 - (x4 << 8); /* extract low bytes */
        x10 = x10 + x4;     /* add with high bytes */
        x10 = x10 + (x10 << 16); /* add with lower half word */

        if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
        {
            if (--x8)
            {
                goto LOOP_SAD0;
            }
        }

        return ((uint32)x10 >> 16);

SadMBOffset3:
        return sad_mb_offset3(ref, blk, lx, dmin);

SadMBOffset2:
        return sad_mb_offset2(ref, blk, lx, dmin);

SadMBOffset1:
        return sad_mb_offset1(ref, blk, lx, dmin);
    }

#elif defined(__CC_ARM)  /* only work with arm v5 */

    /* RVCT inline-asm |tmp - tmp2| accumulate. */
    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        __asm
        {
            rsbs tmp, tmp, tmp2 ;
            rsbmi tmp, tmp, #0 ;
            add sad, sad, tmp ;
        }
        return sad;
    }

    /* RVCT asm version of the packed 4-pixel absolute difference. */
    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
    {
        int32 x7;

        __asm
        {
            EOR x7, src2, src1;     /* check odd/even combination */
            SUBS src1, src2, src1;
            EOR x7, x7, src1;
            AND x7, mask, x7, lsr #1;
            ORRCC x7, x7, #0x80000000;
            RSB x7, x7, x7, lsl #8;
            ADD src1, src1, x7, asr #7;  /* add 0xFF to the negative byte, add back carry */
            EOR src1, src1, x7, asr #7;  /* take absolute value of negative byte */
        }

        return src1;
    }

    /* Variant used by the offset (unaligned) paths: operands arrive
       pre-negated, hence ADDS instead of SUBS. */
    __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
    {
        int32 x7;

        __asm
        {
            EOR x7, src2, src1;     /* check odd/even combination */
            ADDS src1, src2, src1;
            EOR x7, x7, src1;       /* only odd bytes need to add carry */
            ANDS x7, mask, x7, rrx;
            RSB x7, x7, x7, lsl #8;
            SUB src1, src1, x7, asr #7;  /* add 0xFF to the negative byte, add back carry */
            EOR src1, src1, x7, asr #7;  /* take absolute value of negative byte */
        }

        return src1;
    }

/* Shared low/high-byte accumulation step for the offset paths. */
#define sum_accumulate  __asm{ SBC x5, x5, x10;  /* accumulate low bytes */ \
        BIC x10, x6, x10;   /* x10 & 0xFF00FF00 */ \
        ADD x4, x4, x10,lsr #8;  /* accumulate high bytes */ \
        SBC x5, x5, x11;    /* accumulate low bytes */ \
        BIC x11, x6, x11;   /* x11 & 0xFF00FF00 */ \
        ADD x4, x4, x11,lsr #8; }  /* accumulate high bytes */

#define NUMBER 3
#define SHIFT 24
#define INC_X8 0x08000001
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#undef INC_X8
#define INC_X8 0x10000001
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#undef INC_X8
#define INC_X8 0x08000001
#include "sad_mb_offset.h"

    /* RVCT version: alignment test and loop control are done in asm
       (MOVS of the low two address bits sets the flags used by BHI/BCS/BMI;
       the packed counter in x8 is advanced with ADDLSS). */
    __inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)
    {
        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

        x9 = 0x80808080; /* const. */
        x4 = x5 = 0;

        __asm
        {
            MOVS x8, ref, lsl #31 ;
            BHI SadMBOffset3;
            BCS SadMBOffset2;
            BMI SadMBOffset1;

            MVN x6, #0xFF00;
        }

LOOP_SAD0:
        /****** process 8 pixels ******/
        x11 = *((int32*)(ref + 12));
        x10 = *((int32*)(ref + 8));
        x14 = *((int32*)(blk + 12));
        x12 = *((int32*)(blk + 8));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10;  /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */

        __asm
        {
            /****** process 8 pixels ******/
            LDR x11, [ref, #4];
            LDR x10, [ref], lx ;
            LDR x14, [blk, #4];
            LDR x12, [blk], #16 ;
        }

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10;  /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */

        /****************/
        x10 = x5 - (x4 << 8); /* extract low bytes */
        x10 = x10 + x4;     /* add with high bytes */
        x10 = x10 + (x10 << 16); /* add with lower half word */

        __asm
        {
            /****************/
            RSBS x11, dmin, x10, lsr #16;
            ADDLSS x8, x8, #0x10000001;
            BLS LOOP_SAD0;
        }

        return ((uint32)x10 >> 16);

SadMBOffset3:
        return sad_mb_offset3(ref, blk, lx, dmin, x8);

SadMBOffset2:
        return sad_mb_offset2(ref, blk, lx, dmin, x8);

SadMBOffset1:
        return sad_mb_offset1(ref, blk, lx, dmin, x8);
    }

#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */

    /* GCC extended-asm |tmp - tmp2| accumulate. */
    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
    {
        register int32 out;
        register int32 temp1;
        register int32 ss = sad;
        register int32 tt = tmp;
        register int32 uu = tmp2;

        asm volatile("rsbs %1, %4, %3\n\t"
                     "rsbmi %1, %1, #0\n\t"
                     "add %0, %2, %1"
                     : "=&r"(out), "=&r"(temp1)
                     : "r"(ss), "r"(tt), "r"(uu));
        return out;
    }

    /* GCC asm version of the packed 4-pixel absolute difference. */
    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
    {
        register int32 out;
        register int32 temp1;
        register int32 s1 = src1;
        register int32 s2 = src2;
        register int32 mm = mask;

        asm volatile("eor %0, %3, %2\n\t"
                     "subs %1, %3, %2\n\t"
                     "eor %0, %0, %1\n\t"
                     "and %0, %4, %0, lsr #1\n\t"
                     "orrcc %0, %0, #0x80000000\n\t"
                     "rsb %0, %0, %0, lsl #8\n\t"
                     "add %1, %1, %0, asr #7\n\t"
                     "eor %1, %1, %0, asr #7"
                     : "=&r"(out), "=&r"(temp1)
                     : "r"(s1), "r"(s2), "r"(mm));

        return temp1;
    }

    /* Pre-negated-operand variant used by the offset (unaligned) paths. */
    __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
    {
        register int32 out;
        register int32 temp1;
        register int32 s1 = src1;
        register int32 s2 = src2;
        register int32 mm = mask;

        asm volatile("eor %1, %3, %2\n\t"
                     "adds %0, %3, %2\n\t"
                     "eor %1, %1, %0\n\t"
                     "ands %1, %4, %1,rrx\n\t"
                     "rsb %1, %1, %1, lsl #8\n\t"
                     "sub %0, %0, %1, asr #7\n\t"
                     "eor %0, %0, %1, asr #7"
                     : "=&r"(out), "=&r"(temp1)
                     : "r"(s1), "r"(s2), "r"(mm));

        return (out);
    }

/* Shared low/high-byte accumulation step for the offset paths. */
#define sum_accumulate asm volatile("sbc %0, %0, %1\n\t" \
                                    "bic %1, %4, %1\n\t" \
                                    "add %2, %2, %1, lsr #8\n\t" \
                                    "sbc %0, %0, %3\n\t" \
                                    "bic %3, %4, %3\n\t" \
                                    "add %2, %2, %3, lsr #8" \
                                    :"+r"(x5), "+r"(x10), "+r"(x4), "+r"(x11) \
                                    :"r"(x6));

#define NUMBER 3
#define SHIFT 24
#define INC_X8 0x08000001
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#undef INC_X8
#define INC_X8 0x10000001
#include "sad_mb_offset.h"

#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#undef INC_X8
#define INC_X8 0x08000001
#include "sad_mb_offset.h"

    /* GCC version: same structure as the portable C variant, with the
       paired loads and the 0xFFFF00FF constant done in inline asm. */
    __inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)
    {
        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

        x9 = 0x80808080; /* const. */
        x4 = x5 = 0;

        x8 = (uint32)ref & 0x3;  /* alignment of ref selects the variant */
        if (x8 == 3)
            goto SadMBOffset3;
        if (x8 == 2)
            goto SadMBOffset2;
        if (x8 == 1)
            goto SadMBOffset1;

        asm volatile("mvn %0, #0xFF00": "=r"(x6));

LOOP_SAD0:
        /****** process 8 pixels ******/
        x11 = *((int32*)(ref + 12));
        x10 = *((int32*)(ref + 8));
        x14 = *((int32*)(blk + 12));
        x12 = *((int32*)(blk + 8));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10;  /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */

        asm volatile("ldr %0, [%4, #4]\n\t"
                     "ldr %1, [%4], %6\n\t"
                     "ldr %2, [%5, #4]\n\t"
                     "ldr %3, [%5], #16"
                     : "=r"(x11), "=r"(x10), "=r"(x14), "=r"(x12), "+r"(ref), "+r"(blk)
                     : "r"(lx));

        /* process x11 & x14 */
        x11 = sad_4pixel(x11, x14, x9);

        /* process x12 & x10 */
        x10 = sad_4pixel(x10, x12, x9);

        x5 = x5 + x10;  /* accumulate low bytes */
        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
        x5 = x5 + x11;  /* accumulate low bytes */
        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
        x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */

        /****************/
        x10 = x5 - (x4 << 8); /* extract low bytes */
        x10 = x10 + x4;     /* add with high bytes */
        x10 = x10 + (x10 << 16); /* add with lower half word */

        if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
        {
            if (--x8)
            {
                goto LOOP_SAD0;
            }
        }

        return ((uint32)x10 >> 16);

SadMBOffset3:
        return sad_mb_offset3(ref, blk, lx, dmin);

SadMBOffset2:
        return sad_mb_offset2(ref, blk, lx, dmin);

SadMBOffset1:
        return sad_mb_offset1(ref, blk, lx, dmin);
    }

#endif // OS

#ifdef __cplusplus
}
#endif

#endif // _SAD_INLINE_H_
================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/sad_mb_offset.h
================================================

/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*********************************************************************************/
/*  Filename: sad_mb_offset.h                                                    */
/*  Description: Implementation for in-line functions used in dct.cpp            */
/*  Modified:                                                                    */
/*********************************************************************************/
/* This header is included three times from sad_inline.h with NUMBER, SHIFT and
 * INC_X8 predefined (NUMBER=3/2/1, SHIFT=24/16/8), generating
 * sad_mb_offset3/2/1: 16x16 SAD for a ref pointer sitting NUMBER bytes past a
 * word boundary.  Words are read aligned and realigned with shift/or (C) or
 * MVN/BIC (ARM asm) before the packed-byte SAD. */

#if !defined(PV_ARM_GCC_V4) && !defined(PV_ARM_GCC_V5) /* portable C fallback (neither ARM GCC flavor) */

#if (NUMBER==3)
__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin)
#endif
{
    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

//  x5 = (x4<<8) - x4;
    x4 = x5 = 0;
    x6 = 0xFFFF00FF;   /* lane mask; (x6 << 8) == 0xFF00FF00 */
    x9 = 0x80808080; /* const. */
    ref -= NUMBER; /* bic ref, ref, #3 */ /* realign ref down to the word boundary */
    ref -= lx;   /* pre-decrement: loop body starts with ref += lx */
    blk -= 16;   /* pre-decrement: loop body starts with blk += 16 */
    x8 = 16;     /* row counter: 16 rows per macroblock */

#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
    /****** process 8 pixels ******/
    x10 = *((uint32*)(ref += lx)); /* D C B A */
    x11 = *((uint32*)(ref + 4));   /* H G F E */
    x12 = *((uint32*)(ref + 8));   /* L K J I */

    /* realign: drop the NUMBER stale low bytes, pull in bytes from the
     * next word */
    x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */
    x10 = x10 | (x11 << (32 - SHIFT)); /* G F E D */
    x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */
    x11 = x11 | (x12 << (32 - SHIFT)); /* K J I H */

    x12 = *((uint32*)(blk += 16));
    x14 = *((uint32*)(blk + 4));

    /* process x11 & x14 */
    x11 = sad_4pixel(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixel(x10, x12, x9);

    x5 = x5 + x10; /* accumulate low bytes */
    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
    x5 = x5 + x11; /* accumulate low bytes */
    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

    /****** process 8 pixels ******/
    x10 = *((uint32*)(ref + 8));  /* D C B A */
    x11 = *((uint32*)(ref + 12)); /* H G F E */
    x12 = *((uint32*)(ref + 16)); /* L K J I */

    x10 = ((uint32)x10 >> SHIFT); /* mvn x10, x10, lsr #24  = 0xFF 0xFF 0xFF ~D */
    x10 = x10 | (x11 << (32 - SHIFT)); /* bic x10, x10, x11, lsl #8 = ~G ~F ~E ~D */
    x11 = ((uint32)x11 >> SHIFT); /* 0xFF 0xFF 0xFF ~H */
    x11 = x11 | (x12 << (32 - SHIFT)); /* ~K ~J ~I ~H */

    x12 = *((uint32*)(blk + 8));
    x14 = *((uint32*)(blk + 12));

    /* process x11 & x14 */
    x11 = sad_4pixel(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixel(x10, x12, x9);

    x5 = x5 + x10; /* accumulate low bytes */
    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */
    x5 = x5 + x11; /* accumulate low bytes */
    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */

    /****************/
    x10 = x5 - (x4 << 8); /* extract low bytes */
    x10 = x10 + x4;       /* add with high bytes */
    x10 = x10 + (x10 << 16); /* add with lower half word */

    if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
    {
        if (--x8)
        {
#if (NUMBER==3)
            goto LOOP_SAD3;
#elif (NUMBER==2)
            goto LOOP_SAD2;
#elif (NUMBER==1)
            goto LOOP_SAD1;
#endif
        }
    }

    return ((uint32)x10 >> 16);
}

#elif defined(__CC_ARM)  /* only work with arm v5 */
/* armcc variant: x8 doubles as loop state (caller passes ref<<31 style state;
 * ADDLSS below both counts and sets flags — see INC_X8 per instantiation). */

#if (NUMBER==3)
__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)
#endif
{
    int32 x4, x5, x6, x9, x10, x11, x12, x14;

    x9 = 0x80808080; /* const. */
    x4 = x5 = 0;

    __asm{
        MVN x6, #0xff0000;          /* x6 = 0xFF00FFFF */
        BIC ref, ref, #3;           /* word-align ref */
#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
    }
    /****** process 8 pixels ******/
    x11 = *((int32*)(ref + 12));
    x12 = *((int32*)(ref + 16));
    x10 = *((int32*)(ref + 8));
    x14 = *((int32*)(blk + 12));

    /* realign via complemented bytes (hence sad_4pixelN below) */
    __asm{
        MVN x10, x10, lsr #SHIFT;
        BIC x10, x10, x11, lsl #(32-SHIFT);
        MVN x11, x11, lsr #SHIFT;
        BIC x11, x11, x12, lsl #(32-SHIFT);
        LDR x12, [blk, #8];
    }

    /* process x11 & x14 */
    x11 = sad_4pixelN(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixelN(x10, x12, x9);

    sum_accumulate;

    __asm{
        /****** process 8 pixels ******/
        LDR x11, [ref, #4];
        LDR x12, [ref, #8];
        LDR x10, [ref], lx ;        /* advance ref by one row */
        LDR x14, [blk, #4];
        MVN x10, x10, lsr #SHIFT;
        BIC x10, x10, x11, lsl #(32-SHIFT);
        MVN x11, x11, lsr #SHIFT;
        BIC x11, x11, x12, lsl #(32-SHIFT);
        LDR x12, [blk], #16;        /* advance blk by one row */
    }

    /* process x11 & x14 */
    x11 = sad_4pixelN(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixelN(x10, x12, x9);

    sum_accumulate;

    /****************/
    x10 = x5 - (x4 << 8); /* extract low bytes */
    x10 = x10 + x4;       /* add with high bytes */
    x10 = x10 + (x10 << 16); /* add with lower half word */

    __asm{
        RSBS x11, dmin, x10, lsr #16
        ADDLSS x8, x8, #INC_X8      /* count iteration + set flags while SAD <= dmin */
#if (NUMBER==3)
        BLS LOOP_SAD3;
#elif (NUMBER==2)
        BLS LOOP_SAD2;
#elif (NUMBER==1)
        BLS LOOP_SAD1;
#endif
    }

    return ((uint32)x10 >> 16);
}

#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */

#if (NUMBER==3)
__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin)
#endif
{
    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;

//  x5 = (x4<<8) - x4;
    x4 = x5 = 0;
    x6 = 0xFFFF00FF;
    x9 = 0x80808080; /* const. */
    ref -= NUMBER; /* bic ref, ref, #3 */
    ref -= lx;
    x8 = 16;   /* row counter: 16 rows per macroblock */

    /* NOTE(review): unlike the C variant above, this path has no `blk -= 16`
     * and never advances blk inside the loop (both asm groups read only
     * [blk,#4]/[blk,#8]), so every row appears to be compared against the
     * same block data.  Looks wrong — TODO: verify against upstream AOSP
     * sad_mb_offset.h before relying on this configuration. */

#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
    /****** process 8 pixels ******/
    x10 = *((uint32*)(ref += lx)); /* D C B A */
    x11 = *((uint32*)(ref + 4));   /* H G F E */
    x12 = *((uint32*)(ref + 8));   /* L K J I */

    int32 shift = SHIFT;
    int32 shift2 = 32 - SHIFT;
    /* realign ref words with MVN/BIC (complemented bytes) and load blk */
    asm volatile("ldr %3, [%4, #4]\n\t"
                 "mvn %0, %0, lsr %5\n\t"
                 "bic %0, %0, %1, lsl %6\n\t"
                 "mvn %1, %1, lsr %5\n\t"
                 "bic %1, %1, %2, lsl %6\n\t"
                 "ldr %2, [%4, #8]"
                 : "+r"(x10), "+r"(x11), "+r"(x12), "=r"(x14)
                 : "r"(blk), "r"(shift), "r"(shift2));

    /* process x11 & x14 */
    x11 = sad_4pixel(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixel(x10, x12, x9);

    sum_accumulate;

    /****** process 8 pixels ******/
    x10 = *((uint32*)(ref + 8));  /* D C B A */
    x11 = *((uint32*)(ref + 12)); /* H G F E */
    x12 = *((uint32*)(ref + 16)); /* L K J I */

    asm volatile("ldr %3, [%4, #4]\n\t"
                 "mvn %0, %0, lsr %5\n\t"
                 "bic %0, %0, %1, lsl %6\n\t"
                 "mvn %1, %1, lsr %5\n\t"
                 "bic %1, %1, %2, lsl %6\n\t"
                 "ldr %2, [%4, #8]"
                 : "+r"(x10), "+r"(x11), "+r"(x12), "=r"(x14)
                 : "r"(blk), "r"(shift), "r"(shift2));

    /* process x11 & x14 */
    x11 = sad_4pixel(x11, x14, x9);

    /* process x12 & x10 */
    x10 = sad_4pixel(x10, x12, x9);

    sum_accumulate;

    /****************/
    x10 = x5 - (x4 << 8); /* extract low bytes */
    x10 = x10 + x4;       /* add with high bytes */
    x10 = x10 + (x10 << 16); /* add with lower half word */

    if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
    {
        if (--x8)
        {
#if (NUMBER==3)
            goto LOOP_SAD3;
#elif (NUMBER==2)
            goto LOOP_SAD2;
#elif (NUMBER==1)
            goto LOOP_SAD1;
#endif
        }
    }

    return ((uint32)x10 >> 16);
}

#endif


================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/vlc_enc_tab.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/******************************************************************************
 *
 * This software module was originally developed by
 *
 * Robert Danielsen (Telenor / ACTS-MoMuSys).
 *
 * and edited by
 *
 * Minhua Zhou (HHI / ACTS-MoMuSys).
 * Luis Ducla-Soares (IST / ACTS-MoMuSys).
 *
 * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.
 * This software module is an implementation of a part of one or more MPEG-4
 * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC
 * 14496-2) standard.
 *
 * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free
 * license to this software module or modifications thereof for use in hardware
 * or software products claiming conformance to the MPEG-4 Video (ISO/IEC
 * 14496-2) standard.
 *
 * Those intending to use this software module in hardware or software products
 * are advised that its use may infringe existing patents.
The original * developer of this software module and his/her company, the subsequent * editors and their companies, and ISO/IEC have no liability for use of this * software module or modifications thereof in an implementation. Copyright is * not released for non MPEG-4 Video (ISO/IEC 14496-2) standard conforming * products. * * ACTS-MoMuSys partners retain full right to use the code for his/her own * purpose, assign or donate the code to a third party and to inhibit third * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) standard * conforming products. This copyright notice must be included in all copies or * derivative works. * * Copyright (c) 1997 * *****************************************************************************/ /***********************************************************HeaderBegin******* * * File: vlc.h * * Author: Robert Danielsen * Created: 07.06.96 * * Description: vlc tables for encoder * * Notes: Idea taken from MPEG-2 software simulation group * * Modified: * 28.10.96 Robert Danielsen: Added tables for Intra luminance * coefficients * 01.05.97 Luis Ducla-Soares: added VM7.0 Reversible VLC tables (RVLC). 
* 13.05.97 Minhua Zhou: added cbpy_tab3,cbpy_tab2 * ***********************************************************HeaderEnd*********/ /************************ INCLUDE FILES ********************************/ #ifndef _VLC_ENC_TAB_H_ #define _VLC_ENC_TAB_H_ #include "mp4def.h" /* type definitions for variable length code table entries */ static const Int intra_max_level[2][64] = { {27, 10, 5, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, {8, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static const Int inter_max_level[2][64] = { {12, 6, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; static const Int intra_max_run0[28] = { 999, 14, 9, 7, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const Int intra_max_run1[9] = { 999, 20, 6, 1, 0, 0, 0, 0, 0 }; static const Int inter_max_run0[13] = { 999, 26, 10, 6, 2, 1, 1, 0, 0, 0, 0, 0, 0 }; static const Int inter_max_run1[4] = { 999, 40, 1, 0 }; /* DC prediction sizes */ static const VLCtable DCtab_lum[13] = { {3, 3}, {3, 2}, {2, 2}, {2, 3}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 7}, {1, 8}, {1, 9}, {1, 10}, {1, 11} }; static const VLCtable DCtab_chrom[13] = { {3, 2}, {2, 2}, {1, 2}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 7}, {1, 8}, {1, 9}, {1, 10}, {1, 11}, {1, 12} }; /* Motion vectors */ static const VLCtable mvtab[33] = { {1, 1}, {1, 2}, {1, 3}, {1, 4}, {3, 6}, {5, 7}, {4, 7}, {3, 7}, 
{11, 9}, {10, 9}, {9, 9}, {17, 10}, {16, 10}, {15, 10}, {14, 10}, {13, 10}, {12, 10}, {11, 10}, {10, 10}, {9, 10}, {8, 10}, {7, 10}, {6, 10}, {5, 10}, {4, 10}, {7, 11}, {6, 11}, {5, 11}, {4, 11}, {3, 11}, {2, 11}, {3, 12}, {2, 12} }; /* MCBPC Indexing by cbpc in first two bits, mode in last two. CBPC as in table 4/H.263, MB type (mode): 3 = 01, 4 = 10. Example: cbpc = 01 and mode = 4 gives index = 0110 = 6. */ static const VLCtable mcbpc_intra_tab[15] = { {0x01, 9}, {0x01, 1}, {0x01, 4}, {0x00, 0}, {0x00, 0}, {0x01, 3}, {0x01, 6}, {0x00, 0}, {0x00, 0}, {0x02, 3}, {0x02, 6}, {0x00, 0}, {0x00, 0}, {0x03, 3}, {0x03, 6} }; /* MCBPC inter. Addressing: 5 bit ccmmm (cc = CBPC, mmm = mode (1-4 binary)) */ static const VLCtable mcbpc_inter_tab[29] = { {1, 1}, {3, 3}, {2, 3}, {3, 5}, {4, 6}, {1, 9}, {0, 0}, {0, 0}, {3, 4}, {7, 7}, {5, 7}, {4, 8}, {4, 9}, {0, 0}, {0, 0}, {0, 0}, {2, 4}, {6, 7}, {4, 7}, {3, 8}, {3, 9}, {0, 0}, {0, 0}, {0, 0}, {5, 6}, {5, 9}, {5, 8}, {3, 7}, {2, 9} }; /* CBPY. Straightforward indexing */ static const VLCtable cbpy_tab[16] = { {3, 4}, {5, 5}, {4, 5}, {9, 4}, {3, 5}, {7, 4}, {2, 6}, {11, 4}, {2, 5}, {3, 6}, {5, 4}, {10, 4}, {4, 4}, {8, 4}, {6, 4}, {3, 2} }; static const VLCtable cbpy_tab3[8] = { {3, 3}, {1, 6}, {1, 5}, {2, 3}, {2, 5}, {3, 5}, {1, 3}, {1, 1} }; static const VLCtable cbpy_tab2[4] = { {1, 4}, {1, 3}, {1, 2}, {1, 1} }; /* DCT coefficients. Four tables, two for last = 0, two for last = 1. the sign bit must be added afterwards. */ /* first part of coeffs for last = 0. Indexed by [run][level-1] */ static const VLCtable coeff_tab0[2][12] = { /* run = 0 */ { {0x02, 2}, {0x0f, 4}, {0x15, 6}, {0x17, 7}, {0x1f, 8}, {0x25, 9}, {0x24, 9}, {0x21, 10}, {0x20, 10}, {0x07, 11}, {0x06, 11}, {0x20, 11} }, /* run = 1 */ { {0x06, 3}, {0x14, 6}, {0x1e, 8}, {0x0f, 10}, {0x21, 11}, {0x50, 12}, {0x00, 0}, {0x00, 0}, {0x00, 0}, {0x00, 0}, {0x00, 0}, {0x00, 0} } }; /* rest of coeffs for last = 0. 
indexing by [run-2][level-1] */ static const VLCtable coeff_tab1[25][4] = { /* run = 2 */ { {0x0e, 4}, {0x1d, 8}, {0x0e, 10}, {0x51, 12} }, /* run = 3 */ { {0x0d, 5}, {0x23, 9}, {0x0d, 10}, {0x00, 0} }, /* run = 4-26 */ { {0x0c, 5}, {0x22, 9}, {0x52, 12}, {0x00, 0} }, { {0x0b, 5}, {0x0c, 10}, {0x53, 12}, {0x00, 0} }, { {0x13, 6}, {0x0b, 10}, {0x54, 12}, {0x00, 0} }, { {0x12, 6}, {0x0a, 10}, {0x00, 0}, {0x00, 0} }, { {0x11, 6}, {0x09, 10}, {0x00, 0}, {0x00, 0} }, { {0x10, 6}, {0x08, 10}, {0x00, 0}, {0x00, 0} }, { {0x16, 7}, {0x55, 12}, {0x00, 0}, {0x00, 0} }, { {0x15, 7}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x14, 7}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1c, 8}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1b, 8}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x21, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x20, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1f, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1e, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1d, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1c, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1b, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x1a, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x22, 11}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x23, 11}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x56, 12}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, { {0x57, 12}, {0x00, 0}, {0x00, 0}, {0x00, 0} } }; /* first coeffs of last = 1. indexing by [run][level-1] */ static const VLCtable coeff_tab2[2][3] = { /* run = 0 */ { {0x07, 4}, {0x19, 9}, {0x05, 11} }, /* run = 1 */ { {0x0f, 6}, {0x04, 11}, {0x00, 0} } }; /* rest of coeffs for last = 1. 
indexing by [run-2] */ static const VLCtable coeff_tab3[40] = { {0x0e, 6}, {0x0d, 6}, {0x0c, 6}, {0x13, 7}, {0x12, 7}, {0x11, 7}, {0x10, 7}, {0x1a, 8}, {0x19, 8}, {0x18, 8}, {0x17, 8}, {0x16, 8}, {0x15, 8}, {0x14, 8}, {0x13, 8}, {0x18, 9}, {0x17, 9}, {0x16, 9}, {0x15, 9}, {0x14, 9}, {0x13, 9}, {0x12, 9}, {0x11, 9}, {0x07, 10}, {0x06, 10}, {0x05, 10}, {0x04, 10}, {0x24, 11}, {0x25, 11}, {0x26, 11}, {0x27, 11}, {0x58, 12}, {0x59, 12}, {0x5a, 12}, {0x5b, 12}, {0x5c, 12}, {0x5d, 12}, {0x5e, 12}, {0x5f, 12}, {0x00, 0} }; /* New tables for Intra luminance coefficients. Same codewords, different meaning */ /* Coeffs for last = 0, run = 0. Indexed by [level-1] */ static const VLCtable coeff_tab4[27] = { /* run = 0 */ {0x02, 2}, {0x06, 3}, {0x0f, 4}, {0x0d, 5}, {0x0c, 5}, {0x15, 6}, {0x13, 6}, {0x12, 6}, {0x17, 7}, {0x1f, 8}, {0x1e, 8}, {0x1d, 8}, {0x25, 9}, {0x24, 9}, {0x23, 9}, {0x21, 9}, {0x21, 10}, {0x20, 10}, {0x0f, 10}, {0x0e, 10}, {0x07, 11}, {0x06, 11}, {0x20, 11}, {0x21, 11}, {0x50, 12}, {0x51, 12}, {0x52, 12} }; /* Coeffs for last = 0, run = 1. Indexed by [level-1] */ static const VLCtable coeff_tab5[10] = { {0x0e, 4}, {0x14, 6}, {0x16, 7}, {0x1c, 8}, {0x20, 9}, {0x1f, 9}, {0x0d, 10}, {0x22, 11}, {0x53, 12}, {0x55, 12} }; /* Coeffs for last = 0, run = 2 -> 9. Indexed by [run-2][level-1] */ static const VLCtable coeff_tab6[8][5] = { /* run = 2 */ { {0x0b, 5}, {0x15, 7}, {0x1e, 9}, {0x0c, 10}, {0x56, 12} }, /* run = 3 */ { {0x11, 6}, {0x1b, 8}, {0x1d, 9}, {0x0b, 10}, {0x00, 0} }, /* run = 4 */ { {0x10, 6}, {0x22, 9}, {0x0a, 10}, {0x00, 0}, {0x00, 0} }, /* run = 5 */ { {0x0d, 6}, {0x1c, 9}, {0x08, 10}, {0x00, 0}, {0x00, 0} }, /* run = 6 */ { {0x12, 7}, {0x1b, 9}, {0x54, 12}, {0x00, 0}, {0x00, 0} }, /* run = 7 */ { {0x14, 7}, {0x1a, 9}, {0x57, 12}, {0x00, 0}, {0x00, 0} }, /* run = 8 */ { {0x19, 8}, {0x09, 10}, {0x00, 0}, {0x00, 0}, {0x00, 0} }, /* run = 9 */ { {0x18, 8}, {0x23, 11}, {0x00, 0}, {0x00, 0}, {0x00, 0} } }; /* Coeffs for last = 0, run = 10 -> 14. 
Indexed by [run-10] */ static const VLCtable coeff_tab7[5] = { {0x17, 8}, {0x19, 9}, {0x18, 9}, {0x07, 10}, {0x58, 12} }; /* Coeffs for last = 1, run = 0. Indexed by [level-1] */ static const VLCtable coeff_tab8[8] = { {0x07, 4}, {0x0c, 6}, {0x16, 8}, {0x17, 9}, {0x06, 10}, {0x05, 11}, {0x04, 11}, {0x59, 12} }; /* Coeffs for last = 1, run = 1 -> 6. Indexed by [run-1][level-1] */ static const VLCtable coeff_tab9[6][3] = { /* run = 1 */ { {0x0f, 6}, {0x16, 9}, {0x05, 10} }, /* run = 2 */ { {0x0e, 6}, {0x04, 10}, {0x00, 0} }, /* run = 3 */ { {0x11, 7}, {0x24, 11}, {0x00, 0} }, /* run = 4 */ { {0x10, 7}, {0x25, 11}, {0x00, 0} }, /* run = 5 */ { {0x13, 7}, {0x5a, 12}, {0x00, 0} }, /* run = 6 */ { {0x15, 8}, {0x5b, 12}, {0x00, 0} } }; /* Coeffs for last = 1, run = 7 -> 20. Indexed by [run-7] */ static const VLCtable coeff_tab10[14] = { {0x14, 8}, {0x13, 8}, {0x1a, 8}, {0x15, 9}, {0x14, 9}, {0x13, 9}, {0x12, 9}, {0x11, 9}, {0x26, 11}, {0x27, 11}, {0x5c, 12}, {0x5d, 12}, {0x5e, 12}, {0x5f, 12} }; #ifndef NO_RVLC /* RVLC tables */ /* DCT coefficients. Four tables, two for last = 0, two for last = 1. the sign bit must be added afterwards. */ /* DCT coeffs (intra) for last = 0. 
*/ /* Indexed by [level-1] */ static const VLCtable coeff_RVLCtab1[27] = { /* run = 0 */ { 0x6, 3}, { 0x7, 3}, { 0xa, 4}, { 0x9, 5}, { 0x14, 6}, { 0x15, 6}, { 0x34, 7}, { 0x74, 8}, { 0x75, 8}, { 0xdd, 9}, { 0xec, 9}, { 0x1ec, 10}, { 0x1ed, 10}, { 0x1f4, 10}, { 0x3ec, 11}, { 0x3ed, 11}, { 0x3f4, 11}, { 0x77d, 12}, { 0x7bc, 12}, { 0xfbd, 13}, { 0xfdc, 13}, { 0x7bd, 12}, { 0xfdd, 13}, { 0x1fbd, 14}, { 0x1fdc, 14}, { 0x1fdd, 14}, { 0x1ffc, 15} }; /* Indexed by [level-1] */ static const VLCtable coeff_RVLCtab2[13] = { /* run = 1 */ { 0x1, 4}, { 0x8, 5}, { 0x2d, 7}, { 0x6c, 8}, { 0x6d, 8}, { 0xdc, 9}, { 0x1dd, 10}, { 0x3dc, 11}, { 0x3dd, 11}, { 0x77c, 12}, { 0xfbc, 13}, { 0x1f7d, 14}, { 0x1fbc, 14} }; /* Indexed by [level-1] */ static const VLCtable coeff_RVLCtab3[11] = { /* run = 2 */ { 0x4, 5}, { 0x2c, 7}, { 0xbc, 9}, { 0x1dc, 10}, { 0x3bc, 11}, { 0x3bd, 11}, { 0xefd, 13}, { 0xf7c, 13}, { 0xf7d, 13}, { 0x1efd, 14}, { 0x1f7c, 14} }; /* Indexed by [level-1] */ static const VLCtable coeff_RVLCtab4[9] = { /* run = 3 */ { 0x5, 5}, { 0x5c, 8}, { 0xbd, 9}, { 0x37d, 11}, { 0x6fc, 12}, { 0xefc, 13}, { 0x1dfd, 14}, { 0x1efc, 14}, { 0x1ffd, 15} }; /* Indexed by [run-4][level-1] */ static const VLCtable coeff_RVLCtab5[2][6] = { /* run = 4 */ { { 0xc, 6}, { 0x5d, 8}, { 0x1bd, 10}, { 0x3fd, 12}, { 0x6fd, 12}, { 0x1bfd, 14} }, /* run = 5 */ { { 0xd, 6}, { 0x7d, 9}, { 0x2fc, 11}, { 0x5fc, 12}, { 0x1bfc, 14}, { 0x1dfc, 14} } }; /* Indexed by [run-6][level-1] */ static const VLCtable coeff_RVLCtab6[2][5] = { /* run = 6 */ { { 0x1c, 7}, { 0x17c, 10}, { 0x2fd, 11}, { 0x5fd, 12}, { 0x2ffc, 15} }, /* run = 7 */ { { 0x1d, 7}, { 0x17d, 10}, { 0x37c, 11}, { 0xdfd, 13}, { 0x2ffd, 15} } }; /* Indexed by [run-8][level-1] */ static const VLCtable coeff_RVLCtab7[2][4] = { /* run = 8 */ { { 0x3c, 8}, { 0x1bc, 10}, { 0xbfd, 13}, { 0x17fd, 14} }, /* run = 9 */ { { 0x3d, 8}, { 0x1fd, 11}, { 0xdfc, 13}, { 0x37fc, 15}, } }; /* Indexed by [run-10][level-1] */ static const VLCtable coeff_RVLCtab8[3][2] = { 
/* run = 10 */ { { 0x7c, 9}, { 0x3fc, 12} }, /* run = 11 */ { { 0xfc, 10}, { 0xbfc, 13} }, /* run = 12 */ { { 0xfd, 10}, { 0x37fd, 15} } }; /* Indexed by [level-1] */ static const VLCtable coeff_RVLCtab9[7] = { /* run = 13 -> 19 */ { 0x1fc, 11}, { 0x7fc, 13}, { 0x7fd, 13}, { 0xffc, 14}, { 0xffd, 14}, { 0x17fc, 14}, { 0x3bfc, 15} }; /* first coeffs of last = 1. indexing by [run][level-1] */ static const VLCtable coeff_RVLCtab10[2][5] = { /* run = 0 */ { { 0xb, 4}, { 0x78, 8}, { 0x3f5, 11}, { 0xfec, 13}, { 0x1fec, 14} }, /* run = 1 */ { { 0x12, 5}, { 0xed, 9}, { 0x7dc, 12}, { 0x1fed, 14}, { 0x3bfd, 15} } }; static const VLCtable coeff_RVLCtab11[3] = { /* run = 2 */ { 0x13, 5}, { 0x3f8, 11}, { 0x3dfc, 15} }; static const VLCtable coeff_RVLCtab12[11][2] = { /* run = 3 */ { { 0x18, 6}, { 0x7dd, 12} }, /* run = 4 */ { { 0x19, 6}, { 0x7ec, 12} }, /* run = 5 */ { { 0x22, 6}, { 0xfed, 13} }, /* run = 6 */ { { 0x23, 6}, { 0xff4, 13} }, /* run = 7 */ { { 0x35, 7}, { 0xff5, 13} }, /* run = 8 */ { { 0x38, 7}, { 0xff8, 13} }, /* run = 9 */ { { 0x39, 7}, { 0xff9, 13} }, /* run = 10 */ { { 0x42, 7}, { 0x1ff4, 14} }, /* run = 11 */ { { 0x43, 7}, { 0x1ff5, 14} }, /* run = 12 */ { { 0x79, 8}, { 0x1ff8, 14} }, /* run = 13 */ { { 0x82, 8}, { 0x3dfd, 15} } }; static const VLCtable coeff_RVLCtab13[32] = { /* run = 14 -> 44 */ { 0x83, 8}, { 0xf4, 9}, { 0xf5, 9}, { 0xf8, 9}, { 0xf9, 9}, { 0x102, 9}, { 0x103, 9}, { 0x1f5, 10}, { 0x1f8, 10}, { 0x1f9, 10}, { 0x202, 10}, { 0x203, 10}, { 0x3f9, 11}, { 0x402, 11}, { 0x403, 11}, { 0x7ed, 12}, { 0x7f4, 12}, { 0x7f5, 12}, { 0x7f8, 12}, { 0x7f9, 12}, { 0x802, 12}, { 0x803, 12}, { 0x1002, 13}, { 0x1003, 13}, { 0x1ff9, 14}, { 0x2002, 14}, { 0x2003, 14}, { 0x3efc, 15}, { 0x3efd, 15}, { 0x3f7c, 15}, { 0x3f7d, 15} }; /* Coeffs for last = 0, run = 0. 
Indexed by [level-1] */ static const VLCtable coeff_RVLCtab14[19] = { /* run = 0 */ { 0x6, 3}, { 0x1, 4}, { 0x4, 5}, { 0x1c, 7}, { 0x3c, 8}, { 0x3d, 8}, { 0x7c, 9}, { 0xfc, 10}, { 0xfd, 10}, { 0x1fc, 11}, { 0x1fd, 11}, { 0x3fc, 12}, { 0x7fc, 13}, { 0x7fd, 13}, { 0xbfc, 13}, { 0xbfd, 13}, { 0xffc, 14}, { 0xffd, 14}, { 0x1ffc, 15} }; static const VLCtable coeff_RVLCtab15[10] = { /* run = 1 */ { 0x7, 3}, { 0xc, 6}, { 0x5c, 8}, { 0x7d, 9}, { 0x17c, 10}, { 0x2fc, 11}, { 0x3fd, 12}, { 0xdfc, 13}, { 0x17fc, 14}, { 0x17fd, 14} }; static const VLCtable coeff_RVLCtab16[2][7] = { /* run = 2 */ { { 0xa, 4}, { 0x1d, 7}, { 0xbc, 9}, { 0x2fd, 11}, { 0x5fc, 12}, { 0x1bfc, 14}, { 0x1bfd, 14} }, /* run = 3 */ { { 0x5, 5}, { 0x5d, 8}, { 0x17d, 10}, { 0x5fd, 12}, { 0xdfd, 13}, { 0x1dfc, 14}, { 0x1ffd, 15} } }; static const VLCtable coeff_RVLCtab17[5] = { /* run = 4 */ { 0x8, 5}, { 0x6c, 8}, { 0x37c, 11}, { 0xefc, 13}, { 0x2ffc, 15} }; static const VLCtable coeff_RVLCtab18[3][4] = { /* run = 5 */ { { 0x9, 5}, { 0xbd, 9}, { 0x37d, 11}, { 0xefd, 13} }, /* run = 6 */ { { 0xd, 6}, { 0x1bc, 10}, { 0x6fc, 12}, { 0x1dfd, 14} }, /* run = 7 */ { { 0x14, 6}, { 0x1bd, 10}, { 0x6fd, 12}, { 0x2ffd, 15} } }; static const VLCtable coeff_RVLCtab19[2][3] = { /* run = 8 */ { { 0x15, 6}, { 0x1dc, 10}, { 0xf7c, 13} }, /* run = 9 */ { { 0x2c, 7}, { 0x1dd, 10}, { 0x1efc, 14} } }; static const VLCtable coeff_RVLCtab20[8][2] = { /* run = 10 */ { { 0x2d, 7}, { 0x3bc, 11} }, /* run = 11 */ { { 0x34, 7}, { 0x77c, 12} }, /* run = 12 */ { { 0x6d, 8}, { 0xf7d, 13} }, /* run = 13 */ { { 0x74, 8}, { 0x1efd, 14} }, /* run = 14 */ { { 0x75, 8}, { 0x1f7c, 14} }, /* run = 15 */ { { 0xdc, 9}, { 0x1f7d, 14} }, /* run = 16 */ { { 0xdd, 9}, { 0x1fbc, 14} }, /* run = 17 */ { { 0xec, 9}, { 0x37fc, 15} } }; static const VLCtable coeff_RVLCtab21[21] = { /* run = 18 -> 38 */ { 0x1ec, 10}, { 0x1ed, 10}, { 0x1f4, 10}, { 0x3bd, 11}, { 0x3dc, 11}, { 0x3dd, 11}, { 0x3ec, 11}, { 0x3ed, 11}, { 0x3f4, 11}, { 0x77d, 12}, { 0x7bc, 12}, { 
0x7bd, 12}, { 0xfbc, 13}, { 0xfbd, 13}, { 0xfdc, 13}, { 0xfdd, 13}, { 0x1fbd, 14}, { 0x1fdc, 14}, { 0x1fdd, 14}, { 0x37fd, 15}, { 0x3bfc, 15} }; /* first coeffs of last = 1. indexing by [run][level-1] */ static const VLCtable coeff_RVLCtab22[2][5] = { /* run = 0 */ { { 0xb, 4}, { 0x78, 8}, { 0x3f5, 11}, { 0xfec, 13}, { 0x1fec, 14} }, /* run = 1 */ { { 0x12, 5}, { 0xed, 9}, { 0x7dc, 12}, { 0x1fed, 14}, { 0x3bfd, 15} } }; static const VLCtable coeff_RVLCtab23[3] = { /* run = 2 */ { 0x13, 5}, { 0x3f8, 11}, { 0x3dfc, 15} }; static const VLCtable coeff_RVLCtab24[11][2] = { /* run = 3 */ { { 0x18, 6}, { 0x7dd, 12} }, /* run = 4 */ { { 0x19, 6}, { 0x7ec, 12} }, /* run = 5 */ { { 0x22, 6}, { 0xfed, 13} }, /* run = 6 */ { { 0x23, 6}, { 0xff4, 13} }, /* run = 7 */ { { 0x35, 7}, { 0xff5, 13} }, /* run = 8 */ { { 0x38, 7}, { 0xff8, 13} }, /* run = 9 */ { { 0x39, 7}, { 0xff9, 13} }, /* run = 10 */ { { 0x42, 7}, { 0x1ff4, 14} }, /* run = 11 */ { { 0x43, 7}, { 0x1ff5, 14} }, /* run = 12 */ { { 0x79, 8}, { 0x1ff8, 14} }, /* run = 13 */ { { 0x82, 8}, { 0x3dfd, 15} } }; static const VLCtable coeff_RVLCtab25[32] = { /* run = 14 -> 44 */ { 0x83, 8}, { 0xf4, 9}, { 0xf5, 9}, { 0xf8, 9}, { 0xf9, 9}, { 0x102, 9}, { 0x103, 9}, { 0x1f5, 10}, { 0x1f8, 10}, { 0x1f9, 10}, { 0x202, 10}, { 0x203, 10}, { 0x3f9, 11}, { 0x402, 11}, { 0x403, 11}, { 0x7ed, 12}, { 0x7f4, 12}, { 0x7f5, 12}, { 0x7f8, 12}, { 0x7f9, 12}, { 0x802, 12}, { 0x803, 12}, { 0x1002, 13}, { 0x1003, 13}, { 0x1ff9, 14}, { 0x2002, 14}, { 0x2003, 14}, { 0x3efc, 15}, { 0x3efd, 15}, { 0x3f7c, 15}, { 0x3f7d, 15} }; #endif /* NO_RVLC */ #endif /* _VLC_ENC_TAB_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/vlc_encode.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with 
the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. * ------------------------------------------------------------------- */ /****************************************************************************** * * This software module was originally developed by * * Robert Danielsen (Telenor / ACTS-MoMuSys). * * and edited by * * Luis Ducla-Soares (IST / ACTS-MoMuSys). * Cor Quist (KPN / ACTS-MoMuSys). * * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard. * This software module is an implementation of a part of one or more MPEG-4 * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC * 14496-2) standard. * * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free * license to this software module or modifications thereof for use in hardware * or software products claiming conformance to the MPEG-4 Video (ISO/IEC * 14496-2) standard. * * Those intending to use this software module in hardware or software products * are advised that its use may infringe existing patents. The original * developer of this software module and his/her company, the subsequent * editors and their companies, and ISO/IEC have no liability for use of this * software module or modifications thereof in an implementation. Copyright is * not released for non MPEG-4 Video (ISO/IEC 14496-2) standard conforming * products. * * ACTS-MoMuSys partners retain full right to use the code for his/her own * purpose, assign or donate the code to a third party and to inhibit third * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) standard * conforming products. 
This copyright notice must be included in all copies or * derivative works. * * Copyright (c) 1997 * *****************************************************************************/ /***********************************************************HeaderBegin******* * * File: putvlc.c * * Author: Robert Danielsen, Telenor R&D * Created: 07.07.96 * * Description: Functions for writing to bitstream * * Notes: Same kind of tables as in the MPEG-2 software simulation * group software. * * Modified: * 28.10.96 Robert Danielsen: Added PutCoeff_Intra(), renamed * PutCoeff() to PutCoeff_Inter(). * 06.11.96 Robert Danielsen: Added PutMCBPC_sep() * 01.05.97 Luis Ducla-Soares: added PutCoeff_Intra_RVLC() and * PutCoeff_Inter_RVLC(). * ***********************************************************HeaderEnd*********/ /************************ INCLUDE FILES ********************************/ #include "mp4lib_int.h" #include "mp4enc_lib.h" #include "vlc_enc_tab.h" #include "bitstream_io.h" #include "m4venc_oscl.h" #include "vlc_encode_inline.h" typedef void (*BlockCodeCoeffPtr)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar) ; const static Int mode_MBtype[] = { 3, 0, 4, 1, 2, }; const static Int zigzag_inv[NCOEFF_BLOCK] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63 }; /* Horizontal zigzag inverse */ const static Int zigzag_h_inv[NCOEFF_BLOCK] = { 0, 1, 2, 3, 8, 9, 16, 17, 10, 11, 4, 5, 6, 7, 15, 14, 13, 12, 19, 18, 24, 25, 32, 33, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31, 34, 35, 40, 41, 48, 49, 42, 43, 36, 37, 38, 39, 44, 45, 46, 47, 50, 51, 56, 57, 58, 59, 52, 53, 54, 55, 60, 61, 62, 63 }; /* Vertical zigzag inverse */ const static Int zigzag_v_inv[NCOEFF_BLOCK] = { 0, 8, 16, 24, 1, 9, 2, 10, 17, 25, 32, 40, 48, 56, 57, 49, 41, 33, 26, 18, 3, 11, 4, 12, 19, 27, 34, 42, 50, 58, 35, 43, 
51, 59, 20, 28, 5, 13, 6, 14, 21, 29, 36, 44, 52, 60, 37, 45, 53, 61, 22, 30, 7, 15, 23, 31, 38, 46, 54, 62, 39, 47, 55, 63 }; #ifdef __cplusplus extern "C" { #endif Int PutCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream); Int PutMCBPC_Inter(Int cbpc, Int mode, BitstreamEncVideo *bitstream); Int PutMCBPC_Intra(Int cbpc, Int mode, BitstreamEncVideo *bitstream); Int PutMV(Int mvint, BitstreamEncVideo *bitstream); Int PutDCsize_chrom(Int size, BitstreamEncVideo *bitstream); Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream); Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream); #ifndef NO_RVLC Int PutCoeff_Inter_RVLC(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCoeff_Inter_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCoeff_Intra_RVLC(Int run, Int level, BitstreamEncVideo *bitstream); Int PutCoeff_Intra_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream); #endif Int PutRunCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream); Int PutRunCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream); Int PutRunCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream); Int PutRunCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream); Int PutLevelCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream); Int PutLevelCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream); Int PutLevelCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream); Int PutLevelCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream); void RunLevel(VideoEncData *video, Int intra, Int intraDC_decision, Int ncoefblck[]); Int IntraDC_dpcm(Int val, Int lum, BitstreamEncVideo *bitstream); Void 
DCACPred(VideoEncData *video, UChar Mode, Int *intraDC_decision, Int intraDCVlcQP);
    Void find_pmvs(VideoEncData *video, Int block, Int *mvx, Int *mvy);
    Void WriteMVcomponent(Int f_code, Int dmv, BitstreamEncVideo *bs);
    static Bool IntraDCSwitch_Decision(Int Mode, Int intra_dc_vlc_threshold, Int intraDCVlcQP);
    Void ScaleMVD(Int f_code, Int diff_vector, Int *residual, Int *vlc_code_mag);

#ifdef __cplusplus
}
#endif

/* Write the VLC for the intra-DC size category of a luminance block.
   Returns the number of bits written, or -1 when 'size' is outside the
   valid dct_dc_size_luminance range 0..12. */
Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream)
{
    Int length;

    if (!(size >= 0 && size < 13)) return -1;

    length = DCtab_lum[size].len;
    if (length) BitstreamPutBits(bitstream, length, DCtab_lum[size].code);

    return length;
}

/* Write the VLC for the intra-DC size category of a chrominance block.
   Returns the number of bits written, or -1 when 'size' is out of range. */
Int PutDCsize_chrom(Int size, BitstreamEncVideo *bitstream)
{
    Int length;

    if (!(size >= 0 && size < 13)) return -1;

    length = DCtab_chrom[size].len;
    if (length) BitstreamPutBits(bitstream, length, DCtab_chrom[size].code);

    return length;
}

/* Write one motion-vector component VLC.  Values above 32 represent the
   negative half of the wrap-around code range and are folded back before
   the table lookup; a sign bit follows every non-zero vector.
   Returns the total number of bits written. */
Int PutMV(Int mvint, BitstreamEncVideo *bitstream)
{
    Int sign = 0;
    Int absmv;
    Int length;

    if (mvint > 32)
    {
        absmv = -mvint + 65;   /* fold wrap-around code into table index */
        sign = 1;
    }
    else
        absmv = mvint;

    length = mvtab[absmv].len;
    if (length) BitstreamPutBits(bitstream, length, mvtab[absmv].code);

    if (mvint != 0)
    {
        BitstreamPut1Bits(bitstream, sign);
        return (length + 1);
    }
    else
        return length;
}

/* Write the MCBPC VLC for an intra macroblock: MB type (via mode_MBtype)
   combined with the two chroma CBP bits.  Returns the bits written. */
Int PutMCBPC_Intra(Int cbp, Int mode, BitstreamEncVideo *bitstream)
{
    Int ind;
    Int length;

    ind = ((mode_MBtype[mode] >> 1) & 3) | ((cbp & 3) << 2);

    length = mcbpc_intra_tab[ind].len;
    if (length) BitstreamPutBits(bitstream, length, mcbpc_intra_tab[ind].code);

    return length;
}

/* Write the MCBPC VLC for an inter macroblock: MB type (via mode_MBtype)
   combined with the two chroma CBP bits.  Returns the bits written. */
Int PutMCBPC_Inter(Int cbp, Int mode, BitstreamEncVideo *bitstream)
{
    Int ind;
    Int length;

    ind = (mode_MBtype[mode] & 7) | ((cbp & 3) << 3);

    length = mcbpc_inter_tab[ind].len;
    if (length) BitstreamPutBits(bitstream, length, mcbpc_inter_tab[ind].code);

    return length;
}

/* Write the CBPY VLC (luminance coded-block pattern, 4 bits).  For inter
   macroblocks the pattern is complemented before the table lookup.
   Returns the bits written. */
Int PutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream)
{
    Int ind;
    Int length;

    if ((intra == 0)) cbpy = 15 - cbpy;

    ind = cbpy;

    length = cbpy_tab[ind].len;
    if (length) BitstreamPutBits(bitstream, length, (UInt)cbpy_tab[ind].code);

    return length;
}

/* 5/16/01, break up function for last and not-last coefficient */
/* Note:::: I checked the ARM assembly for if( run > x && run < y) type of code,
   they do a really good job compiling it to if( (UInt)(run-x) < y-x ).
   No need to hand-code it!!!!!, 6/1/2001 */

/* Write the VLC for a not-last inter (run,level) pair.
   Returns the code length, or 0 when the pair has no VLC entry and the
   caller must escape-code it. */
Int PutCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run < 2 && level < 13)
    {
        length = coeff_tab0[run][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab0[run][level-1].code);
    }
    else if (run > 1 && run < 27 && level < 5)
    {
        length = coeff_tab1[run-2][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab1[run-2][level-1].code);
    }

    return length;
}

/* Write the VLC for the last inter (run,level) pair of a block.
   Returns the code length, or 0 when escape coding is required. */
Int PutCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run < 2 && level < 4)
    {
        length = coeff_tab2[run][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab2[run][level-1].code);
    }
    else if (run > 1 && run < 42 && level == 1)
    {
        length = coeff_tab3[run-2].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab3[run-2].code);
    }

    return length;
}

/* 5/16/01, break up function for last and not-last coefficient */

/* Write the VLC for a not-last intra (run,level) pair.
   Returns the code length, or 0 when escape coding is required. */
Int PutCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 28)
    {
        length = coeff_tab4[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab4[level-1].code);
    }
    else if (run == 1 && level < 11)
    {
        length = coeff_tab5[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab5[level-1].code);
    }
    else if (run > 1 && run < 10 && level < 6)
    {
        length = coeff_tab6[run-2][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab6[run-2][level-1].code);
    }
    else if (run > 9 && run < 15 && level == 1)
    {
        length = coeff_tab7[run-10].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab7[run-10].code);
    }

    return length;
}

/* Write the VLC for the last intra (run,level) pair of a block.
   Returns the code length, or 0 when escape coding is required. */
Int PutCoeff_Intra_Last(Int
run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 9)
    {
        length = coeff_tab8[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab8[level-1].code);
    }
    else if (run > 0 && run < 7 && level < 4)
    {
        length = coeff_tab9[run-1][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab9[run-1][level-1].code);
    }
    else if (run > 6 && run < 21 && level == 1)
    {
        length = coeff_tab10[run-7].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab10[run-7].code);
    }

    return length;
}

/* 5/16/01, break up function for last and not-last coefficient */

#ifndef NO_RVLC
/* Write the reversible VLC for a not-last inter (run,level) pair.
   Returns the code length, or 0 when the pair has no RVLC entry and the
   caller must escape-code it. */
Int PutCoeff_Inter_RVLC(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 20)
    {
        length = coeff_RVLCtab14[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab14[level-1].code);
    }
    else if (run == 1 && level < 11)
    {
        length = coeff_RVLCtab15[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab15[level-1].code);
    }
    else if (run > 1 && run < 4 && level < 8)
    {
        length = coeff_RVLCtab16[run-2][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab16[run-2][level-1].code);
    }
    else if (run == 4 && level < 6)
    {
        length = coeff_RVLCtab17[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab17[level-1].code);
    }
    else if (run > 4 && run < 8 && level < 5)
    {
        length = coeff_RVLCtab18[run-5][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab18[run-5][level-1].code);
    }
    else if (run > 7 && run < 10 && level < 4)
    {
        length = coeff_RVLCtab19[run-8][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab19[run-8][level-1].code);
    }
    else if (run > 9 && run < 18 && level < 3)
    {
        length = coeff_RVLCtab20[run-10][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab20[run-10][level-1].code);
    }
    else if (run > 17 && run < 39 && level == 1)
    {
        length = coeff_RVLCtab21[run-18].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab21[run-18].code);
    }

    return length;
}

/* Write the reversible VLC for the last inter (run,level) pair.
   Returns the code length, or 0 when escape coding is required. */
Int PutCoeff_Inter_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run >= 0 && run < 2 && level < 6)
    {
        length = coeff_RVLCtab22[run][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab22[run][level-1].code);
    }
    else if (run == 2 && level < 4)
    {
        length = coeff_RVLCtab23[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab23[level-1].code);
    }
    else if (run > 2 && run < 14 && level < 3)
    {
        length = coeff_RVLCtab24[run-3][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab24[run-3][level-1].code);
    }
    else if (run > 13 && run < 45 && level == 1)
    {
        length = coeff_RVLCtab25[run-14].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab25[run-14].code);
    }

    return length;
}

/* 5/16/01, break up function for last and not-last coefficient */

/* Write the reversible VLC for a not-last intra (run,level) pair.
   Returns the code length, or 0 when escape coding is required. */
Int PutCoeff_Intra_RVLC(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 28)
    {
        length = coeff_RVLCtab1[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab1[level-1].code);
    }
    else if (run == 1 && level < 14)
    {
        length = coeff_RVLCtab2[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab2[level-1].code);
    }
    else if (run == 2 && level < 12)
    {
        length = coeff_RVLCtab3[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab3[level-1].code);
    }
    else if (run == 3 && level < 10)
    {
        length = coeff_RVLCtab4[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab4[level-1].code);
    }
    else if (run > 3 && run < 6 && level < 7)
    {
        length = coeff_RVLCtab5[run-4][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab5[run-4][level-1].code);
    }
    else if (run > 5 && run < 8 && level < 6)
    {
        length =
coeff_RVLCtab6[run-6][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab6[run-6][level-1].code);
    }
    else if (run > 7 && run < 10 && level < 5)
    {
        length = coeff_RVLCtab7[run-8][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab7[run-8][level-1].code);
    }
    else if (run > 9 && run < 13 && level < 3)
    {
        length = coeff_RVLCtab8[run-10][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab8[run-10][level-1].code);
    }
    else if (run > 12 && run < 20 && level == 1)
    {
        length = coeff_RVLCtab9[run-13].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab9[run-13].code);
    }

    return length;
}

/* Write the reversible VLC for the last intra (run,level) pair.
   Returns the code length, or 0 when escape coding is required. */
Int PutCoeff_Intra_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run >= 0 && run < 2 && level < 6)
    {
        length = coeff_RVLCtab10[run][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab10[run][level-1].code);
    }
    else if (run == 2 && level < 4)
    {
        length = coeff_RVLCtab11[level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab11[level-1].code);
    }
    else if (run > 2 && run < 14 && level < 3)
    {
        length = coeff_RVLCtab12[run-3][level-1].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab12[run-3][level-1].code);
    }
    else if (run > 13 && run < 45 && level == 1)
    {
        length = coeff_RVLCtab13[run-14].len;
        if (length)
            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab13[run-14].code);
    }

    return length;
}
#endif

/* The following is for 3-mode VLC */

/* 3-mode VLC: write a not-last inter pair using the type-2 (run) escape:
   the 9-bit prefix 0000011 + "10" precedes the regular inter VLC, so the
   returned length is the table length plus 9. */
Int PutRunCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run < 2 && level < 13)
    {
        length = coeff_tab0[run][level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab0[run][level-1].code);
            length += 9;
        }
    }
    else if (run > 1 && run < 27 && level < 5)
    {
        length = coeff_tab1[run-2][level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab1[run-2][level-1].code);
            length += 9;
        }
    }

    return length;
}

/* 3-mode VLC: last inter pair with the 9-bit run-escape prefix. */
Int PutRunCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run < 2 && level < 4)
    {
        length = coeff_tab2[run][level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab2[run][level-1].code);
            length += 9;
        }
    }
    else if (run > 1 && run < 42 && level == 1)
    {
        length = coeff_tab3[run-2].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab3[run-2].code);
            length += 9;
        }
    }

    return length;
}

/* 3-mode VLC: not-last intra pair with the 9-bit run-escape prefix. */
Int PutRunCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 28)
    {
        length = coeff_tab4[level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab4[level-1].code);
            length += 9;
        }
    }
    else if (run == 1 && level < 11)
    {
        length = coeff_tab5[level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab5[level-1].code);
            length += 9;
        }
    }
    else if (run > 1 && run < 10 && level < 6)
    {
        length = coeff_tab6[run-2][level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab6[run-2][level-1].code);
            length += 9;
        }
    }
    else if (run > 9 && run < 15 && level == 1)
    {
        length = coeff_tab7[run-10].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab7[run-10].code);
            length += 9;
        }
    }

    return length;
}

/* 3-mode VLC: last intra pair with the 9-bit run-escape prefix. */
Int
PutRunCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 9)
    {
        length = coeff_tab8[level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab8[level-1].code);
            length += 9;
        }
    }
    else if (run > 0 && run < 7 && level < 4)
    {
        length = coeff_tab9[run-1][level-1].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab9[run-1][level-1].code);
            length += 9;
        }
    }
    else if (run > 6 && run < 21 && level == 1)
    {
        length = coeff_tab10[run-7].len;
        if (length)
        {
            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);
            //BitstreamPutBits(bitstream, 2, 2);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab10[run-7].code);
            length += 9;
        }
    }

    return length;
}

/* 3-mode VLC: write a not-last inter pair using the type-1 (level) escape:
   the 8-bit prefix 0000011 + "0" precedes the regular inter VLC, so the
   returned length is the table length plus 8. */
Int PutLevelCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run < 2 && level < 13)
    {
        length = coeff_tab0[run][level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab0[run][level-1].code);
            length += 8;
        }
    }
    else if (run > 1 && run < 27 && level < 5)
    {
        length = coeff_tab1[run-2][level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab1[run-2][level-1].code);
            length += 8;
        }
    }

    return length;
}

/* 3-mode VLC: last inter pair with the 8-bit level-escape prefix. */
Int PutLevelCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run < 2 && level < 4)
    {
        length = coeff_tab2[run][level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab2[run][level-1].code);
            length += 8;
        }
    }
    else if (run > 1 && run < 42 && level == 1)
    {
        length = coeff_tab3[run-2].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab3[run-2].code);
            length += 8;
        }
    }

    return length;
}

/* 3-mode VLC: not-last intra pair with the 8-bit level-escape prefix. */
Int PutLevelCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 28)
    {
        length = coeff_tab4[level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab4[level-1].code);
            length += 8;
        }
    }
    else if (run == 1 && level < 11)
    {
        length = coeff_tab5[level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab5[level-1].code);
            length += 8;
        }
    }
    else if (run > 1 && run < 10 && level < 6)
    {
        length = coeff_tab6[run-2][level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab6[run-2][level-1].code);
            length += 8;
        }
    }
    else if (run > 9 && run < 15 && level == 1)
    {
        length = coeff_tab7[run-10].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab7[run-10].code);
            length += 8;
        }
    }

    return length;
}

/* 3-mode VLC: last intra pair with the 8-bit level-escape prefix. */
Int PutLevelCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream)
{
    Int length = 0;

    if (run == 0 && level < 9)
    {
        length = coeff_tab8[level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab8[level-1].code);
            length += 8;
        }
    }
    else if (run > 0 && run < 7 && level < 4)
    {
        length = coeff_tab9[run-1][level-1].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab9[run-1][level-1].code);
            length += 8;
        }
    }
    else if (run > 6 && run < 21 && level == 1)
    {
        length = coeff_tab10[run-7].len;
        if (length)
        {
            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);
            BitstreamPutBits(bitstream, length, (UInt)coeff_tab10[run-7].code);
            length += 8;
        }
    }

    return length;
}

/* ======================================================================== */
/*  Function : MBVlcEncode()                                                */
/*  Date     : 09/10/2000                                                   */
/*  Purpose  : Encode GOV Header                                            */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified : 5/21/01,
break up into smaller functions */
/* ======================================================================== */
#ifndef H263_ONLY
/**************************************/
/* Data Partitioning I-VOP Encoding   */
/**************************************/

/* Encode one intra macroblock in data-partitioned mode: header fields go
   to bitstream1, ac_pred_flag/CBPY to bitstream2, AC coefficients to
   bitstream3.  'blkCodePtr' selects the per-block coefficient coder. */
void MBVlcEncodeDataPar_I_VOP(
    VideoEncData *video,
    Int ncoefblck[],
    void *blkCodePtr)
{
    BitstreamEncVideo *bs1 = video->bitstream1;
    BitstreamEncVideo *bs2 = video->bitstream2;
    BitstreamEncVideo *bs3 = video->bitstream3;
    int i;
    UChar Mode = video->headerInfo.Mode[video->mbnum];
    UChar CBP;
//  MacroBlock *MB=video->outputMB;
    Int mbnum = video->mbnum;
    Int intraDC_decision, DC;
//  int temp;
    Int dquant; /* 3/15/01 */
    RunLevelBlock *RLB = video->RLB;
    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;

    /* DC and AC Prediction, 5/28/01, compute CBP, intraDC_decision*/
    DCACPred(video, Mode, &intraDC_decision, video->QP_prev);

    /* CBP, Run, Level, and Sign */
    RunLevel(video, 1, intraDC_decision, ncoefblck);
    CBP = video->headerInfo.CBP[mbnum];

    /* Compute DQuant */
    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/

    video->QP_prev = video->QPMB[mbnum];

    if (dquant && Mode == MODE_INTRA)
    {
        Mode = MODE_INTRA_Q;
    }

    /* map signed dquant difference onto the 2-bit code */
    if (dquant >= 0)
        dquant = (PV_ABS(dquant) + 1);
    else
        dquant = (PV_ABS(dquant) - 1);

    /* FIRST PART: ALL TO BS1 */
    PutMCBPC_Intra(CBP, Mode, bs1); /* MCBPC */

    if (Mode == MODE_INTRA_Q) /* MAY NEED TO CHANGE DQUANT HERE */
        BitstreamPutBits(bs1, 2, dquant); /* dquant*/

    if (intraDC_decision == 0)
    {
        for (i = 0; i < 6; i++)
        {
            DC = video->RLB[i].level[0];
            if (video->RLB[i].s[0])
                DC = -DC;

            if (i < 4)
                /*temp =*/ IntraDC_dpcm(DC, 1, bs1);  /* dct_dc_size_luminance, */
            else                                      /* dct_dc_differential, and */
                /*temp =*/ IntraDC_dpcm(DC, 0, bs1);  /* marker bit */
        }
    }

    /* SECOND PART: ALL TO BS2*/
    BitstreamPut1Bits(bs2, video->acPredFlag[video->mbnum]); /* ac_pred_flag */

    /*temp=*/ PutCBPY(CBP >> 2, (Char)(1), bs2); /* cbpy */

    /* THIRD PART: ALL TO BS3*/
    /* MB_CodeCoeff(video,bs3); */ /* 5/22/01, replaced with below */
    for (i = 0; i < 6; i++)
    {
        if (CBP&(1 << (5 - i)))
            (*BlockCodeCoeff)(&(RLB[i]), bs3, 1 - intraDC_decision, ncoefblck[i], Mode);/* Code Intra AC*/
    }

    return ;
}

/************************************/
/* Data Partitioning P-VOP Encoding */
/************************************/

/* Encode one macroblock of a P-VOP in data-partitioned mode.  Detects and
   signals skipped macroblocks, writes MV/header fields to bitstream1,
   CBPY/dquant (and intra DC) to bitstream2, AC coefficients to bitstream3. */
void MBVlcEncodeDataPar_P_VOP(
    VideoEncData *video,
    Int ncoefblck[],
    void *blkCodePtr)
{
    BitstreamEncVideo *bs1 = video->bitstream1;
    BitstreamEncVideo *bs2 = video->bitstream2;
    BitstreamEncVideo *bs3 = video->bitstream3;
    int i;
    Int mbnum = video->mbnum;
    UChar Mode = video->headerInfo.Mode[mbnum];
    Int QP_tmp = video->QPMB[mbnum];
    UChar CBP;
//  MacroBlock *MB=video->outputMB;
    Int intra, intraDC_decision, DC;
    Int pmvx, pmvy;
//  int temp;
    Int dquant; /* 3/15/01 */
    RunLevelBlock *RLB = video->RLB;
    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;

    intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);

    /* DC and AC Prediction, 5/28/01, compute CBP, intraDC_decision*/
    if (intra)
    {
        if (video->usePrevQP)
        {
            QP_tmp = video->QPMB[mbnum-1];
        }
        DCACPred(video, Mode, &intraDC_decision, QP_tmp);
    }
    else
        intraDC_decision = 0; /* used in RunLevel */

    /* CBP, Run, Level, and Sign */
    RunLevel(video, intra, intraDC_decision, ncoefblck);
    CBP = video->headerInfo.CBP[mbnum];

    /* Compute DQuant */
    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/

    if (dquant && (Mode == MODE_INTRA || Mode == MODE_INTER))
    {
        Mode += 2; /* make it MODE_INTRA_Q and MODE_INTER_Q */
    }

    if (dquant >= 0)
        dquant = (PV_ABS(dquant) + 1);
    else
        dquant = (PV_ABS(dquant) - 1);

    /* FIRST PART: ALL TO BS1 */
    if (CBP == 0 && intra == 0) /* Determine if Skipped MB */
    {
        if ((Mode == MODE_INTER) && (video->mot[mbnum][0].x == 0) && (video->mot[mbnum][0].y == 0))
            Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED;
        else if ((Mode == MODE_INTER4V) && (video->mot[mbnum][1].x == 0) && (video->mot[mbnum][1].y == 0) &&
                 (video->mot[mbnum][2].x == 0) && (video->mot[mbnum][2].y == 0) &&
                 (video->mot[mbnum][3].x == 0) && (video->mot[mbnum][3].y == 0) &&
                 (video->mot[mbnum][4].x == 0) && (video->mot[mbnum][4].y == 0))
            Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED;
    }

    if (Mode == MODE_SKIPPED)
    {
        BitstreamPut1Bits(bs1, 1); /* not_coded = 1 */
        return;
    }
    else
        BitstreamPut1Bits(bs1, 0); /* not_coded =0 */

    video->QP_prev = video->QPMB[mbnum];
    video->usePrevQP = 1;

    PutMCBPC_Inter(CBP, Mode, bs1); /* MCBPC */

    video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */

    if (Mode == MODE_INTER || Mode == MODE_INTER_Q)
    {
        find_pmvs(video, 0, &pmvx, &pmvy); /* Get predicted motion vectors */

        WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].x - pmvx, bs1); /* Write x to bitstream */

        WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].y - pmvy, bs1); /* Write y to bitstream */
    }
    else if (Mode == MODE_INTER4V)
    {
        for (i = 1; i < 5; i++)
        {
            find_pmvs(video, i, &pmvx, &pmvy);

            WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].x - pmvx, bs1);

            WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].y - pmvy, bs1);
        }
    }
    video->header_bits += BitstreamGetPos(bs1); /* Header Bits */

    /* SECOND PART: ALL TO BS2 */
    if (intra)
    {
        BitstreamPut1Bits(bs2, video->acPredFlag[video->mbnum]); /* ac_pred_flag */

        /*temp=*/ PutCBPY(CBP >> 2, (Char)(Mode == MODE_INTRA || Mode == MODE_INTRA_Q), bs2); /* cbpy */

        if (Mode == MODE_INTRA_Q)
            BitstreamPutBits(bs2, 2, dquant); /* dquant, 3/15/01*/

        if (intraDC_decision == 0)
        {
            for (i = 0; i < 6; i++)
            {
                DC = video->RLB[i].level[0];
                if (video->RLB[i].s[0])
                    DC = -DC;

                if (i < 4)
                    /*temp =*/ IntraDC_dpcm(DC, 1, bs2);  /* dct_dc_size_luminance, */
                else                                      /* dct_dc_differential, and */
                    /*temp =*/ IntraDC_dpcm(DC, 0, bs2);  /* marker bit */
            }
        }

        /****************************/
        /* THIRD PART: ALL TO BS3   */
        for (i = 0; i < 6; i++)
        {
            if (CBP&(1 << (5 - i)))
                (*BlockCodeCoeff)(&(RLB[i]), bs3, 1 - intraDC_decision, ncoefblck[i], Mode);/* Code Intra AC*/
        }
    }
    else
    {
        /*temp=*/ PutCBPY(CBP >> 2, (Char)(Mode == MODE_INTRA || Mode == MODE_INTRA_Q), bs2); /* cbpy */

        if (Mode == MODE_INTER_Q) /* MAY NEED TO CHANGE DQUANT HERE */
            BitstreamPutBits(bs2, 2, dquant); /* dquant, 3/15/01*/

        /****************************/
        /* THIRD PART: ALL TO BS3   */
        for (i = 0; i < 6; i++)
        {
            if (CBP&(1 << (5 - i)))
                (*BlockCodeCoeff)(&(RLB[i]), bs3, 0, ncoefblck[i], Mode);/* Code Intra AC*/
        }
    }

    return ;
}
#endif /* H263_ONLY */

/****************************************************************************************/
/* Short Header/Combined Mode with or without Error Resilience I-VOP and P-VOP Encoding */
/* 5/21/01, B-VOP is not implemented yet!!!!                                            */
/****************************************************************************************/

/* Encode one intra macroblock in combined (single-bitstream) or short-header
   mode; all fields go to bitstream1. */
void MBVlcEncodeCombined_I_VOP(
    VideoEncData *video,
    Int ncoefblck[],
    void *blkCodePtr)
{
    BitstreamEncVideo *bs1 = video->bitstream1;
//  BitstreamEncVideo *bs2 = video->bitstream2;
//  BitstreamEncVideo *bs3 = video->bitstream3;
    int i;
    UChar Mode = video->headerInfo.Mode[video->mbnum];
    UChar CBP = video->headerInfo.CBP[video->mbnum];
//  MacroBlock *MB=video->outputMB;
    Int mbnum = video->mbnum;
    Int intraDC_decision;
//  int temp;
    Int dquant; /* 3/15/01 */
    RunLevelBlock *RLB = video->RLB;
    Int DC;
    Int shortVideoHeader = video->vol[video->currLayer]->shortVideoHeader;
    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;

    /* DC and AC Prediction, 5/28/01, compute CBP, intraDC_decision*/
#ifndef H263_ONLY
    if (!shortVideoHeader)
        DCACPred(video, Mode, &intraDC_decision, video->QP_prev);
    else
#endif
    {
        intraDC_decision = 0;
    }

    /* CBP, Run, Level, and Sign */
    RunLevel(video, 1, intraDC_decision, ncoefblck);
    CBP = video->headerInfo.CBP[mbnum];

    /* Compute DQuant */
    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/
    video->QP_prev = video->QPMB[mbnum];

    if (dquant && Mode == MODE_INTRA)
    {
        Mode = MODE_INTRA_Q;
    }

    if (dquant
>= 0) dquant = (PV_ABS(dquant) + 1); else dquant = (PV_ABS(dquant) - 1); PutMCBPC_Intra(CBP, Mode, bs1); /* mcbpc I_VOP */ if (!video->vol[video->currLayer]->shortVideoHeader) { BitstreamPut1Bits(bs1, video->acPredFlag[video->mbnum]); /* ac_pred_flag */ } /*temp=*/ PutCBPY(CBP >> 2, (Char)(1), bs1); /* cbpy */ if (Mode == MODE_INTRA_Q) /* MAY NEED TO CHANGE DQUANT HERE */ BitstreamPutBits(bs1, 2, dquant); /* dquant, 3/15/01*/ /*MB_CodeCoeff(video,bs1); 5/21/01, replaced by below */ /*******************/ #ifndef H263_ONLY if (shortVideoHeader) /* Short Header DC coefficients */ { #endif for (i = 0; i < 6; i++) { DC = RLB[i].level[0]; if (RLB[i].s[0]) DC = -DC; if (DC != 128) BitstreamPutBits(bs1, 8, DC); /* intra_dc_size_luminance */ else BitstreamPutBits(bs1, 8, 255); /* intra_dc_size_luminance */ if (CBP&(1 << (5 - i))) (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode); /* Code short header Intra AC*/ } #ifndef H263_ONLY } else if (intraDC_decision == 0) /* Combined Intra Mode DC and AC coefficients */ { for (i = 0; i < 6; i++) { DC = RLB[i].level[0]; if (RLB[i].s[0]) DC = -DC; if (i < 4) /*temp =*/ IntraDC_dpcm(DC, 1, bs1); /* dct_dc_size_luminance, */ else /* dct_dc_differential, and */ /*temp =*/ IntraDC_dpcm(DC, 0, bs1); /* marker bit */ if (CBP&(1 << (5 - i))) (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode);/* Code Intra AC */ } } else /* Combined Mode Intra DC/AC coefficients */ { for (i = 0; i < 6; i++) { if (CBP&(1 << (5 - i))) (*BlockCodeCoeff)(&(RLB[i]), bs1, 0, ncoefblck[i], Mode);/* Code Intra AC */ } } #endif /*******************/ return ; } void MBVlcEncodeCombined_P_VOP( VideoEncData *video, Int ncoefblck[], void *blkCodePtr) { BitstreamEncVideo *bs1 = video->bitstream1; // BitstreamEncVideo *bs2 = video->bitstream2; // BitstreamEncVideo *bs3 = video->bitstream3; int i; Int mbnum = video->mbnum; UChar Mode = video->headerInfo.Mode[mbnum]; Int QP_tmp = video->QPMB[mbnum]; UChar CBP ; // MacroBlock *MB=video->outputMB; Int intra, 
intraDC_decision; Int pmvx, pmvy; // int temp; Int dquant; /* 3/15/01 */ RunLevelBlock *RLB = video->RLB; Int DC; Int shortVideoHeader = video->vol[video->currLayer]->shortVideoHeader; BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr; intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q); /* DC and AC Prediction, 5/28/01, compute intraDC_decision*/ #ifndef H263_ONLY if (!shortVideoHeader && intra) { if (video->usePrevQP) { QP_tmp = video->QPMB[mbnum-1]; } DCACPred(video, Mode, &intraDC_decision, QP_tmp); } else #endif intraDC_decision = 0; /* CBP, Run, Level, and Sign */ RunLevel(video, intra, intraDC_decision, ncoefblck); CBP = video->headerInfo.CBP[mbnum]; /* Compute DQuant */ dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/ if (dquant && (Mode == MODE_INTRA || Mode == MODE_INTER)) { Mode += 2; /* make it MODE_INTRA_Q and MODE_INTER_Q */ } if (dquant >= 0) dquant = (PV_ABS(dquant) + 1); else dquant = (PV_ABS(dquant) - 1); if (CBP == 0 && intra == 0) /* Determine if Skipped MB */ { if ((Mode == MODE_INTER) && (video->mot[mbnum][0].x == 0) && (video->mot[mbnum][0].y == 0)) Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED; else if ((Mode == MODE_INTER4V) && (video->mot[mbnum][1].x == 0) && (video->mot[mbnum][1].y == 0) && (video->mot[mbnum][2].x == 0) && (video->mot[mbnum][2].y == 0) && (video->mot[mbnum][3].x == 0) && (video->mot[mbnum][3].y == 0) && (video->mot[mbnum][4].x == 0) && (video->mot[mbnum][4].y == 0)) Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED; } if (Mode == MODE_SKIPPED) { BitstreamPut1Bits(bs1, 1); /* not_coded = 1 */ return; } else BitstreamPut1Bits(bs1, 0); /* not_coded =0 */ video->QP_prev = video->QPMB[mbnum]; video->usePrevQP = 1; PutMCBPC_Inter(CBP, Mode, bs1); /* mcbpc P_VOP */ if (!video->vol[video->currLayer]->shortVideoHeader && intra) { BitstreamPut1Bits(bs1, video->acPredFlag[video->mbnum]); /* ac_pred_flag */ } /*temp=*/ 
PutCBPY(CBP >> 2, (Char)(intra), bs1); /* cbpy */ if (Mode == MODE_INTRA_Q || Mode == MODE_INTER_Q) /* MAY NEED TO CHANGE DQUANT HERE */ BitstreamPutBits(bs1, 2, dquant); /* dquant, 3/15/01*/ video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */ if (!((video->vol[video->currLayer]->scalability) && (video->currVop->refSelectCode == 3))) { if (Mode == MODE_INTER || Mode == MODE_INTER_Q) { find_pmvs(video, 0, &pmvx, &pmvy); /* Get predicted motion vectors */ WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].x - pmvx, bs1); /* Write x to bitstream */ WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].y - pmvy, bs1); /* Write y to bitstream */ } else if (Mode == MODE_INTER4V) { for (i = 1; i < 5; i++) { find_pmvs(video, i, &pmvx, &pmvy); WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].x - pmvx, bs1); WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].y - pmvy, bs1); } } } video->header_bits += BitstreamGetPos(bs1); /* Header Bits */ /* MB_CodeCoeff(video,bs1); */ /* 5/22/01, replaced with below */ /****************************/ if (intra) { #ifndef H263_ONLY if (shortVideoHeader) /* Short Header DC coefficients */ { #endif for (i = 0; i < 6; i++) { DC = RLB[i].level[0]; if (RLB[i].s[0]) DC = -DC; if (DC != 128) BitstreamPutBits(bs1, 8, DC); /* intra_dc_size_luminance */ else BitstreamPutBits(bs1, 8, 255); /* intra_dc_size_luminance */ if (CBP&(1 << (5 - i))) (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode); /* Code short header Intra AC*/ } #ifndef H263_ONLY } else if (intraDC_decision == 0) /* Combined Intra Mode DC and AC coefficients */ { for (i = 0; i < 6; i++) { DC = RLB[i].level[0]; if (RLB[i].s[0]) DC = -DC; if (i < 4) /*temp =*/ IntraDC_dpcm(DC, 1, bs1); /* dct_dc_size_luminance, */ else /* dct_dc_differential, and */ /*temp =*/ IntraDC_dpcm(DC, 0, bs1); /* marker bit */ if (CBP&(1 << (5 - i))) (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode);/* Code Intra AC */ } } 
else /* Combined Mode Intra DC/AC coefficients */
{
    /* intra_dc_vlc_thr switched the DC value into the AC run/level
       stream, so each coded block starts at position 0 (DC included). */
    for (i = 0; i < 6; i++)
    {
        if (CBP&(1 << (5 - i)))
            (*BlockCodeCoeff)(&(RLB[i]), bs1, 0, ncoefblck[i], Mode);/* Code Intra AC */
    }
}
#endif
}
else /* Shortheader or Combined INTER Mode AC coefficients */
{
    for (i = 0; i < 6; i++)
    {
        if (CBP&(1 << (5 - i)))
            (*BlockCodeCoeff)(&(RLB[i]), bs1, 0, ncoefblck[i], Mode);/* Code Inter AC*/
    }
}
/****************************/

return ;
}

/* ======================================================================== */
/*  Function : BlockCodeCoeff()                                             */
/*  Date     : 09/18/2000                                                   */
/*  Purpose  : VLC Encode AC/DC coeffs                                      */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified : 5/16/01  grouping BitstreamPutBits calls                     */
/*             5/22/01  break up function                                   */
/* ======================================================================== */
#ifndef NO_RVLC
/*****************/
/* RVLC ENCODING */
/*****************/

/* Encode one block's {run, level, sign} symbols with the reversible
 * VLC tables.  Symbols j_start..j_stop-2 use the "not last" tables,
 * the final symbol uses the "last" tables.  When a symbol falls
 * outside the table range (Put* returns 0), a fixed-length escape
 * sequence is written instead: ESCAPE + run(6) + marker + level(11)
 * + marker + trailing escape nibble.  The sign bit always follows. */
Void BlockCodeCoeff_RVLC(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode)
{
    int length = 0;
    int i;
    Int level;
    Int run;
    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);

    /* Not Last Coefficient */
    for (i = j_start; i < j_stop - 1; i++)
    {
        run = RLB->run[i];
        level = RLB->level[i];
        //if(i==63||RLB->run[i+1] == -1)    /* Don't Code Last Coefficient Here */
        //  break;

        /*ENCODE RUN LENGTH */
        if (level < 28 && run < 39)   /* inside the "not last" RVLC table range */
        {
            if (intra)
                length = PutCoeff_Intra_RVLC(run, level, bs);
            else
                length = PutCoeff_Inter_RVLC(run, level, bs);
        }
        else
            length = 0;

        /* ESCAPE CODING */
        if (length == 0)
        {
            BitstreamPutBits(bs, 5 + 1, 2);                 /* ESCAPE + Not Last Coefficient */
            //BitstreamPutBits(bs,1,0);                     /* Not Last Coefficient */
            BitstreamPutBits(bs, 6 + 1, (run << 1) | 1);    /* RUN + MARKER BIT*/
            //BitstreamPutBits(bs,1,1);                     /* MARKER BIT */
            BitstreamPutGT8Bits(bs, 11, level);             /* LEVEL */
            BitstreamPutBits(bs, 1 + 4, 16);                /* MARKER BIT */
            //BitstreamPutBits(bs,4,0);                     /* RVLC TRAILING ESCAPE */
        }

        BitstreamPutBits(bs, 1, RLB->s[i]);                 /* SIGN BIT */
    }

    /* Last Coefficient!!!
    */
    run = RLB->run[i];
    level = RLB->level[i];

    /*ENCODE RUN LENGTH */
    if (level < 6 && run < 45)   /* the "last" tables cover a smaller range */
    {
        if (intra)
            length = PutCoeff_Intra_RVLC_Last(run, level, bs);
        else
            length = PutCoeff_Inter_RVLC_Last(run, level, bs);
    }
    else
        length = 0;

    /* ESCAPE CODING */
    if (length == 0)
    {
        BitstreamPutBits(bs, 5 + 1, 3);                 /* ESCAPE CODE + Last Coefficient*/
        //BitstreamPutBits(bs,1,1);                     /* Last Coefficient !*/
        BitstreamPutBits(bs, 6 + 1, (run << 1) | 1);    /* RUN + MARKER BIT*/
        //BitstreamPutBits(bs,1,1);                     /* MARKER BIT */
        BitstreamPutGT8Bits(bs, 11, level);             /* LEVEL */
        BitstreamPutBits(bs, 1 + 4, 16);                /* MARKER BIT + RVLC TRAILING ESCAPE */
        //BitstreamPutBits(bs,4,0);                     /* */
    }

    BitstreamPut1Bits(bs, RLB->s[i]);                   /* SIGN BIT */

    return ;
}
#endif

/*******************************/
/* SHORT VIDEO HEADER ENCODING */
/*******************************/

/* H.263 (short video header) TCOEF coding for one block.  Uses the
 * single inter VLC table for both intra and inter (Mode is unused);
 * out-of-range symbols get the 8-bit fixed-length escape with a
 * signed level.  Note the sign handling differs from the RVLC path:
 * the VLC case writes a separate sign bit, the escape case folds the
 * sign into the 8-bit level field. */
Void BlockCodeCoeff_ShortHeader(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode)
{
    int length = 0;
    int i;
    // int temp;
    Int level;
    Int run;

    OSCL_UNUSED_ARG(Mode);

    /* Not Last Coefficient */
    for (i = j_start; i < j_stop - 1; i++)
    {
        run = RLB->run[i];
        level = RLB->level[i];
        // if(i==63 ||RLB->run[i+1] == -1)    /* Don't Code Last Coefficient Here */
        //  break;

        /*ENCODE RUN LENGTH */
        if (level < 13)
        {
            length = PutCoeff_Inter(run, level, bs);
            if (length != 0)
                /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]);    /* Sign Bit */
        }
        else
            length = 0;

        /* ESCAPE CODING */
        if (length == 0)
        {
            if (RLB->s[i])
                level = -level;
            BitstreamPutBits(bs, 7 + 1, 6);         /* ESCAPE CODE + Not Last Coefficient */
            //BitstreamPutBits(bs,1,0);             /* Not Last Coefficient */
            BitstreamPutBits(bs, 6, run);           /* RUN */
            BitstreamPutBits(bs, 8, level&0xFF);    /* LEVEL, mask to make sure length 8 */
        }
    }

    /* Last Coefficient!!!
*/ run = RLB->run[i]; level = RLB->level[i]; /*ENCODE RUN LENGTH */ if (level < 13) { length = PutCoeff_Inter_Last(run, level, bs); if (length != 0) /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]); /* Sign Bit */ } else length = 0; /* ESCAPE CODING */ if (length == 0) { if (RLB->s[i]) level = -level; BitstreamPutBits(bs, 7 + 1, 7); /* ESCAPE CODE + Last Coefficient */ //BitstreamPutBits(bs,1,1); /* Last Coefficient !!!*/ BitstreamPutBits(bs, 6, run); /* RUN */ BitstreamPutBits(bs, 8, level&0xFF); /* LEVEL, mask to make sure length 8 */ } return ; } #ifndef H263_ONLY /****************/ /* VLC ENCODING */ /****************/ Void BlockCodeCoeff_Normal(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode) { int length = 0; int i; //int temp; Int level; Int run; Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q); Int level_minus_max; Int run_minus_max; Int(*PutCoeff)(Int, Int, BitstreamEncVideo *); /* pointer to functions, 5/28/01 */ /* Not Last Coefficient!!! */ if (intra) PutCoeff = &PutCoeff_Intra; else PutCoeff = &PutCoeff_Inter; for (i = j_start; i < j_stop - 1; i++) { run = RLB->run[i]; level = RLB->level[i]; /* Encode Run Length */ if (level < 28) { length = (*PutCoeff)(run, level, bs); /* 5/28/01 replaces above */ } else { length = 0; } /* First escape mode: LEVEL OFFSET */ if (length == 0) { if (intra) { level_minus_max = level - intra_max_level[0][run]; if (level_minus_max < 28) length = PutLevelCoeff_Intra(run, level_minus_max, bs); else length = 0; } else { level_minus_max = level - inter_max_level[0][run]; if (level_minus_max < 13) length = PutLevelCoeff_Inter(run, level_minus_max, bs); else length = 0; } /* Second escape mode: RUN OFFSET */ if (length == 0) { if (level < 28) { if (intra) { run_minus_max = run - (intra_max_run0[level] + 1); length = PutRunCoeff_Intra(run_minus_max, level, bs); } else if (level < 13) { run_minus_max = run - (inter_max_run0[level] + 1); length = PutRunCoeff_Inter(run_minus_max, level, bs); } else { 
length = 0; } } else { length = 0; } /* Third escape mode: FIXED LENGTH CODE */ if (length == 0) { if (RLB->s[i]) level = -level; /*temp =*/ BitstreamPutBits(bs, 7 + 2 + 1, 30); /* ESCAPE CODE + Followed by 11 + Not Last Coefficient*/ //temp = BitstreamPutBits(bs,2,3); /* Followed by 11 */ //temp = BitstreamPutBits(bs, 1, 0); /* Not Last Coefficient*/ /*temp =*/ BitstreamPutBits(bs, 6 + 1, (run << 1) | 1); /* Encode Run + Marker Bit */ //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */ /*temp =*/ BitstreamPutGT8Bits(bs, 12 + 1, ((level << 1) | 1)&0x1FFF); /* Encode Level, mask to make sure length 12 */ //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */ } } } /* Encode Sign Bit */ if (length != 0) /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]); /* Sign Bit */ } /* Last Coefficient */ run = RLB->run[i]; level = RLB->level[i]; /* Encode Run Length */ if (level < 9) { if (intra) { length = PutCoeff_Intra_Last(run, level, bs); } else if (level < 4) { length = PutCoeff_Inter_Last(run, level, bs); } else { length = 0; } } else { length = 0; } /* First escape mode: LEVEL OFFSET */ if (length == 0) { if (intra) { level_minus_max = level - intra_max_level[1][run]; if (level_minus_max < 9) length = PutLevelCoeff_Intra_Last(run, level_minus_max, bs); else length = 0; } else { level_minus_max = level - inter_max_level[1][run]; if (level_minus_max < 4) length = PutLevelCoeff_Inter_Last(run, level_minus_max, bs); else length = 0; } /* Second escape mode: RUN OFFSET */ if (length == 0) { if (level < 9) { if (intra) { run_minus_max = run - (intra_max_run1[level] + 1); length = PutRunCoeff_Intra_Last(run_minus_max, level, bs); } else if (level < 4) { run_minus_max = run - (inter_max_run1[level] + 1); length = PutRunCoeff_Inter_Last(run_minus_max, level, bs); } else { length = 0; } } else { length = 0; } /* Third escape mode: FIXED LENGTH CODE */ if (length == 0) { if (RLB->s[i]) level = -level; /*temp =*/ BitstreamPutGT8Bits(bs, 7 + 2 + 1, 31); /* ESCAPE CODE + Followed by 11 + Last 
Coefficient*/ //temp = BitstreamPutBits(bs,2,3); /* Followed by 11 */ //temp = BitstreamPutBits(bs, 1, 1); /* Last Coefficient!!!*/ /*temp =*/ BitstreamPutBits(bs, 6 + 1, (run << 1) | 1); /* Encode Run + Marker Bit */ //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */ /*temp =*/ BitstreamPutGT8Bits(bs, 12 + 1, ((level << 1) | 1)&0x1FFF); /* Encode Level, mask to make sure length 8 */ //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */ } } } /* Encode Sign Bit */ if (length != 0) /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]); return ; } #endif /* H263_ONLY */ /* ======================================================================== */ /* Function : RUNLevel */ /* Date : 09/20/2000 */ /* Purpose : Get the Coded Block Pattern for each block */ /* In/out : */ /* Int* qcoeff Quantized DCT coefficients Int Mode Coding Mode Int ncoeffs Number of coefficients */ /* Return : */ /* Int CBP Coded Block Pattern */ /* Modified : */ /* ======================================================================== */ void RunLevel(VideoEncData *video, Int intra, Int intraDC_decision, Int ncoefblck[]) { Int i, j; Int CBP = video->headerInfo.CBP[video->mbnum]; Int ShortNacNintra = (!(video->vol[video->currLayer]->shortVideoHeader) && video->acPredFlag[video->mbnum] && intra); MacroBlock *MB = video->outputMB; Short *dataBlock; Int level; RunLevelBlock *RLB; Int run, idx; Int *zz, nc, zzorder; UChar imask[6] = {0x1F, 0x2F, 0x37, 0x3B, 0x3D, 0x3E}; UInt *bitmapzz; /* Set Run, Level and CBP for this Macroblock */ /* ZZ scan is done here. 
*/ if (intra) { if (intraDC_decision != 0) intra = 0; /* DC/AC in Run/Level */ for (i = 0; i < 6 ; i++) { zz = (Int *) zigzag_inv; RLB = video->RLB + i; dataBlock = MB->block[i]; if (intra) { RLB->run[0] = 0; level = dataBlock[0]; dataBlock[0] = 0; /* reset to zero */ if (level < 0) { RLB->level[0] = -level; RLB->s[0] = 1; } else { RLB->level[0] = level; RLB->s[0] = 0; } } idx = intra; if ((CBP >> (5 - i)) & 1) { if (ShortNacNintra) { switch ((video->zz_direction >> (5 - i))&1) { case 0: zz = (Int *)zigzag_v_inv; break; case 1: zz = (Int *)zigzag_h_inv; break; } } run = 0; nc = ncoefblck[i]; for (j = intra, zz += intra; j < nc; j++, zz++) { zzorder = *zz; level = dataBlock[zzorder]; if (level == 0) run++; else { dataBlock[zzorder] = 0; /* reset output */ if (level < 0) { RLB->level[idx] = -level; RLB->s[idx] = 1; RLB->run[idx] = run; run = 0; idx++; } else { RLB->level[idx] = level; RLB->s[idx] = 0; RLB->run[idx] = run; run = 0; idx++; } } } } ncoefblck[i] = idx; /* 5/22/01, reuse ncoefblck */ if (idx == intra) /* reset CBP, nothing to be coded */ CBP &= imask[i]; } video->headerInfo.CBP[video->mbnum] = CBP; return ; } else { // zz = (Int *) zigzag_inv; no need to use it, default if (CBP) { for (i = 0; i < 6 ; i++) { RLB = video->RLB + i; idx = 0; if ((CBP >> (5 - i)) & 1) { /* 7/30/01 */ /* Use bitmapzz to find the Run,Level,Sign symbols */ bitmapzz = video->bitmapzz[i]; dataBlock = MB->block[i]; nc = ncoefblck[i]; idx = zero_run_search(bitmapzz, dataBlock, RLB, nc); } ncoefblck[i] = idx; /* 5/22/01, reuse ncoefblck */ if (idx == 0) /* reset CBP, nothing to be coded */ CBP &= imask[i]; } video->headerInfo.CBP[video->mbnum] = CBP; } return ; } } #ifndef H263_ONLY #ifdef __cplusplus extern "C" { #endif static Bool IntraDCSwitch_Decision(Int Mode, Int intra_dc_vlc_thr, Int intraDCVlcQP) { Bool switched = FALSE; if (Mode == MODE_INTRA || Mode == MODE_INTRA_Q) { if (intra_dc_vlc_thr != 0) { switched = (intra_dc_vlc_thr == 7 || intraDCVlcQP >= intra_dc_vlc_thr * 2 + 
11);   /* i.e. switch when QP >= 2*intra_dc_vlc_thr + 11 (thr == 7 => always) */
        }
    }
    return switched;
}
#ifdef __cplusplus
}
#endif

/* Differentially code one INTRA DC value: write the dct_dc_size VLC
 * (luminance table when lum != 0, else chrominance) followed by the
 * size-bit magnitude; negative values are coded as the one's
 * complement of |val|.  A marker bit follows when size > 8.
 * Returns the total number of bits written. */
Int IntraDC_dpcm(Int val, Int lum, BitstreamEncVideo *bitstream)
{
    Int n_bits;
    Int absval, size = 0;

    absval = (val < 0) ? -val : val;    /* abs(val) */

    /* compute dct_dc_size */
    size = 0;
    while (absval)
    {
        absval >>= 1;
        size++;
    }

    if (lum)
    {   /* luminance */
        n_bits = PutDCsize_lum(size, bitstream);
    }
    else
    {   /* chrominance */
        n_bits = PutDCsize_chrom(size, bitstream);
    }

    if (size != 0)
    {
        if (val >= 0)
        {
            ;
        }
        else
        {
            absval = -val;  /* set to "-val" MW 14-NOV-1996 */
            val = absval ^((1 << size) - 1);    /* one's complement for negatives */
        }
        BitstreamPutBits(bitstream, (size), (UInt)(val));
        n_bits += size;
        if (size > 8)
            BitstreamPut1Bits(bitstream, 1);    /* marker bit after wide DC code */
    }

    return n_bits;  /* # bits for intra_dc dpcm */
}

/* ======================================================================== */
/*  Function : DC_AC_PRED                                                   */
/*  Date     : 09/24/2000                                                   */
/*  Purpose  : DC and AC encoding of Intra Blocks                           */
/*  In/out   :                                                              */
/*      VideoEncData *video                                                 */
/*      UChar Mode                                                          */
/*  Return   :                                                              */
/* ======================================================================== */

Int cal_dc_scalerENC(Int QP, Int type) ;

/* Predict 7 AC coefficients from the chosen neighbor; when the
 * neighbor was quantized with a different QP the stored values are
 * rescaled (rounded division) into the current QP domain. */
#define PREDICT_AC  for (m = 0; m < 7; m++){ \
        tmp = DCAC[0]*QPtmp;\
        if(tmp<0) tmp = (tmp-(QP/2))/QP;\
        else tmp = (tmp+(QP/2))/QP;\
        pred[m] = tmp;\
        DCAC++;\
    }

/* INTRA DC/AC prediction for the current macroblock: picks the
 * prediction direction per block, subtracts the predicted DC (and,
 * when AC prediction wins, the predicted AC row/column), updates the
 * stored DC/AC neighbor caches, sets acPredFlag and the DC-VLC
 * switch decision for this MB. */
Void DCACPred(VideoEncData *video, UChar Mode, Int *intraDC_decision, Int intraDCVlcQP)
{
    MacroBlock *MB = video->outputMB;
    Int mbnum = video->mbnum;
    typeDCStore *DC_store = video->predDC + mbnum;
    typeDCACStore *DCAC_row = video->predDCAC_row;
    typeDCACStore *DCAC_col = video->predDCAC_col;
    Short *DCAC;
    UChar Mode_top, Mode_left;
    Vol *currVol = video->vol[video->currLayer];
    Int nMBPerRow = currVol->nMBPerRow;
    Int x_pos = video->outputMB->mb_x; /* 5/28/01 */
    Int y_pos = video->outputMB->mb_y;
    UChar QP = video->QPMB[mbnum];
    UChar *QPMB = video->QPMB;
    UChar *slice_nb = video->sliceNo;
    Bool bACPredEnable = video->encParams->ACDCPrediction;
    Int *ACpred_flag = video->acPredFlag;
    Int mid_grey = 128 << 3;
    Int m;
    Int comp;
    Int dc_scale = 8, tmp;
    static
const Int Xpos[6] = { -1, 0, -1, 0, -1, -1}; static const Int Ypos[6] = { -1, -1, 0, 0, -1, -1}; static const Int Xtab[6] = {1, 0, 3, 2, 4, 5}; static const Int Ytab[6] = {2, 3, 0, 1, 4, 5}; static const Int Ztab[6] = {3, 2, 1, 0, 4, 5}; /* I added these to speed up comparisons */ static const Int Pos0[6] = { 1, 1, 0, 0, 1, 1}; static const Int Pos1[6] = { 1, 0, 1, 0, 1, 1}; static const Int B_Xtab[6] = {0, 1, 0, 1, 2, 3}; static const Int B_Ytab[6] = {0, 0, 1, 1, 2, 3}; Int direction[6]; /* 0: HORIZONTAL, 1: VERTICAL */ Int block_A, block_B, block_C; Int grad_hor, grad_ver, DC_pred; Short pred[7], *predptr; Short pcoeff[42]; Short *qcoeff; Int S = 0, S1, S2; Int diff, QPtmp; Int newCBP[6]; UChar mask1[6] = {0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; // UChar mask2[6] = {0x1f,0x2f,0x37,0x3b,0x3d,0x3e}; Int y_offset, x_offset, x_tab, y_tab, z_tab; /* speedup coefficients */ Int b_xtab, b_ytab; video->zz_direction = 0; /* Standard MPEG-4 Headers do DC/AC prediction*/ /* check whether neighbors are INTER */ if (y_pos > 0) { Mode_top = video->headerInfo.Mode[mbnum-nMBPerRow]; if (!(Mode_top == MODE_INTRA || Mode_top == MODE_INTRA_Q)) { DCAC = DC_store[-nMBPerRow]; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; /* set to 0 DCAC_row[x_pos][0..3] */ if (bACPredEnable == TRUE) { M4VENC_MEMSET(DCAC_row[x_pos][0], 0, sizeof(Short) << 5); } } } if (x_pos > 0) { Mode_left = video->headerInfo.Mode[mbnum-1]; if (!(Mode_left == MODE_INTRA || Mode_left == MODE_INTRA_Q)) { DCAC = DC_store[-1]; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; *DCAC++ = mid_grey; /* set to 0 DCAC_col[x_pos][0..3] */ if (bACPredEnable == TRUE) { M4VENC_MEMSET(DCAC_col[0][0], 0, sizeof(Short) << 5); } } } S1 = 0; S2 = 0; for (comp = 0; comp < 6; comp++) { if (Ypos[comp] != 0) y_offset = -nMBPerRow; else y_offset = 0; x_offset = Xpos[comp]; x_tab = Xtab[comp]; y_tab = Ytab[comp]; z_tab = 
Ztab[comp]; b_xtab = B_Xtab[comp]; b_ytab = B_Ytab[comp]; qcoeff = MB->block[comp]; /****************************/ /* Store DC coefficients */ /****************************/ /* Store coeff values for Intra MB */ if (comp == 0) dc_scale = cal_dc_scalerENC(QP, 1) ; if (comp == 4) dc_scale = cal_dc_scalerENC(QP, 2) ; QPtmp = qcoeff[0] * dc_scale; /* DC value */ if (QPtmp > 2047) /* 10/10/01, add clipping (bug fixed) */ DC_store[0][comp] = 2047; else if (QPtmp < -2048) DC_store[0][comp] = -2048; else DC_store[0][comp] = QPtmp; /**************************************************************/ /* Find the direction of the prediction and the DC prediction */ /**************************************************************/ if ((x_pos == 0) && y_pos == 0) { /* top left corner */ block_A = (comp == 1 || comp == 3) ? DC_store[0][x_tab] : mid_grey; block_B = (comp == 3) ? DC_store[x_offset][z_tab] : mid_grey; block_C = (comp == 2 || comp == 3) ? DC_store[0][y_tab] : mid_grey; } else if (x_pos == 0) { /* left edge */ block_A = (comp == 1 || comp == 3) ? DC_store[0][x_tab] : mid_grey; block_B = ((comp == 1 && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])) || comp == 3) ? DC_store[y_offset+x_offset][z_tab] : mid_grey; block_C = (comp == 2 || comp == 3 || (Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]))) ? DC_store[y_offset][y_tab] : mid_grey; } else if (y_pos == 0) { /* top row */ block_A = (comp == 1 || comp == 3 || (Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1]))) ? DC_store[x_offset][x_tab] : mid_grey; block_B = ((comp == 2 && (slice_nb[mbnum] == slice_nb[mbnum-1])) || comp == 3) ? DC_store[y_offset + x_offset][z_tab] : mid_grey; block_C = (comp == 2 || comp == 3) ? DC_store[y_offset][y_tab] : mid_grey; } else { block_A = (comp == 1 || comp == 3 || (Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1]))) ? 
DC_store[x_offset][x_tab] : mid_grey; block_B = (((comp == 0 || comp == 4 || comp == 5) && (slice_nb[mbnum] == slice_nb[mbnum-1-nMBPerRow])) || (comp == 1 && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])) || (comp == 2 && (slice_nb[mbnum] == slice_nb[mbnum-1])) || (comp == 3)) ? (DC_store[y_offset + x_offset][z_tab]) : mid_grey; block_C = (comp == 2 || comp == 3 || (Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]))) ? DC_store[y_offset][y_tab] : mid_grey; } grad_hor = block_B - block_C; grad_ver = block_A - block_B; if ((PV_ABS(grad_ver)) < (PV_ABS(grad_hor))) { DC_pred = block_C; direction[comp] = 1; video->zz_direction = (video->zz_direction) | mask1[comp]; } else { DC_pred = block_A; direction[comp] = 0; //video->zz_direction=video->zz_direction<<1; } /* DC prediction */ QPtmp = dc_scale; /* 5/28/01 */ qcoeff[0] -= (DC_pred + QPtmp / 2) / QPtmp; if (bACPredEnable) { /***********************/ /* Find AC prediction */ /***********************/ if ((x_pos == 0) && y_pos == 0) /* top left corner */ { if (direction[comp] == 0) { if (comp == 1 || comp == 3) { QPtmp = QPMB[mbnum+x_offset]; DCAC = DCAC_col[0][b_ytab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } else { if (comp == 2 || comp == 3) { QPtmp = QPMB[mbnum+ y_offset]; DCAC = DCAC_row[x_pos][b_xtab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } } else if (x_pos == 0) /* left edge */ { if (direction[comp] == 0) { if (comp == 1 || comp == 3) { QPtmp = QPMB[mbnum+x_offset]; DCAC = DCAC_col[0][b_ytab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } else { if ((Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])) 
|| comp == 2 || comp == 3) { QPtmp = QPMB[mbnum+y_offset]; DCAC = DCAC_row[x_pos][b_xtab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } } else if (y_pos == 0) /* top row */ { if (direction[comp] == 0) { if ((Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1])) || comp == 1 || comp == 3) { QPtmp = QPMB[mbnum+x_offset]; DCAC = DCAC_col[0][b_ytab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } else { if (comp == 2 || comp == 3) { QPtmp = QPMB[mbnum+y_offset]; DCAC = DCAC_row[x_pos][b_xtab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } } else { if (direction[comp] == 0) { if ((Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1])) || comp == 1 || comp == 3) { QPtmp = QPMB[mbnum+x_offset]; DCAC = DCAC_col[0][b_ytab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } else { if ((Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])) || comp == 2 || comp == 3) { QPtmp = QPMB[mbnum+y_offset]; DCAC = DCAC_row[x_pos][b_xtab]; if (QPtmp != QP) { predptr = pred; PREDICT_AC } else { predptr = DCAC; } } else { predptr = pred; pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0; } } } /************************************/ /* Decide and Perform AC prediction */ /************************************/ newCBP[comp] = 0; if (direction[comp] == 0) /* Horizontal, left COLUMN of block A */ { DCAC = pcoeff + comp * 7; /* re-use DCAC as local var */ qcoeff += 8; for (m = 0; m < 7; m++) { QPtmp = qcoeff[m<<3]; if (QPtmp > 0) S1 += QPtmp; else S1 -= QPtmp; 
QPtmp -= predptr[m]; DCAC[m] = QPtmp; /* save prediction residue to pcoeff*/ if (QPtmp) newCBP[comp] = 1; diff = PV_ABS(QPtmp); S2 += diff; } } else /* Vertical, top ROW of block C */ { qcoeff++; DCAC = pcoeff + comp * 7; /* re-use DCAC as local var */ for (m = 0; m < 7; m++) { QPtmp = qcoeff[m]; if (QPtmp > 0) S1 += QPtmp; else S1 -= QPtmp; QPtmp -= predptr[m]; DCAC[m] = QPtmp; /* save prediction residue to pcoeff*/ if (QPtmp) newCBP[comp] = 1; diff = PV_ABS(QPtmp); S2 += diff; } } /****************************/ /* Store DCAC coefficients */ /****************************/ /* Store coeff values for Intra MB */ qcoeff = MB->block[comp]; DCAC = DCAC_row[x_pos][b_xtab]; DCAC[0] = qcoeff[1]; DCAC[1] = qcoeff[2]; DCAC[2] = qcoeff[3]; DCAC[3] = qcoeff[4]; DCAC[4] = qcoeff[5]; DCAC[5] = qcoeff[6]; DCAC[6] = qcoeff[7]; DCAC = DCAC_col[0][b_ytab]; DCAC[0] = qcoeff[8]; DCAC[1] = qcoeff[16]; DCAC[2] = qcoeff[24]; DCAC[3] = qcoeff[32]; DCAC[4] = qcoeff[40]; DCAC[5] = qcoeff[48]; DCAC[6] = qcoeff[56]; } /* bACPredEnable */ } /* END COMP FOR LOOP */ //if (diff > 2047) // break; S += (S1 - S2); if (S >= 0 && bACPredEnable == TRUE) { ACpred_flag[mbnum] = 1; DCAC = pcoeff; /* prediction residue */ qcoeff = MB->block[0]; for (comp = 0; comp < 6; comp++) { if (direction[comp] == 0) { qcoeff[8] = DCAC[0]; qcoeff[16] = DCAC[1]; qcoeff[24] = DCAC[2]; qcoeff[32] = DCAC[3]; qcoeff[40] = DCAC[4]; qcoeff[48] = DCAC[5]; qcoeff[56] = DCAC[6]; } else { qcoeff[1] = DCAC[0]; qcoeff[2] = DCAC[1]; qcoeff[3] = DCAC[2]; qcoeff[4] = DCAC[3]; qcoeff[5] = DCAC[4]; qcoeff[6] = DCAC[5]; qcoeff[7] = DCAC[6]; } if (newCBP[comp]) /* 5/28/01, update CBP */ video->headerInfo.CBP[mbnum] |= mask1[comp]; DCAC += 7; qcoeff += 64; } } else /* Only DC Prediction */ { ACpred_flag[mbnum] = 0; } *intraDC_decision = IntraDCSwitch_Decision(Mode, video->currVop->intraDCVlcThr, intraDCVlcQP); if (*intraDC_decision) /* code DC with AC , 5/28/01*/ { qcoeff = MB->block[0]; for (comp = 0; comp < 6; comp++) { if (*qcoeff) 
video->headerInfo.CBP[mbnum] |= mask1[comp];   /* nonzero DC joins the AC stream -> mark block coded */
            qcoeff += 64;
        }
    }

    return;
}
#endif /* H263_ONLY */

/* Compute the median motion-vector predictor (*mvx, *mvy) for the
 * given block of the current MB (0 = 16x16 MB vector, 1..4 = 8x8
 * block vectors) from up to three neighbor candidates: left, top and
 * top-right.  Candidates outside the picture/slice are forced to
 * zero; with only one usable candidate the "sum" degenerates to that
 * candidate, otherwise the component-wise median of the three is
 * taken (sum - max - min). */
Void find_pmvs(VideoEncData *video, Int block, Int *mvx, Int *mvy)
{
    Vol *currVol = video->vol[video->currLayer];
    // UChar *Mode = video->headerInfo.Mode; /* modes for MBs */
    UChar *slice_nb = video->sliceNo;
    Int nMBPerRow = currVol->nMBPerRow;
    Int mbnum = video->mbnum;

    Int p1x, p2x, p3x;
    Int p1y, p2y, p3y;
    Int xin1, xin2, xin3;
    Int yin1, yin2, yin3;
    Int vec1, vec2, vec3;
    Int rule1, rule2, rule3;
    MOT **motdata = video->mot;
    Int x = mbnum % nMBPerRow;
    Int y = mbnum / nMBPerRow;

    /*
        In a previous version, a MB vector (block = 0) was predicted the same way
        as block 1, which is the most likely interpretation of the VM.

        Therefore, if we have advanced pred. mode, and if all MBs around have
        only one 16x16 vector each, we chose the appropiate block as if these
        MBs have 4 vectors.

        This different prediction affects only 16x16 vectors of MBs with
        transparent blocks.

        In the current version, we choose for the 16x16 mode the first
        non-transparent block in the surrounding MBs
    */

    /* Select which neighbor MB/vector index supplies each of the
       three candidates for this block position. */
    switch (block)
    {
        case 0:
            vec1 = 2 ; yin1 = y ; xin1 = x - 1;
            vec2 = 3 ; yin2 = y - 1; xin2 = x;
            vec3 = 3 ; yin3 = y - 1; xin3 = x + 1;
            break;

        case 1:
            vec1 = 2 ; yin1 = y ; xin1 = x - 1;
            vec2 = 3 ; yin2 = y - 1; xin2 = x;
            vec3 = 3 ; yin3 = y - 1; xin3 = x + 1;
            break;

        case 2:
            vec1 = 1 ; yin1 = y ; xin1 = x;
            vec2 = 4 ; yin2 = y - 1; xin2 = x;
            vec3 = 3 ; yin3 = y - 1; xin3 = x + 1;
            break;

        case 3:
            vec1 = 4 ; yin1 = y ; xin1 = x - 1;
            vec2 = 1 ; yin2 = y ; xin2 = x;
            vec3 = 2 ; yin3 = y ; xin3 = x;
            break;

        default: /* case 4 */
            vec1 = 3 ; yin1 = y ; xin1 = x;
            vec2 = 1 ; yin2 = y ; xin2 = x;
            vec3 = 2 ; yin3 = y ; xin3 = x;
            break;
    }

    if (block == 0)
    {
        /* according to the motion encoding, we must choose a first
           non-transparent block in the surrounding MBs (16-mode) */
        if (x > 0 && slice_nb[mbnum] == slice_nb[mbnum-1])
            rule1 = 0;
        else
            rule1 = 1;

        if (y > 0 && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])
            rule2 = 0;
        else
            rule2 = 1;

        if ((x != nMBPerRow - 1) && (y > 0) && slice_nb[mbnum] == slice_nb[mbnum+1-nMBPerRow])
            rule3 = 0;
        else
            rule3 = 1;
    }
    else
    {
        /* check borders for single blocks (advanced mode) */
        /* rule 1 */
        if (((block == 1 || block == 3) &&
                (x == 0 || slice_nb[mbnum] != slice_nb[mbnum-1])))
            rule1 = 1;
        else
            rule1 = 0;

        /* rule 2 */
        if (((block == 1 || block == 2) &&
                (y == 0 || slice_nb[mbnum] != slice_nb[mbnum-nMBPerRow])))
            rule2 = 1;
        else
            rule2 = 0;

        /* rule 3 */
        if (((block == 1 || block == 2) &&
                (x == nMBPerRow - 1 || y == 0 ||
                 slice_nb[mbnum] != slice_nb[mbnum+1-nMBPerRow])))
            rule3 = 1;
        else
            rule3 = 0;
    }

    if (rule1)
    {
        p1x = p1y = 0;  /* candidate 1 unavailable */
    }
    else
    {
        p1x = motdata[yin1*nMBPerRow+xin1][vec1].x;
        p1y = motdata[yin1*nMBPerRow+xin1][vec1].y;
        //p1x = motxdata[xin1*2+(vec1&0x1) + (yin1*2+(vec1>>1))*xB];
        //p1y = motydata[xin1*2+(vec1&0x1) + (yin1*2+(vec1>>1))*xB];
    }

    if (rule2)
    {
        p2x = p2y = 0;  /* candidate 2 unavailable */
    }
    else
    {
        p2x = motdata[yin2*nMBPerRow+xin2][vec2].x;
        p2y = motdata[yin2*nMBPerRow+xin2][vec2].y;
        //p2x = motxdata[xin2*2+(vec2&0x1) + (yin2*2+(vec2>>1))*xB];
        //p2y = motydata[xin2*2+(vec2&0x1) + (yin2*2+(vec2>>1))*xB];
    }

    if (rule3)
    {
        p3x = p3y = 0;  /* candidate 3 unavailable */
    }
    else
    {
        p3x = motdata[yin3*nMBPerRow+xin3][vec3].x;
        p3y = motdata[yin3*nMBPerRow+xin3][vec3].y;
        //p3x = motxdata[xin3*2+ (vec3&0x1) + (yin3*2+(vec3>>1))*xB];
        //p3y = motydata[xin3*2+ (vec3&0x1) + (yin3*2+(vec3>>1))*xB];
    }

    if (rule1 && rule2 && rule3)
    {
        /* all MBs are outside the VOP */
        *mvx = *mvy = 0;
    }
    else if (rule1 + rule2 + rule3 == 2)
    {
        /* two of three are zero */
        *mvx = (p1x + p2x + p3x);
        *mvy = (p1y + p2y + p3y);
    }
    else
    {
        /* median of three = sum - max - min */
        *mvx = ((p1x + p2x + p3x - PV_MAX(p1x, PV_MAX(p2x, p3x)) - PV_MIN(p1x, PV_MIN(p2x, p3x))));
        *mvy = ((p1y + p2y + p3y - PV_MAX(p1y, PV_MAX(p2y, p3y)) - PV_MIN(p1y, PV_MIN(p2y, p3y))));
    }

    return;
}

/* Write one motion-vector-difference component to the bitstream:
 * VLC for the scaled magnitude (negative magnitudes map to table
 * entries 65+mag), then (f_code-1) residual bits when f_code > 1
 * and the magnitude is nonzero. */
Void WriteMVcomponent(Int f_code, Int dmv, BitstreamEncVideo *bs)
{
    Int residual, vlc_code_mag, bits, entry;

    ScaleMVD(f_code, dmv, &residual, &vlc_code_mag);

    if (vlc_code_mag < 0)
        entry = vlc_code_mag + 65;  /* table index for negative magnitudes */
    else
        entry = vlc_code_mag;

    bits = PutMV(entry, bs);

    if ((f_code != 1) && (vlc_code_mag != 0))
    {
        BitstreamPutBits(bs, f_code - 1, residual);
        bits += f_code - 1;
    }

    return;
}

/* Split an MV difference into the VLC-coded magnitude and the
 * fixed-length residual, after wrapping the difference into the
 * legal range for the given f_code. */
Void ScaleMVD(
    Int f_code,        /* <-- MV range in 1/2 units: 1=32,2=64,...,7=2048 */
    Int diff_vector,   /* <-- MV Difference component in 1/2 units */
    Int *residual,     /* --> value to be FLC coded */
    Int *vlc_code_mag  /* --> value to be VLC coded */
)
{
    Int range;
    Int scale_factor;
    Int r_size;
    Int low;
    Int high;
    Int aux;

    r_size = f_code - 1;
    scale_factor = 1 << r_size;
    range = 32 * scale_factor;
    low = -range;
    high = range - 1;

    /* wrap into [-range, range-1] */
    if (diff_vector < low)
        diff_vector += 2 * range;
    else if (diff_vector > high)
        diff_vector -= 2 * range;

    if (diff_vector == 0)
    {
        *vlc_code_mag = 0;
        *residual = 0;
    }
    else if (scale_factor == 1)
    {
        *vlc_code_mag = diff_vector;
        *residual = 0;
    }
    else
    {
        aux = PV_ABS(diff_vector) + scale_factor - 1;
        *vlc_code_mag = aux >> r_size;
        if (diff_vector < 0)
            *vlc_code_mag = -*vlc_code_mag;
        *residual = aux & (scale_factor - 1);
    }
}



================================================
FILE: RtspCamera/jni/m4v_h263/enc/src/vlc_encode.h
================================================
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
* ------------------------------------------------------------------- */ #ifndef _VLC_ENCODE_H_ #define _VLC_ENCODE_H_ #include "mp4def.h" #include "mp4enc_api.h" Int PutCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream); Int PutMCBPC_Inter(Int cbpc, Int mode, BitstreamEncVideo *bitstream); Int PutMCBPC_Intra(Int cbpc, Int mode, BitstreamEncVideo *bitstream); Int PutMV(Int mvint, BitstreamEncVideo *bitstream); Int PutDCsize_chrom(Int size, BitstreamEncVideo *bitstream); Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream); Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream); Int PutCoeff_Inter_RVLC(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutCoeff_Intra_RVLC(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutRunCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutRunCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutLevelCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Int PutLevelCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream); Void MB_CodeCoeff(VideoEncData *video, BitstreamEncVideo *bs); Void BlockCodeCoeff(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, UChar Mode, Int rvlc, Int shortVideoHeader); #endif /* _VLC_ENCODE_H_ */ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/vlc_encode_inline.h ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
#ifndef _VLC_ENCODE_INLINE_H_
#define _VLC_ENCODE_INLINE_H_

#if !defined(PV_ARM_GCC_V5)

/* Portable C version.  Walks the 64-position zig-zag significance
 * bitmap (bitmapzz[0] = first 32 zig-zag positions, MSB first;
 * bitmapzz[1] = the next 32) and emits {run, |level|, sign} triples
 * into RLB from the already zig-zag-ordered dataBlock, zeroing each
 * consumed coefficient.  nc is the number of zig-zag positions to
 * scan; returns the number of symbols produced. */
__inline  Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)
{
    Int idx, run, level, j;
    UInt end, match;

    idx = 0;
    j = 0;
    run = 0;
    match = 1 << 31;
    if (nc > 32)
        end = 1;                 /* scan the whole first word */
    else
        end = 1 << (32 - nc);    /* stop after nc positions */

    while (match >= end)
    {
        if ((match&bitmapzz[0]) == 0)
        {
            run++;
            j++;
            match >>= 1;
        }
        else
        {
            match >>= 1;
            level = dataBlock[j];
            dataBlock[j] = 0;    /* reset output */
            j++;
            if (level < 0)
            {
                RLB->level[idx] = -level;
                RLB->s[idx] = 1;
                RLB->run[idx] = run;
                run = 0;
                idx++;
            }
            else
            {
                RLB->level[idx] = level;
                RLB->s[idx] = 0;
                RLB->run[idx] = run;
                run = 0;
                idx++;
            }
        }
    }

    nc -= 32;                    /* remaining positions in the second word */
    if (nc > 0)
    {
        match = 1 << 31;
        end = 1 << (32 - nc);
        while (match >= end)
        {
            if ((match&bitmapzz[1]) == 0)
            {
                run++;
                j++;
                match >>= 1;
            }
            else
            {
                match >>= 1;
                level = dataBlock[j];
                dataBlock[j] = 0;    /* reset output */
                j++;
                if (level < 0)
                {
                    RLB->level[idx] = -level;
                    RLB->s[idx] = 1;
                    RLB->run[idx] = run;
                    run = 0;
                    idx++;
                }
                else
                {
                    RLB->level[idx] = level;
                    RLB->s[idx] = 0;
                    RLB->run[idx] = run;
                    run = 0;
                    idx++;
                }
            }
        }
    }

    return idx;
}

#elif defined(__CC_ARM)  /* only work with arm v5 */

/* ARM RVCT version: uses CLZ to skip straight to the next set bit of
 * the bitmap instead of testing one position at a time. */
__inline  Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)
{
    OSCL_UNUSED_ARG(nc);
    Int idx, run, level, j;
    UInt end, match;
    Int zzorder;

    idx = 0;
    run = 0;
    j = -1;
    __asm
    {
        ldr match, [bitmapzz]
        clz run, match
    }
    zzorder = 0;

    while (run < 32)
    {
        __asm
        {
            mov end, #0x80000000
            mov end, end, lsr run /* mask*/
            bic match, match, end /*
remove it from bitmap */ mov run, run, lsl #1 /* 05/09/02 */ ldrsh level, [dataBlock, run] /* load data */ strh zzorder, [dataBlock, run] /* reset output */ add j, j, #1 rsb run, j, run, lsr #1 /* delta run */ add j, j, run /* current position */ } if (level < 0) { RLB->level[idx] = -level; RLB->s[idx] = 1; RLB->run[idx] = run; run = 0; idx++; } else { RLB->level[idx] = level; RLB->s[idx] = 0; RLB->run[idx] = run; run = 0; idx++; } __asm { clz run, match } } __asm { ldr match, [bitmapzz, #4] clz run, match } while (run < 32) { __asm { mov end, #0x80000000 mov end, end, lsr run /* mask*/ bic match, match, end /* remove it from bitmap */ add run, run, #32 /* current position */ mov run, run, lsl #1 /* 09/02/05 */ ldrsh level, [dataBlock, run] /* load data */ strh zzorder, [dataBlock, run] /* reset output */ add j, j, #1 rsb run, j, run, lsr #1 /* delta run */ add j, j, run /* current position */ } if (level < 0) { RLB->level[idx] = -level; RLB->s[idx] = 1; RLB->run[idx] = run; run = 0; idx++; } else { RLB->level[idx] = level; RLB->s[idx] = 0; RLB->run[idx] = run; run = 0; idx++; } __asm { clz run, match } } return idx; } #elif (defined(PV_ARM_GCC_V5) ) /* ARM GNU COMPILER */ __inline Int m4v_enc_clz(UInt temp) { register Int rb; register UInt ra = (UInt)temp; asm volatile("clz %0, %1" : "=&r"(rb) : "r"(ra) ); return (rb); } __inline Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc) { OSCL_UNUSED_ARG(nc); Int idx, run, level = 0, j; UInt end = 0, match; Int zzorder; idx = 0; run = 0; j = -1; match = *bitmapzz; run = m4v_enc_clz(match); zzorder = 0; while (run < 32) { asm volatile("mov %0, #0x80000000\n\t" "mov %0, %0, lsr %1\n\t" "bic %2, %2, %0\n\t" "mov %1, %1, lsl #1\n\t" "ldrsh %3, [%6, %1]\n\t" "strh %5, [%6, %1]\n\t" "add %4, %4, #1\n\t" "rsb %1, %4, %1, lsr #1\n\t" "add %4, %4, %1" : "+r"(end), "+r"(run), "+r"(match), "=r"(level), "+r"(j) : "r"(zzorder), "r"(dataBlock)); if (level < 0) { RLB->level[idx] = -level; RLB->s[idx] = 1; 
RLB->run[idx] = run; run = 0; idx++; } else { RLB->level[idx] = level; RLB->s[idx] = 0; RLB->run[idx] = run; run = 0; idx++; } run = m4v_enc_clz(match); } match = bitmapzz[1]; run = m4v_enc_clz(match); while (run < 32) { asm volatile("mov %0, #0x80000000\n\t" "mov %0, %0, lsr %1\n\t" "bic %2, %2, %0\n\t" "add %1, %1, #32\n\t" "mov %1, %1, lsl #1\n\t" "ldrsh %3, [%6, %1]\n\t" "strh %5, [%6, %1]\n\t" "add %4, %4, #1\n\t" "rsb %1, %4, %1, lsr #1\n\t" "add %4, %4, %1" : "+r"(end), "+r"(run), "+r"(match), "+r"(level), "+r"(j) : "r"(zzorder), "r"(dataBlock)); if (level < 0) { RLB->level[idx] = -level; RLB->s[idx] = 1; RLB->run[idx] = run; run = 0; idx++; } else { RLB->level[idx] = level; RLB->s[idx] = 0; RLB->run[idx] = run; run = 0; idx++; } run = m4v_enc_clz(match); } return idx; } #endif #endif // _VLC_ENCODE_INLINE_H_ ================================================ FILE: RtspCamera/jni/m4v_h263/enc/src/vop.cpp ================================================ /* ------------------------------------------------------------------ * Copyright (C) 1998-2009 PacketVideo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. * See the License for the specific language governing permissions * and limitations under the License. 
* ------------------------------------------------------------------- */ #include "mp4def.h" #include "mp4lib_int.h" #include "mp4enc_lib.h" #include "bitstream_io.h" #include "m4venc_oscl.h" PV_STATUS EncodeShortHeader(BitstreamEncVideo *stream, Vop *currVop); PV_STATUS EncodeVOPHeader(BitstreamEncVideo *stream, Vol *currVol, Vop *currVop); PV_STATUS EncodeGOVHeader(BitstreamEncVideo *stream, UInt seconds); PV_STATUS EncodeVop_BXRC(VideoEncData *video); PV_STATUS EncodeVop_NoME(VideoEncData *video); /* ======================================================================== */ /* Function : DecodeVop() */ /* Date : 08/23/2000 */ /* Purpose : Encode VOP Header */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS EncodeVop(VideoEncData *video) { PV_STATUS status; Int currLayer = video->currLayer; Vol *currVol = video->vol[currLayer]; Vop *currVop = video->currVop; // BitstreamEncVideo *stream=video->bitstream1; UChar *Mode = video->headerInfo.Mode; rateControl **rc = video->rc; // UInt time=0; /*******************/ /* Initialize mode */ /*******************/ switch (currVop->predictionType) { case I_VOP: M4VENC_MEMSET(Mode, MODE_INTRA, sizeof(UChar)*currVol->nTotalMB); break; case P_VOP: M4VENC_MEMSET(Mode, MODE_INTER, sizeof(UChar)*currVol->nTotalMB); break; case B_VOP: /*M4VENC_MEMSET(Mode, MODE_INTER_B,sizeof(UChar)*nTotalMB);*/ return PV_FAIL; default: return PV_FAIL; } /*********************/ /* Motion Estimation */ /* compute MVs, scene change detection, edge padding, */ /* intra refresh, compute block activity */ /*********************/ MotionEstimation(video); /* do ME for the whole frame */ /***************************/ /* rate Control (assign QP) */ /* 4/11/01, clean-up, and put into a separate function */ /***************************/ status = RC_VopQPSetting(video, rc); if (status == PV_FAIL) return PV_FAIL; /**********************/ /* Encode VOP */ 
/**********************/ if (video->slice_coding) /* end here */ { /* initialize state variable for slice-based APIs */ video->totalSAD = 0; video->mbnum = 0; video->sliceNo[0] = 0; video->numIntra = 0; video->offset = 0; video->end_of_buf = 0; video->hp_guess = -1; return status; } status = EncodeVop_NoME(video); /******************************/ /* rate control (update stat) */ /* 6/2/01 separate function */ /******************************/ RC_VopUpdateStat(video, rc[currLayer]); return status; } /* ======================================================================== */ /* Function : EncodeVop_NoME() */ /* Date : 08/28/2001 */ /* History : */ /* Purpose : EncodeVop without motion est. */ /* In/out : */ /* Return : */ /* Modified : */ /* */ /* ======================================================================== */ PV_STATUS EncodeVop_NoME(VideoEncData *video) { Vop *currVop = video->currVop; Vol *currVol = video->vol[video->currLayer]; BitstreamEncVideo *stream = video->bitstream1; Int time = 0; /* follows EncodeVop value */ PV_STATUS status = PV_SUCCESS; if (currVol->shortVideoHeader) /* Short Video Header = 1 */ { status = EncodeShortHeader(stream, currVop); /* Encode Short Header */ video->header_bits = BitstreamGetPos(stream); /* Header Bits */ status = EncodeFrameCombinedMode(video); } #ifndef H263_ONLY else /* Short Video Header = 0 */ { if (currVol->GOVStart && currVop->predictionType == I_VOP) status = EncodeGOVHeader(stream, time); /* Encode GOV Header */ status = EncodeVOPHeader(stream, currVol, currVop); /* Encode VOP Header */ video->header_bits = BitstreamGetPos(stream); /* Header Bits */ if (currVop->vopCoded) { if (!currVol->scalability) { if (currVol->dataPartitioning) { status = EncodeFrameDataPartMode(video); /* Encode Data Partitioning Mode VOP */ } else { status = EncodeFrameCombinedMode(video); /* Encode Combined Mode VOP */ } } else status = EncodeFrameCombinedMode(video); /* Encode Combined Mode VOP */ } else /* Vop Not coded */ { 
return status; } } #endif /* H263_ONLY */ return status; } #ifndef NO_SLICE_ENCODE /* ======================================================================== */ /* Function : EncodeSlice() */ /* Date : 04/19/2002 */ /* History : */ /* Purpose : Encode one slice. */ /* In/out : */ /* Return : */ /* Modified : */ /* */ /* ======================================================================== */ PV_STATUS EncodeSlice(VideoEncData *video) { Vop *currVop = video->currVop; Int currLayer = video->currLayer; Vol *currVol = video->vol[currLayer]; BitstreamEncVideo *stream = video->bitstream1; /* different from frame-based */ Int time = 0; /* follows EncodeVop value */ PV_STATUS status = PV_SUCCESS; rateControl **rc = video->rc; if (currVol->shortVideoHeader) /* Short Video Header = 1 */ { if (video->mbnum == 0) { status = EncodeShortHeader(stream, currVop); /* Encode Short Header */ video->header_bits = BitstreamGetPos(stream); /* Header Bits */ } status = EncodeSliceCombinedMode(video); } #ifndef H263_ONLY else /* Short Video Header = 0 */ { if (video->mbnum == 0) { if (currVol->GOVStart) status = EncodeGOVHeader(stream, time); /* Encode GOV Header */ status = EncodeVOPHeader(stream, currVol, currVop); /* Encode VOP Header */ video->header_bits = BitstreamGetPos(stream); /* Header Bits */ } if (currVop->vopCoded) { if (!currVol->scalability) { if (currVol->dataPartitioning) { status = EncodeSliceDataPartMode(video); /* Encode Data Partitioning Mode VOP */ } else { status = EncodeSliceCombinedMode(video); /* Encode Combined Mode VOP */ } } else status = EncodeSliceCombinedMode(video); /* Encode Combined Mode VOP */ } else /* Vop Not coded */ { return status; } } #endif /* H263_ONLY */ if (video->mbnum >= currVol->nTotalMB && status != PV_END_OF_BUF) /* end of Vop */ { /******************************/ /* rate control (update stat) */ /* 6/2/01 separate function */ /******************************/ status = RC_VopUpdateStat(video, rc[currLayer]); } return status; } #endif 
/* NO_SLICE_ENCODE */ #ifndef H263_ONLY /* ======================================================================== */ /* Function : EncodeGOVHeader() */ /* Date : 08/23/2000 */ /* Purpose : Encode GOV Header */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS EncodeGOVHeader(BitstreamEncVideo *stream, UInt seconds) { PV_STATUS status; // int temp; UInt tmpvar; /********************************/ /* Group_of_VideoObjectPlane() */ /********************************/ status = BitstreamPutGT16Bits(stream, 32, GROUP_START_CODE); /* time_code */ tmpvar = seconds / 3600; status = BitstreamPutBits(stream, 5, tmpvar); /* Hours*/ tmpvar = (seconds - tmpvar * 3600) / 60; status = BitstreamPutBits(stream, 6, tmpvar); /* Minutes*/ status = BitstreamPut1Bits(stream, 1); /* Marker*/ tmpvar = seconds % 60; status = BitstreamPutBits(stream, 6, tmpvar); /* Seconds*/ status = BitstreamPut1Bits(stream, 1); /* closed_gov */ status = BitstreamPut1Bits(stream, 0); /* broken_link */ /*temp =*/ BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align GOV Header */ return status; } #ifdef ALLOW_VOP_NOT_CODED PV_STATUS EncodeVopNotCoded(VideoEncData *video, UChar *bstream, Int *size, ULong modTime) { PV_STATUS status; Vol *currVol = video->vol[0]; Vop *currVop = video->currVop; BitstreamEncVideo *stream = currVol->stream; UInt frameTick; Int timeInc; stream->bitstreamBuffer = bstream; stream->bufferSize = *size; BitstreamEncReset(stream); status = BitstreamPutGT16Bits(stream, 32, VOP_START_CODE); /*Start Code for VOP*/ status = BitstreamPutBits(stream, 2, P_VOP);/* VOP Coding Type*/ frameTick = (Int)(((double)(modTime - video->modTimeRef) * currVol->timeIncrementResolution + 500) / 1000); timeInc = frameTick - video->refTick[0]; while (timeInc >= currVol->timeIncrementResolution) { timeInc -= currVol->timeIncrementResolution; status = BitstreamPut1Bits(stream, 1); /* do not update refTick and modTimeRef yet, 
do it after encoding!! */ } status = BitstreamPut1Bits(stream, 0); status = BitstreamPut1Bits(stream, 1); /* marker bit */ status = BitstreamPutBits(stream, currVol->nbitsTimeIncRes, timeInc); /* vop_time_increment */ status = BitstreamPut1Bits(stream, 1); /* marker bit */ status = BitstreamPut1Bits(stream, 0); /* vop_coded bit */ BitstreamMpeg4ByteAlignStuffing(stream); return status; } #endif /* ======================================================================== */ /* Function : EncodeVOPHeader() */ /* Date : 08/23/2000 */ /* Purpose : Encode VOP Header */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS EncodeVOPHeader(BitstreamEncVideo *stream, Vol *currVol, Vop *currVop) { PV_STATUS status; //int temp; int MTB = currVol->moduloTimeBase; /************************/ /* VideoObjectPlane() */ /************************/ status = BitstreamPutGT16Bits(stream, 32, VOP_START_CODE); /*Start Code for VOP*/ status = BitstreamPutBits(stream, 2, currVop->predictionType);/* VOP Coding Type*/ currVol->prevModuloTimeBase = currVol->moduloTimeBase; while (MTB) { status = BitstreamPut1Bits(stream, 1); MTB--; } status = BitstreamPut1Bits(stream, 0); status = BitstreamPut1Bits(stream, 1); /* marker bit */ status = BitstreamPutBits(stream, currVol->nbitsTimeIncRes, currVop->timeInc); /* vop_time_increment */ status = BitstreamPut1Bits(stream, 1); /* marker bit */ status = BitstreamPut1Bits(stream, currVop->vopCoded); /* vop_coded bit */ if (currVop->vopCoded == 0) { /*temp =*/ BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align VOP Header */ return status; } if (currVop->predictionType == P_VOP) status = BitstreamPut1Bits(stream, currVop->roundingType); /* vop_rounding_type */ status = BitstreamPutBits(stream, 3, currVop->intraDCVlcThr); /* intra_dc_vlc_thr */ status = BitstreamPutBits(stream, 5, currVop->quantizer); /* vop_quant */ if (currVop->predictionType != I_VOP) status = 
BitstreamPutBits(stream, 3, currVop->fcodeForward); /* vop_fcode_forward */ if (currVop->predictionType == B_VOP) status = BitstreamPutBits(stream, 3, currVop->fcodeBackward);/* vop_fcode_backward */ if (currVol->scalability) /* enhancement_type = 0 */ status = BitstreamPutBits(stream, 2, currVop->refSelectCode); /* ref_select_code */ return status; } #endif /* H263_ONLY */ /* ======================================================================== */ /* Function : EncodeShortHeader() */ /* Date : 08/23/2000 */ /* Purpose : Encode VOP Header */ /* In/out : */ /* Return : */ /* Modified : */ /* ======================================================================== */ PV_STATUS EncodeShortHeader(BitstreamEncVideo *stream, Vop *currVop) { PV_STATUS status; status = BitstreamPutGT16Bits(stream, 22, SHORT_VIDEO_START_MARKER); /* Short_video_start_marker */ status = BitstreamPutBits(stream, 8, currVop->temporalRef); /* temporal_reference */ status = BitstreamPut1Bits(stream, 1); /* marker bit */ status = BitstreamPut1Bits(stream, 0); /* zero bit */ status = BitstreamPut1Bits(stream, 0); /* split_screen_indicator=0*/ status = BitstreamPut1Bits(stream, 0); /* document_camera_indicator=0*/ status = BitstreamPut1Bits(stream, 0); /* full_picture_freeze_release=0*/ switch (currVop->width) { case 128: if (currVop->height == 96) status = BitstreamPutBits(stream, 3, 1); /* source_format = 1 */ else { status = PV_FAIL; return status; } break; case 176: if (currVop->height == 144) status = BitstreamPutBits(stream, 3, 2); /* source_format = 2 */ else { status = PV_FAIL; return status; } break; case 352: if (currVop->height == 288) status = BitstreamPutBits(stream, 3, 3); /* source_format = 3 */ else { status = PV_FAIL; return status; } break; case 704: if (currVop->height == 576) status = BitstreamPutBits(stream, 3, 4); /* source_format = 4 */ else { status = PV_FAIL; return status; } break; case 1408: if (currVop->height == 1152) status = BitstreamPutBits(stream, 3, 5); /* 
source_format = 5 */ else { status = PV_FAIL; return status; } break; default: status = PV_FAIL; return status; } status = BitstreamPut1Bits(stream, currVop->predictionType); /* picture_coding type */ status = BitstreamPutBits(stream, 4, 0); /* four_reserved_zero_bits */ status = BitstreamPutBits(stream, 5, currVop->quantizer); /* vop_quant*/ status = BitstreamPut1Bits(stream, 0); /* zero_bit*/ status = BitstreamPut1Bits(stream, 0); /* pei=0 */ return status; } #ifndef H263_ONLY /* ======================================================================== */ /* Function : EncodeVideoPacketHeader() */ /* Date : 09/05/2000 */ /* History : */ /* Purpose : Encode a frame of MPEG4 bitstream in Combined mode. */ /* In/out : */ /* Return : */ /* Modified : 04/25/2002 */ /* Add bitstream structure as input argument */ /* */ /* ======================================================================== */ PV_STATUS EncodeVideoPacketHeader(VideoEncData *video, int MB_number, int quant_scale, Int insert) { // PV_STATUS status=PV_SUCCESS; int fcode; Vop *currVop = video->currVop; Vol *currVol = video->vol[video->currLayer]; BitstreamEncVideo *bs, tmp; UChar buffer[30]; if (insert) /* insert packet header to the beginning of bs1 */ { tmp.bitstreamBuffer = buffer; /* use temporary buffer */ tmp.bufferSize = 30; BitstreamEncReset(&tmp); bs = &tmp; } else bs = video->bitstream1; if (currVop->predictionType == I_VOP) BitstreamPutGT16Bits(bs, 17, 1); /* resync_marker I_VOP */ else if (currVop->predictionType == P_VOP) { fcode = currVop->fcodeForward; BitstreamPutGT16Bits(bs, 16 + fcode, 1); /* resync_marker P_VOP */ } else { fcode = currVop->fcodeForward; if (currVop->fcodeBackward > fcode) fcode = currVop->fcodeBackward; BitstreamPutGT16Bits(bs, 16 + fcode, 1); /* resync_marker B_VOP */ } BitstreamPutBits(bs, currVol->nBitsForMBID, MB_number); /* resync_marker */ BitstreamPutBits(bs, 5, quant_scale); /* quant_scale */ BitstreamPut1Bits(bs, 0); /* header_extension_code = 0 */ if (0) /* 
header_extension_code = 1 */ { /* NEED modulo_time_base code here ... default 0x01 belo*/ /*status =*/ BitstreamPut1Bits(bs, 1); /*status = */ BitstreamPut1Bits(bs, 0); /*status = */ BitstreamPut1Bits(bs, 1); /* marker bit */ /*status = */ BitstreamPutBits(bs, currVol->nbitsTimeIncRes, currVop->timeInc); /* vop_time_increment */ /*status = */ BitstreamPut1Bits(bs, 1); /* marker bit */ /*status = */ BitstreamPutBits(bs, 2, currVop->predictionType);/* VOP Coding Type*/ /*status = */ BitstreamPutBits(bs, 3, currVop->intraDCVlcThr); /* intra_dc_vlc_thr */ if (currVop->predictionType != I_VOP) /*status = */ BitstreamPutBits(bs, 3, currVop->fcodeForward); if (currVop->predictionType == B_VOP) /*status = */ BitstreamPutBits(bs, 3, currVop->fcodeBackward); } #ifndef NO_SLICE_ENCODE if (insert) BitstreamPrependPacket(video->bitstream1, bs); #endif return PV_SUCCESS; } #endif /* H263_ONLY */ ================================================ FILE: RtspCamera/proguard.cfg ================================================ -optimizationpasses 5 -dontusemixedcaseclassnames -dontskipnonpubliclibraryclasses -dontpreverify -verbose -optimizations !code/simplification/arithmetic,!field/*,!class/merging/* -keep public class * extends android.app.Activity -keep public class * extends android.app.Application -keep public class * extends android.app.Service -keep public class * extends android.content.BroadcastReceiver -keep public class * extends android.content.ContentProvider -keep public class * extends android.app.backup.BackupAgentHelper -keep public class * extends android.preference.Preference -keep public class com.android.vending.licensing.ILicensingService -keepclasseswithmembernames class * { native <methods>; } -keepclasseswithmembers class * { public <init>(android.content.Context, android.util.AttributeSet); } -keepclasseswithmembers class * { public <init>(android.content.Context, android.util.AttributeSet, int); } -keepclassmembers class * extends android.app.Activity { public void 
*(android.view.View); } -keepclassmembers enum * { public static **[] values(); public static ** valueOf(java.lang.String); } -keep class * implements android.os.Parcelable { public static final android.os.Parcelable$Creator *; } ================================================ FILE: RtspCamera/project.properties ================================================ # This file is automatically generated by Android Tools. # Do not modify this file -- YOUR CHANGES WILL BE ERASED! # # This file must be checked in Version Control Systems. # # To customize properties used by the Ant build system use, # "ant.properties", and override values to adapt the script to your # project structure. # Project target. target=android-10 android.library=true ================================================ FILE: RtspCamera/res/layout/cameraapicodecs.xml ================================================ ================================================ FILE: RtspCamera/res/layout/cameranativecodecs.xml ================================================ ================================================ FILE: RtspCamera/res/values/strings.xml ================================================ Spydroid running... RtspCamera ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/CoreException.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core; /** * Core module exception * * @author JM. Auffret */ public class CoreException extends java.lang.Exception { static final long serialVersionUID = 1L; /** * Constructor * * @param error Error message */ public CoreException(String error) { super(error); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/CodecChain.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec; import com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorOutputStream; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.utils.logger.Logger; /** * Codec chain * * @author jexa7410 */ public class CodecChain { /** * List of codecs */ private Codec[] codecs = null; /** * List of buffers */ private Buffer[] buffers = null; /** * Renderer */ private ProcessorOutputStream renderer; /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param codecs Codecs list */ public CodecChain(Codec[] codecs, ProcessorOutputStream renderer) { this.codecs = codecs; this.renderer = renderer; // Create the buffer chain buffers = new Buffer[codecs.length+1]; for (int i = 0; i < codecs.length; i++) { buffers[i] = new Buffer(); } // Prepare codecs for(int i=0; i < codecs.length; i++) { if (logger.isActivated()) { logger.debug("Open codec " + codecs[i].getClass().getName()); } codecs[i].open(); } } /** * Codec chain processing * * @param input Input buffer * @return Result */ public int process(Buffer input) { int codecNo = 0; return doProcess(codecNo, input); } /** * Recursive codec processing * * @param codecNo Codec index * @param input Input buffer * @return Result */ private int doProcess(int codecNo, Buffer input) { if (codecNo == codecs.length) { // End of chain try { // Write data to the output stream renderer.write(input); return Codec.BUFFER_PROCESSED_OK; } catch (Exception e) { return Codec.BUFFER_PROCESSED_FAILED; } } else { // Process this codec Codec codec = codecs[codecNo]; int returnVal; do { try { returnVal = codec.process(input, buffers[codecNo]); } catch (Exception e) { return Codec.BUFFER_PROCESSED_FAILED; } if (returnVal == Codec.BUFFER_PROCESSED_FAILED) return 
Codec.BUFFER_PROCESSED_FAILED; if ((returnVal & Codec.OUTPUT_BUFFER_NOT_FILLED) == 0) { if (!(buffers[codecNo].isDiscard() || buffers[codecNo].isEOM())) { doProcess(codecNo + 1, buffers[codecNo]); } buffers[codecNo].setOffset(0); buffers[codecNo].setLength(0); buffers[codecNo].setFlags(0); } } while((returnVal & Codec.INPUT_BUFFER_NOT_CONSUMED) != 0); return returnVal; } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/MediaRegistry.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp; import java.util.Enumeration; import java.util.Hashtable; import java.util.Vector; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; import com.orangelabs.rcs.core.ims.protocol.rtp.format.audio.AudioFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.audio.PcmuAudioFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.video.VideoFormat; /** * Media registry that handles the supported codecs * * @author jexa7410 */ public class MediaRegistry { /** * Supported codecs */ private static Hashtable SUPPORTED_CODECS = new Hashtable(); static { SUPPORTED_CODECS.put(H263VideoFormat.ENCODING.toLowerCase(), new H263VideoFormat()); SUPPORTED_CODECS.put(H264VideoFormat.ENCODING.toLowerCase(), new H264VideoFormat()); SUPPORTED_CODECS.put(PcmuAudioFormat.ENCODING.toLowerCase(), new PcmuAudioFormat()); } /** * Returns the list of the supported video format * * @return List of video formats */ public static Vector getSupportedVideoFormats() { Vector list = new Vector(); for (Enumeration e = SUPPORTED_CODECS.elements() ; e.hasMoreElements() ;) { Format fmt = (Format)e.nextElement(); if (fmt instanceof VideoFormat) { list.addElement((VideoFormat)fmt); } } return list; } /** * Returns the list of the supported audio format * * @return List of audio formats */ public static Vector getSupportedAudioFormats() { Vector list = new Vector(); for (Enumeration e = SUPPORTED_CODECS.elements() ; e.hasMoreElements() ;) { Format fmt = (Format)e.nextElement(); if (fmt instanceof AudioFormat) { list.addElement((AudioFormat)fmt); } } return list; } /** * Generate the format associated to the codec name * * @param codec Codec 
name * @return Format */ public static Format generateFormat(String codec) { return (Format)SUPPORTED_CODECS.get(codec.toLowerCase()); } /** * Is codec supported * * @param codec Codec name * @return Boolean */ public static boolean isCodecSupported(String codec) { Format format = (Format)SUPPORTED_CODECS.get(codec.toLowerCase()); return (format != null); } /** * Generate the codec encoding chain * * @param encoding Encoding name * @return Codec chain */ public static Codec[] generateEncodingCodecChain(String encoding) { if (encoding.toLowerCase().equalsIgnoreCase(H263VideoFormat.ENCODING)) { // Java H263 packetizer Codec[] chain = { new com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.JavaPacketizer() }; return chain; } else { // Codec implemented in the native part return new Codec[0]; } } /** * Generate the decoding codec chain * * @param encoding Encoding name * @return Codec chain */ public static Codec[] generateDecodingCodecChain(String encoding) { if (encoding.toLowerCase().equalsIgnoreCase(H263VideoFormat.ENCODING)) { // Java H263 depacketizer Codec[] chain = { new com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.JavaDepacketizer() }; return chain; } else { // Codec implemented in the native part return new Codec[0]; } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/MediaRtpReceiver.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaOutput; import com.orangelabs.rcs.core.ims.protocol.rtp.stream.MediaRendererStream; import com.orangelabs.rcs.core.ims.protocol.rtp.stream.RtpInputStream; import com.orangelabs.rcs.utils.logger.Logger; /** * Media RTP receiver */ public class MediaRtpReceiver { /** * Media processor */ private Processor processor = null; /** * Local port number (RTP listening port) */ private int localPort; /** * RTP Input Stream */ private RtpInputStream inputStream = null; /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param localPort Local port number */ public MediaRtpReceiver(int localPort) { this.localPort = localPort; } /** * Prepare the RTP session * * @param renderer Media renderer * @param format Media format * @throws RtpException */ public void prepareSession(MediaOutput renderer, Format format) throws RtpException { try { // Create the input stream inputStream = new RtpInputStream(localPort, format); inputStream.open(); if (logger.isActivated()) { logger.debug("Input stream: " + inputStream.getClass().getName()); } // Create the output stream MediaRendererStream outputStream = new MediaRendererStream(renderer); outputStream.open(); if (logger.isActivated()) { logger.debug("Output stream: " + 
outputStream.getClass().getName()); } // Create the codec chain Codec[] codecChain = MediaRegistry.generateDecodingCodecChain(format.getCodec()); // Create the media processor processor = new Processor(inputStream, outputStream, codecChain); if (logger.isActivated()) { logger.debug("Session has been prepared with success"); } } catch(Exception e) { if (logger.isActivated()) { logger.error("Can't prepare resources correctly", e); } throw new RtpException("Can't prepare resources"); } } /** * Start the RTP session */ public void startSession() { if (logger.isActivated()) { logger.info("Start the session"); } // Start the media processor if (processor != null) { processor.startProcessing(); } } /** * Stop the RTP session */ public void stopSession() { if (logger.isActivated()) { logger.info("Stop the session"); } // Stop the media processor if (processor != null) { processor.stopProcessing(); } } /** * Returns the RTP input stream * * @return RTP input stream */ public RtpInputStream getInputStream() { return inputStream; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/Processor.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec; import com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorInputStream; import com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorOutputStream; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.utils.logger.Logger; /** * Media processor. A processor receives an input stream, use a codec chain * to filter the data before to send it to the output stream. * * @author jexa7410 */ public class Processor extends Thread { /** * Processor input stream */ private ProcessorInputStream inputStream; /** * Processor output stream */ private ProcessorOutputStream outputStream; /** * Codec chain */ private CodecChain codecChain; /** * Processor status flag */ private boolean interrupted = false; /** * bigger Sequence Number */ private long bigSeqNum = 0; /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param inputStream Input stream * @param outputStream Output stream * @param codecs List of codecs */ public Processor(ProcessorInputStream inputStream, ProcessorOutputStream outputStream, Codec[] codecs) { super(); this.inputStream = inputStream; this.outputStream = outputStream; // Create the codec chain codecChain = new CodecChain(codecs, outputStream); if (logger.isActivated()) { logger.debug("Media processor created"); } } /** * Start processing */ public void startProcessing() { if (logger.isActivated()) { logger.debug("Start media processor"); } interrupted = false; bigSeqNum = 0; start(); } /** * Stop processing */ public void stopProcessing() { if (logger.isActivated()) { logger.debug("Stop media processor"); } interrupted = true; // Close streams outputStream.close(); inputStream.close(); } /** * Background processing */ public void run() { try { if (logger.isActivated()) { 
logger.debug("Processor processing is started"); } // Start processing while (!interrupted) { // Read data from the input stream Buffer inBuffer = inputStream.read(); if (inBuffer == null) { interrupted = true; if (logger.isActivated()) { logger.debug("Processing terminated: null data received"); } break; } // Drop the old packet long seqNum = inBuffer.getSequenceNumber(); if (seqNum + 3 > bigSeqNum) { /* * don't send a packet twice * with in band SPS/PPS parameters this will break processing otherwise */ if (seqNum==bigSeqNum) continue; if (seqNum > bigSeqNum) { bigSeqNum = seqNum; } // Codec chain processing int result = codecChain.process(inBuffer); if ((result != Codec.BUFFER_PROCESSED_OK) && (result != Codec.OUTPUT_BUFFER_NOT_FILLED)) { interrupted = true; if (logger.isActivated()) { logger.error("Codec chain processing error: " + result); } break; } } } } catch (Exception e) { if (!interrupted) { if (logger.isActivated()) { logger.error("Processor error", e); } } else { if (logger.isActivated()) { logger.debug("Processor processing has been terminated"); } } } } /** * Returns the input stream * * @return Stream */ public ProcessorInputStream getInputStream() { return inputStream; } /** * Returns the output stream * * @return Stream */ public ProcessorOutputStream getOutputStream() { return outputStream; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/RtpException.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp; /** * RTP exception * * @author JM. Auffret */ public class RtpException extends java.lang.Exception { static final long serialVersionUID = 1L; /** * Constructor * * @param error Error message */ public RtpException(String error) { super(error); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/Codec.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; /** * Abstract codec * * @author jexa7410 */ public abstract class Codec { /** * The input buffer was converted successfully to output */ public static final int BUFFER_PROCESSED_OK = 0; /** * The input buffer could not be handled */ public static final int BUFFER_PROCESSED_FAILED = 1 << 0; /** * The input buffer chunk was not fully consumed */ public static final int INPUT_BUFFER_NOT_CONSUMED = 1 << 1; /** * The output buffer chunk was not filled */ public static final int OUTPUT_BUFFER_NOT_FILLED = 1 << 2; /** * Input format */ private Format inputFormat; /** * Ouput format */ private Format outputFormat; /** * Set the input format * * @param input Input format * @return New format */ public Format setInputFormat(Format input) { inputFormat = input; return input; } /** * Set the output format * * @param output Output format * @return New format */ public Format setOutputFormat(Format output) { outputFormat = output; return output; } /** * Return the input format * * @return Format */ public Format getInputFormat() { return inputFormat; } /** * Return the output format * * @return Format */ public Format getOutputFormat() { return outputFormat; } /** * Reset the codec */ public void reset() { } /** * Open the codec */ public void open() { } /** * Close the codec */ public void close() { } /** * Test if it's the end of media * * @return Boolean */ protected boolean isEOM(Buffer inputBuffer) { return inputBuffer.isEOM(); } /** * Propagate EOM to the ouput buffer * * @param outputBuffer Ouput buffer */ protected void propagateEOM(Buffer outputBuffer) { updateOutput(outputBuffer, getOutputFormat(), 0, 0); outputBuffer.setEOM(true); } /** * Update the ouput buffer informations * * @param outputBuffer Ouput buffer 
* @param format Ouput format * @param length Ouput length * @param offset Ouput offset */ protected void updateOutput(Buffer outputBuffer, Format format, int length, int offset) { outputBuffer.setFormat(format); outputBuffer.setLength(length); outputBuffer.setOffset(offset); } /** * Check the input buffer * * @param inputBuffer Input buffer * @return Boolean */ protected boolean checkInputBuffer(Buffer inputBuffer) { boolean fError = !isEOM(inputBuffer) && (inputBuffer == null || inputBuffer.getFormat() == null); return !fError; } /** * Validate that the Buffer's data size is at least newSize * * @return Array with sufficient capacity */ protected byte[] validateByteArraySize(Buffer buffer, int newSize) { byte[] typedArray = (byte[]) buffer.getData(); if (typedArray != null) { if (typedArray.length >= newSize) { return typedArray; } byte[] tempArray = new byte[newSize]; System.arraycopy(typedArray, 0, tempArray, 0, typedArray.length); typedArray = tempArray; } else { typedArray = new byte[newSize]; } buffer.setData(typedArray); return typedArray; } /** * Performs the media processing defined by this codec * * @param input The buffer that contains the media data to be processed * @param output The buffer in which to store the processed media data * @return Processing result */ public abstract int process(Buffer input, Buffer output); } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/VideoCodec.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec; /** * Video codec abstract class * * @author jexa7410 */ public abstract class VideoCodec extends Codec { } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/H263Config.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263; /** * Default H263 settings. 
* * @author hlxn7157 */ public class H263Config { /** * H263 Codec Name */ public final static String CODEC_NAME = "h263-2000"; /** * Default clock rate */ public final static int CLOCK_RATE = 90000; /** * Default codec params */ public final static String CODEC_PARAMS = "profile=0;level=10"; // public final static String CODEC_PARAMS = "profile=0;level=20"; /** * Default video width */ // public final static int VIDEO_WIDTH = 176; public final static int VIDEO_WIDTH = 352; /** * Default video height */ // public final static int VIDEO_HEIGHT = 144; public final static int VIDEO_HEIGHT = 288; /** * Default video frame rate */ public final static int FRAME_RATE = 12; /** * Default video bit rate */ public final static int BIT_RATE = 128000; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/H263RtpHeader.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263; /** * RFC 4629: a special header is added to each H263+ packet that * immediately follows the RTP header: * * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | RR |P|V| PLEN |PEBIT| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ public class H263RtpHeader{ public int HEADER_SIZE = 2; public byte RR; public boolean P; public boolean V; public int PLEN; public int PEBIT; /** * Constructor * * @param RR * @param P * @param V * @param PLEN * @param PEBIT */ public H263RtpHeader(final byte RR, final boolean P, final boolean V, final int PLEN, final int PEBIT){ this.RR = RR; this.P = P; this.V = V; this.PLEN = PLEN; this.PEBIT = PEBIT; } /** * Constructor * * @param data */ public H263RtpHeader(byte[] data){ RR = (byte)(data[0]>>3); P = (data[0]&0x4) != 0; V = (data[0]&0x2) != 0; PLEN = ((data[0]&0x1)<<5)|(data[1]>>3); PEBIT = data[1]&0x7; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/JavaDepacketizer.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263;

import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.VideoCodec;
import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;
import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;

/**
 * Reassembles H263+ RTP packets into H263+ frames, as per RFC 4629
 * Complete frames are sent to decoder once reassembled
 */
public class JavaDepacketizer extends VideoCodec {
    /**
     * Collection of frameAssemblers.
     * Allows the construction of several frames if incoming packets are ouf of order
     */
    FrameAssemblerCollection assemblersCollection = new FrameAssemblerCollection();

    /**
     * Max frame size to give for next module, as some decoder have frame size limits
     */
    private static int MAX_H263P_FRAME_SIZE = 8192;

    /**
     * Constructor
     */
    public JavaDepacketizer(){
    }

    /**
     * Performs the media processing defined by this codec
     *
     * @param input The buffer that contains the media data to be processed
     * @param output The buffer in which to store the processed media data
     * @return Processing result (BUFFER_PROCESSED_OK once a complete frame
     *         has been copied to output, OUTPUT_BUFFER_NOT_FILLED otherwise)
     */
    public int process(Buffer input, Buffer output){
        if (!input.isDiscard()) {
            // Route the fragment to the assembler for its timestamp
            assemblersCollection.put(input);
            if (assemblersCollection.getLastActiveAssembler().complete()){
                // Frame is complete: hand it to the output buffer and drop
                // assemblers for frames older than this timestamp.
                // NOTE(review): removeOldestThan() is not visible in this
                // extract of FrameAssemblerCollection — see corruption note below.
                assemblersCollection.getLastActiveAssembler().copyToBuffer(output);
                assemblersCollection.removeOldestThan(input.getTimeStamp());
                return BUFFER_PROCESSED_OK;
            }else{
                output.setDiscard(true);
                return OUTPUT_BUFFER_NOT_FILLED;
            }
        }else{
            output.setDiscard(true);
            return OUTPUT_BUFFER_NOT_FILLED;
        }
    }

    /**
     * Used to assemble fragments with the same timestamp into a single frame.
     */
    static class FrameAssembler{
        // have we received the RTP marker that signifies the end of a frame?
        private boolean rtpMarker = false;
        // concatenated payload bytes of the frame collected so far (null = empty)
        private byte[] reassembledData = null;
        // RTP timestamp of the frame being assembled (-1 until first fragment)
        private long timeStamp = -1;
        // media format taken from the first fragment
        private Format format = null;

        /**
         * Add the buffer (which contains a fragment) to the assembler.
         * Strips the RFC 4629 payload header (plus optional VRC byte and
         * extra picture header) and appends the remaining bytes.
         */
        public void put(Buffer buffer){
            // Read rtpMarker
            rtpMarker = (buffer.isRTPMarkerSet());
            if (buffer.getLength() <= 2){
                return; // no actual data in buffer, no need to keep. Typically happens when RTP marker is set.
            }
            byte[] currentRtpPacketData = ((byte[])buffer.getData());
            H263RtpHeader h263PRtpHeader = new H263RtpHeader(currentRtpPacketData);
            int headerSize = h263PRtpHeader.HEADER_SIZE;
            if (h263PRtpHeader.V) {
                // There's an extra VRC byte at the end of the header:
                ++headerSize;
            }
            if (h263PRtpHeader.PLEN > 0) {
                // There's an extra picture header at the end:
                headerSize += h263PRtpHeader.PLEN;
            }
            if (h263PRtpHeader.P) {
                // Prepend two zero bytes to the start of the payload proper
                // Hack: Do this by shrinking header by 2 bytes
                headerSize -= 2;
                currentRtpPacketData[headerSize] = 0x00;
                currentRtpPacketData[headerSize+1] = 0x00;
            }
            if (reassembledData == null){
                // First packet
                timeStamp = buffer.getTimeStamp();
                format = buffer.getFormat();
                // Copy packet data to reassembledData
                reassembledData = new byte[currentRtpPacketData.length-headerSize];
                System.arraycopy(currentRtpPacketData, headerSize, reassembledData, 0,
                        currentRtpPacketData.length-headerSize);
            } else {
                // Concatenate new data to reassembledData
                byte[] data = new byte[reassembledData.length+buffer.getLength()];
                System.arraycopy(reassembledData, 0, data, 0, reassembledData.length);
                System.arraycopy(currentRtpPacketData, headerSize, data,
                        reassembledData.length, buffer.getLength());
                // Copy data to reassembledData
                reassembledData = new byte[data.length];
                System.arraycopy(data, 0, reassembledData, 0, data.length);
            }
        }

        /**
         * Is the frame complete?
         */
        public boolean complete(){
            if (!rtpMarker){
                return false; // need an rtp marker to signify end
            }
            if (reassembledData.length <= 0){
                return false; // need data beyond the header
            }
            // TODO: if some of the last ones come in after the marker, there will be blank squares in the lower right.
            return true;
        }

        /**
         * Assumes that complete() has been called and returns true.
         * Copies the reassembled frame into bDest (only if it fits the
         * native decoder's size limit) and resets the assembler.
         */
        private void copyToBuffer(Buffer bDest){
            if (!rtpMarker) throw new IllegalStateException();
            if (reassembledData.length <= 0) throw new IllegalStateException();
            if (reassembledData.length<=MAX_H263P_FRAME_SIZE){
                // If the frame data can be processed by native module, ie reassembled frame size not too big
                // Set buffer
                bDest.setData(reassembledData);
                bDest.setLength(reassembledData.length);
                bDest.setOffset(0);
                bDest.setTimeStamp(timeStamp);
                bDest.setFormat(format);
                bDest.setFlags(Buffer.FLAG_RTP_MARKER | Buffer.FLAG_RTP_TIME);
            }
            // Set reassembledData to null
            reassembledData = null;
        }

        /**
         * Get timestamp
         *
         * @return long
         */
        public long getTimeStamp(){
            return timeStamp;
        }
    }

    /**
     * Used to manage different timestamps, as packets could be coming not in order.
     *
     * Data is an array of FrameAssemblers, sorted by timestamps (oldest is first, newest is last)
     */
    static class FrameAssemblerCollection{
        // fixed capacity of the sorted assembler array
        final static int NUMBER_OF_ASSEMBLERS = 5;
        private FrameAssembler[] assemblers = new FrameAssembler[NUMBER_OF_ASSEMBLERS];
        // index of the assembler that received the most recent fragment
        private int activeAssembler = 0;
        // number of assemblers currently in use
        private int numberOfAssemblers = 0;

        /**
         * Add the buffer (which contains a fragment) to the right assembler.
         * NOTE(review): getAssembler(long) is referenced here but its
         * definition is missing from this extract — see corruption note below.
         */
        public void put(Buffer buffer){
            activeAssembler = getAssembler(buffer.getTimeStamp());
            assemblers[activeAssembler].put(buffer);
        }

        /**
         * Get the active frame assembler
         *
         * @return frameAssembler Last active assembler
         */
        public FrameAssembler getLastActiveAssembler(){
            return assemblers[activeAssembler];
        }

        /**
         * Create a new frame assembler for given timeStamp
         *
         * NOTE(review): the body below is visibly corrupted by the source
         * extraction — both loop headers read "for (int i=0;ispot;i--)" /
         * "for (int i=1;ispot;i--)" (text after a '<' appears to have been
         * dropped), and the methods getAssembler(long) and
         * removeOldestThan(long) used elsewhere are missing entirely, as is
         * the closing brace of the outer class. Recover the original file
         * before building; the tokens are reproduced unchanged here.
         *
         * @param timeStamp
         * @return assembler number Position of the assembler in the collection
         */
        public int createNewAssembler(long timeStamp){
            int spot = -1;
            if (numberOfAssemblers< NUMBER_OF_ASSEMBLERS-1){
                // If there's enough space left to create a new assembler
                // We search its spot
                for (int i=0;ispot;i--){
                    assemblers[i] = assemblers[i-1];
                }
                assemblers[spot] = new FrameAssembler();
            }else{
                // Not enough space, we destroy the oldest assembler
                for (int i=1;ispot;i--){
                    assemblers[i-1] = assemblers[i];
                }
                numberOfAssemblers -=spot+1;
            }
        }
    }



================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/JavaPacketizer.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.VideoCodec; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; /** * Reassembles H263+ RTP packets into H263+ frames, as per RFC 4629 */ public class JavaPacketizer extends VideoCodec { /** * Because packets can come out of order, it is possible that some packets for a newer frame * may arrive while an older frame is still incomplete. However, in the case where we get nothing * but incomplete frames, we don't want to keep all of them around forever. */ public JavaPacketizer(){ } public int process(Buffer input, Buffer output){ if (!input.isDiscard()) { // Add H263+ RTP header /* 0 1 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | RR |P|V| PLEN |PEBIT| + 2 null bytes +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ---------------------------------------- |0000 0100|0000 0000|0000 0000|0000 0000| ---------------------------------------- Only bit set is P = 1 */ byte h263header[] = new byte[2]; h263header[0]= 0x04; h263header[1]= 0x00; byte[] bufferData = (byte[])input.getData(); byte data[] = new byte[bufferData.length+h263header.length]; // write h263 payload System.arraycopy(h263header, 0, data, 0, h263header.length); // Write data System.arraycopy(bufferData, 0, data, h263header.length, bufferData.length); if (data.length > 0){ // Copy to buffer output.setFormat(input.getFormat()); output.setData(data); output.setLength(data.length); output.setOffset(0); output.setTimeStamp(input.getTimeStamp()); output.setFlags(Buffer.FLAG_RTP_MARKER | Buffer.FLAG_RTP_TIME); } return BUFFER_PROCESSED_OK; }else{ output.setDiscard(true); return OUTPUT_BUFFER_NOT_FILLED; } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/NativeH263Decoder.java 
================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder; // Referenced classes of package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder: // VideoSample public class NativeH263Decoder { public NativeH263Decoder() { } public static native int InitDecoder(int i, int j); public static native int DeinitDecoder(); public static native int DecodeAndConvert(byte abyte0[], int ai[], long l); public static native int InitParser(String s); public static native int DeinitParser(); public static native int getVideoLength(); public static native int getVideoWidth(); public static native int getVideoHeight(); public static native String getVideoCoding(); public static native VideoSample getVideoSample(int ai[]); static { String libname = "H263Decoder"; try { System.loadLibrary(libname); } catch(Exception exception) { } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/VideoSample.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack 
* * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder; public class VideoSample { public byte data[]; public int timestamp; public VideoSample(byte data[], int timestamp) { this.data = null; this.timestamp = 0; this.data = data; this.timestamp = timestamp; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/encoder/NativeH263Encoder.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder; // Referenced classes of package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder: // NativeH263EncoderParams public class NativeH263Encoder { public NativeH263Encoder() { } public static native int InitEncoder(NativeH263EncoderParams nativeh263encoderparams); public static native byte[] EncodeFrame(byte abyte0[], long l); public static native int DeinitEncoder(); static { String libname = "H263Encoder"; try { System.loadLibrary(libname); } catch(UnsatisfiedLinkError unsatisfiedlinkerror) { } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/encoder/NativeH263EncoderParams.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder;

/**
 * Configuration bean handed to {@code NativeH263Encoder.InitEncoder()}.
 * The field values are read back from native code (via the getters / JNI),
 * so field and accessor names must not be changed.
 */
public class NativeH263EncoderParams
{
    // Profile-and-level identifiers accepted by setProfile_level().
    public static final int SIMPLE_PROFILE_LEVEL0 = 0;
    public static final int SIMPLE_PROFILE_LEVEL1 = 1;
    public static final int SIMPLE_PROFILE_LEVEL2 = 2;
    public static final int SIMPLE_PROFILE_LEVEL3 = 3;
    public static final int CORE_PROFILE_LEVEL1 = 4;
    public static final int CORE_PROFILE_LEVEL2 = 5;
    public static final int SIMPLE_SCALABLE_PROFILE_LEVEL0 = 6;
    public static final int SIMPLE_SCALABLE_PROFILE_LEVEL1 = 7;
    public static final int SIMPLE_SCALABLE_PROFILE_LEVEL2 = 8;
    // NOTE(review): value 9 is unused in this list -- presumably reserved by the
    // native encoder's enum; confirm against the native header before reusing it.
    public static final int CORE_SCALABLE_PROFILE_LEVEL1 = 10;
    public static final int CORE_SCALABLE_PROFILE_LEVEL2 = 11;
    public static final int CORE_SCALABLE_PROFILE_LEVEL3 = 12;

    // Encoding modes accepted by setEncMode().
    public static final int SHORT_HEADER = 0;
    public static final int SHORT_HEADER_WITH_ERR_RES = 1;
    public static final int H263_MODE = 2;
    public static final int H263_MODE_WITH_ERR_RES = 3;
    public static final int DATA_PARTITIONING_MODE = 4;
    public static final int COMBINE_MODE_NO_ERR_RES = 5;
    public static final int COMBINE_MODE_WITH_ERR_RES = 6;

    // Rate-control modes accepted by setRcType().
    public static final int CONSTANT_Q = 0;
    public static final int CBR_1 = 1;
    public static final int VBR_1 = 2;
    public static final int CBR_2 = 3;
    public static final int VBR_2 = 4;
    public static final int CBR_LOWDELAY = 5;

    private int encMode;               // one of the *_MODE / *_HEADER constants above
    private int packetSize;            // packet size hint, in bytes
    private int profile_level;         // one of the *_PROFILE_LEVEL* constants above
    private boolean rvlcEnable;        // reversible VLC on/off
    private int gobHeaderInterval;
    private int numLayers;
    private int timeIncRes;            // time increment resolution (ticks per second)
    private int tickPerSrc;            // ticks per source frame (timeIncRes / frame rate)
    private int encHeight;             // encoded picture height, pixels
    private int encWidth;              // encoded picture width, pixels
    private float encFrameRate;        // target frame rate, frames/second
    private int bitRate;               // target bit rate, bits/second
    private int iQuant;                // quantizer for I frames
    private int pQuant;                // quantizer for P frames
    private int quantType;
    private int rcType;                // one of the rate-control constants above
    private float vbvDelay;            // VBV buffer delay, seconds
    private boolean noFrameSkipped;    // true forbids the encoder to skip frames
    private int intraPeriod;           // I-frame period; -1 in defaults (encoder-chosen)
    private int numIntraMB;            // forced intra macroblocks per frame
    private boolean sceneDetect;
    private int searchRange;           // motion search range
    private boolean mv8x8Enable;       // allow 8x8 motion vectors
    private int intraDCVlcTh;
    private boolean useACPred;         // AC prediction on/off

    /**
     * Builds a parameter set with the stack defaults: H263_MODE, QCIF
     * (176x144), 8 fps, 64 kbit/s, CBR_1 rate control.
     */
    public NativeH263EncoderParams()
    {
        encMode = 2;            // H263_MODE
        packetSize = 1024;
        profile_level = 3;      // SIMPLE_PROFILE_LEVEL3
        rvlcEnable = false;
        gobHeaderInterval = 0;
        numLayers = 1;
        timeIncRes = 1000;
        tickPerSrc = 125;       // 1000 / 8 fps
        encHeight = 144;
        encWidth = 176;
        encFrameRate = 8F;
        bitRate = 64000;
        iQuant = 15;
        pQuant = 12;
        quantType = 0;
        rcType = 1;             // CBR_1
        vbvDelay = 1.5F;
        noFrameSkipped = false;
        intraPeriod = -1;
        numIntraMB = 0;
        sceneDetect = false;
        searchRange = 4;
        mv8x8Enable = true;
        intraDCVlcTh = 0;
        useACPred = false;
    }

    // ---- Getters (read from native code; keep signatures stable) ----

    public int getEncMode() { return encMode; }

    public int getPacketSize() { return packetSize; }

    public int getProfile_level() { return profile_level; }

    public boolean isRvlcEnable() { return rvlcEnable; }

    public int getGobHeaderInterval() { return gobHeaderInterval; }

    public int getNumLayers() { return numLayers; }

    public int getTimeIncRes() { return timeIncRes; }

    public int getTickPerSrc() { return tickPerSrc; }

    public int getEncHeight() { return encHeight; }

    public int getEncWidth() { return encWidth; }

    public float getEncFrameRate() { return encFrameRate; }

    public int getBitRate() { return bitRate; }

    public int getIQuant() { return iQuant; }

    public int getPQuant() { return pQuant; }

    public int getQuantType() { return quantType; }

    public int getRcType() { return rcType; }

    public boolean isNoFrameSkipped() { return noFrameSkipped; }

    public int getIntraPeriod() { return intraPeriod; }

    public int getNumIntraMB() { return numIntraMB; }

    public boolean isSceneDetect() { return sceneDetect; }

    public int getSearchRange() { return searchRange; }

    public boolean isMv8x8Enable() { return mv8x8Enable; }

    public int getIntraDCVlcTh() { return intraDCVlcTh; }

    public boolean isUseACPred() { return useACPred; }

    // ---- Setters ----

    public void setEncMode(int encMode) { this.encMode = encMode; }

    public void setPacketSize(int packetSize) { this.packetSize = packetSize; }

    public void setProfile_level(int profile_level) { this.profile_level = profile_level; }

    public void setRvlcEnable(boolean rvlcEnable) { this.rvlcEnable = rvlcEnable; }

    public void setGobHeaderInterval(int gobHeaderInterval) { this.gobHeaderInterval = gobHeaderInterval; }

    public void setNumLayers(int numLayers) { this.numLayers = numLayers; }

    public void setTimeIncRes(int timeIncRes) { this.timeIncRes = timeIncRes; }

    public void setTickPerSrc(int tickPerSrc) { this.tickPerSrc = tickPerSrc; }

    public void setEncHeight(int encHeight) { this.encHeight = encHeight; }

    public void setEncWidth(int encWidth) { this.encWidth = encWidth; }

    public void setEncFrameRate(float encFrameRate) { this.encFrameRate = encFrameRate; }

    public void setBitRate(int bitRate) { this.bitRate = bitRate; }

    public void setIQuant(int quant) { iQuant = quant; }

    public void setPQuant(int quant) { pQuant = quant; }

    public void setQuantType(int quantType) { this.quantType = quantType; }

    public void setRcType(int rcType) { this.rcType = rcType; }

    public void setNoFrameSkipped(boolean noFrameSkipped) { this.noFrameSkipped = noFrameSkipped; }

    public void setIntraPeriod(int intraPeriod) { this.intraPeriod = intraPeriod; }

    public void setNumIntraMB(int numIntraMB) { this.numIntraMB = numIntraMB; }

    public void setSceneDetect(boolean sceneDetect) { this.sceneDetect = sceneDetect; }

    public void setSearchRange(int searchRange) { this.searchRange = searchRange; }

    public void setMv8x8Enable(boolean mv8x8Enable) { this.mv8x8Enable = mv8x8Enable; }

    public void setIntraDCVlcTh(int intraDCVlcTh) { this.intraDCVlcTh = intraDCVlcTh; }

    public void setUseACPred(boolean useACPred) { this.useACPred = useACPred; }

    public float getVbvDelay() { return vbvDelay; }

    public void setVbvDelay(float vbvDelay) { this.vbvDelay = vbvDelay; }
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/H264Config.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 *
Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264;

/**
 * Default H264 Settings
 *
 * @author hlxn7157
 */
public class H264Config {
    /**
     * H264 Codec Name
     */
    public final static String CODEC_NAME = "h264";

    /**
     * Default clock rate (RTP timestamp units per second for H.264)
     */
    public final static int CLOCK_RATE = 90000;

    /**
     * Default codec params.
     * NOTE(review): "42900B" is the SDP profile-level-id hex triplet
     * (profile_idc / profile-iop / level_idc) -- confirm the intended
     * profile/level against the encoder configuration.
     */
    public final static String CODEC_PARAMS = "profile-level-id=42900B";

    /**
     * Default video width. The commented value is the original QCIF setting,
     * overridden here to CIF.
     */
    // public final static int VIDEO_WIDTH = 176;
    public final static int VIDEO_WIDTH = 352;

    /**
     * Default video height. The commented value is the original QCIF setting,
     * overridden here to CIF.
     */
    // public final static int VIDEO_HEIGHT = 144;
    public final static int VIDEO_HEIGHT = 288;

    /**
     * Default video frame rate (frames/second)
     */
    public final static int FRAME_RATE = 15;

    /**
     * Default video bit rate (bits/second). The commented value is the
     * original 64 kbit/s setting, raised here to 384 kbit/s for CIF.
     */
    // public final static int BIT_RATE = 64000;
    public final static int BIT_RATE = 384000;
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/decoder/NativeH264Decoder.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.decoder; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder.VideoSample; public class NativeH264Decoder { public NativeH264Decoder() { } public static native int InitDecoder(); public static native int DeinitDecoder(); public static synchronized native int DecodeAndConvert(byte abyte0[], int ai[]); public static native int InitParser(String s); public static native int DeinitParser(); public static native int getVideoLength(); public static native int getVideoWidth(); public static native int getVideoHeight(); public static native String getVideoCoding(); public static native VideoSample getVideoSample(int ai[]); static { String libname = "H264Decoder"; try { System.loadLibrary(libname); } catch(Exception exception) { } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/encoder/NativeH264Encoder.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.encoder;

/**
 * JNI facade for the native H.264 video encoder (libH264Encoder).
 * All real work happens in native code; this class only declares the entry
 * points and loads the shared library at class-initialization time.
 */
public class NativeH264Encoder {

    public NativeH264Encoder() {
    }

    /**
     * Initializes the native encoder; returns a native status code.
     * NOTE(review): the meaning of the three int parameters (presumably
     * width / height / frame-rate or bit-rate) is defined by the JNI
     * implementation -- confirm against NativeH264Encoder.cpp.
     */
    public static native int InitEncoder(int i, int j, int k);

    /**
     * Encodes one raw frame and returns the encoded bitstream for it.
     * NOTE(review): input buffer layout and timestamp unit are defined on the
     * native side -- confirm against the JNI implementation.
     */
    public static native byte[] EncodeFrame(byte abyte0[], long l);

    /** Releases the native encoder resources; returns a native status code. */
    public static native int DeinitEncoder();

    /** Returns the status code of the most recent EncodeFrame call. */
    public static native int getLastEncodeStatus();

    static {
        // Load the native library once when the class is first used.
        String libname = "H264Encoder";
        try {
            System.loadLibrary(libname);
        } catch(UnsatisfiedLinkError unsatisfiedlinkerror) {
            // Load failure is deliberately swallowed; the first native call
            // will then throw UnsatisfiedLinkError at the call site.
        }
    }
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpAppPacket.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

import java.io.DataOutputStream;
import java.io.IOException;

/**
 * RTCP APP packet
 *
 * @author jexa7410
 */
public class RtcpAppPacket extends RtcpPacket {
    // Sender SSRC.
    public int ssrc;
    // APP "name" field, written as one 32-bit word (presumably 4 ASCII chars
    // packed into an int, per RFC 3550 -- confirm with callers).
    public int name;
    // Application subtype, carried in the 5-bit count field of the header.
    public int subtype;

    public RtcpAppPacket(RtcpPacket parent) {
        super(parent);
        super.type = 204; // RTCP_APP
    }

    /**
     * Builds an APP packet. data must be a multiple of 4 bytes long and
     * subtype must fit the 5-bit field (0..31).
     * NOTE(review): validation runs after the field assignments, and a null
     * data throws NullPointerException from data.length rather than a
     * descriptive IllegalArgumentException.
     */
    public RtcpAppPacket(int ssrc, int name, int subtype, byte data[]) {
        this.ssrc = ssrc;
        this.name = name;
        this.subtype = subtype;
        this.data = data;
        super.type = 204; // RTCP_APP
        // Payload must align to 32-bit words (required by the length field).
        if ((data.length & 3) != 0) {
            throw new IllegalArgumentException("Bad data length");
        }
        if (subtype < 0 || subtype > 31) {
            throw new IllegalArgumentException("Bad subtype");
        } else {
            return; // decompiler artifact; kept verbatim
        }
    }

    /** Total packet size in bytes: 12-byte fixed part + payload. */
    public int calcLength() {
        return 12 + data.length;
    }

    /** Serializes the packet per RFC 3550 section 6.7. */
    public void assemble(DataOutputStream out) throws IOException {
        // First byte: version 2 in the top bits (128) + subtype in the
        // 5-bit count field.
        out.writeByte(128 + subtype);
        out.writeByte(204); // packet type = APP
        // Length in 32-bit words minus one: (12 + data.length)/4 - 1.
        out.writeShort(2 + (data.length >> 2));
        out.writeInt(ssrc);
        out.writeInt(name);
        out.write(data);
    }
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpByePacket.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.io.DataOutputStream; import java.io.IOException; /** * RTCP BYE packet * * @author jexa7410 */ public class RtcpByePacket extends RtcpPacket { public int ssrc[]; public byte reason[]; public RtcpByePacket(RtcpPacket parent) { super(parent); super.type = 203; } public RtcpByePacket(int ssrc[], byte reason[]) { this.ssrc = ssrc; if (reason != null) { this.reason = reason; } else { this.reason = new byte[0]; } if (ssrc.length > 31) { throw new IllegalArgumentException("Too many SSRCs"); } else { return; } } public int calcLength() { return 4 + (ssrc.length << 2) + (reason.length <= 0 ? 0 : reason.length + 4 & -4); } public void assemble(DataOutputStream out) throws IOException { out.writeByte(128 + ssrc.length); out.writeByte(203); out.writeShort(ssrc.length + (reason.length <= 0 ? 0 : reason.length + 4 >> 2)); for (int i = 0; i < ssrc.length; i++) { out.writeInt(ssrc[i]); } if (reason.length > 0) { out.writeByte(reason.length); out.write(reason); for (int i = (reason.length + 4 & -4) - reason.length - 1; i > 0; i--) { out.writeByte(0); } } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpCompoundPacket.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;

/**
 * RTCP compound packet
 *
 * @author jexa7410
 */
public class RtcpCompoundPacket extends RtcpPacket {
    // Sub-packets carried in this compound packet, in wire order.
    public RtcpPacket[] packets;

    public RtcpCompoundPacket(Packet packet) {
        super(packet);
        type = -1; // RTCP_COMPOUND
    }

    public RtcpCompoundPacket(RtcpPacket[] rtcppackets) {
        packets = rtcppackets;
        type = -1; // RTCP_COMPOUND
    }

    /**
     * Serializes all sub-packets into this.data, then pads the result up to
     * exactly i bytes by setting the P bit (0x20) on the LAST sub-packet,
     * storing the pad count in the final byte, and bumping that sub-packet's
     * 16-bit length field accordingly.
     *
     * NOTE(review): this padding fix-up is order-sensitive and kept verbatim:
     * i_0_ tracks the byte offset of the last sub-packet's header; the carry
     * into the high length byte ((i - i_2_) >> 10) assumes the low-byte sum
     * stays under 512 -- confirm before touching. The NullPointerExceptions
     * thrown here look like placeholders for proper error types.
     */
    public void assemble(int i, boolean bool) {
        length = i;
        offset = 0;
        ByteArrayOutputStream bytearrayoutputstream = new ByteArrayOutputStream(
            i);
        DataOutputStream dataoutputstream = new DataOutputStream(
            bytearrayoutputstream);
        int i_0_;
        try {
            if (bool)
                offset += 4; // reserve room for an encryption prefix word
            i_0_ = offset;
            for (int i_1_ = 0; i_1_ < packets.length; i_1_++) {
                // Remember where the current (ultimately the last) sub-packet
                // starts so its header can be patched for padding below.
                i_0_ = bytearrayoutputstream.size();
                packets[i_1_].assemble(dataoutputstream);
            }
        } catch (IOException ioexception) {
            throw new NullPointerException("Impossible IO Exception");
        }
        int i_2_ = bytearrayoutputstream.size();
        data = bytearrayoutputstream.toByteArray();
        if (i_2_ > i)
            throw new NullPointerException("RTCP Packet overflow");
        if (i_2_ < i) {
            // Grow the buffer to the requested size, flag padding on the last
            // sub-packet and record the pad byte count in the final byte.
            if (data.length < i)
                System.arraycopy(data, 0, data = new byte[i], 0, i_2_);
            data[i_0_] |= 0x20;
            data[i - 1] = (byte) (i - i_2_);
            // Add the pad words to the last sub-packet's 16-bit length field
            // (bytes i_0_+2 / i_0_+3), with a manual carry into the high byte.
            int i_3_ = (data[i_0_ + 3] & 0xff) + (i - i_2_ >> 2);
            if (i_3_ >= 256)
                data[i_0_ + 2] += i - i_2_ >> 10;
            data[i_0_ + 3] = (byte) i_3_;
        }
    }

    /** A compound packet is never nested inside another compound packet. */
    public void assemble(DataOutputStream dataoutputstream) throws IOException {
        throw new IllegalArgumentException("Recursive Compound Packet");
    }

    /** Sum of the sub-packet lengths; requires at least one sub-packet. */
    public int calcLength() {
        int i = 0;
        if (packets == null || packets.length < 1)
            throw new IllegalArgumentException("Bad RTCP Compound Packet");
        for (int i_4_ = 0; i_4_ < packets.length; i_4_++)
            i += packets[i_4_].calcLength();
        return i;
    }
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacket.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

import com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;

import java.io.DataOutputStream;
import java.io.IOException;

/**
 * Abstract RTCP packet
 *
 * @author jexa7410
 */
public abstract class RtcpPacket extends Packet {
    /**
     * Version =2
     */
    public static final byte VERSION = 2;

    /**
     * Padding =0
     */
    public static final byte PADDING = 0;

    /**
     * RTCP SR
     */
    public static final int RTCP_SR = 200;

    /**
     * RTCP RR
     */
    public static final int RTCP_RR = 201;

    /**
     * RTCP SDES
     */
    public static final int RTCP_SDES = 202;

    /**
     * RTCP BYE
     */
    public static final int RTCP_BYE = 203;

    /**
     * RTCP APP
     */
    public static final int RTCP_APP = 204;

    /**
     * RTCP compound (pseudo-type used for compound packets)
     */
    public static final int RTCP_COMPOUND = -1;

    // Underlying raw packet this RTCP packet was parsed from, if any.
    public Packet base;

    // One of the RTCP_* constants above.
    public int type;

    public RtcpPacket() {
    }

    public RtcpPacket(RtcpPacket rtcppacket) {
        super((Packet)rtcppacket);
        base = rtcppacket.base;
    }

    public RtcpPacket(Packet packet) {
        super(packet);
        base = packet;
    }

    /** Serializes this packet onto the stream in RFC 3550 wire format. */
    public abstract void assemble(DataOutputStream dataoutputstream) throws IOException;

    /** Returns the serialized size of this packet in bytes. */
    public abstract int calcLength();
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacketReceiver.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpApplicationEvent;
import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpByeEvent;
import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpEvent;
import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpEventListener;
import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpReceiverReportEvent;
import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpSdesEvent;
import com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpSenderReportEvent;
import com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;
import com.orangelabs.rcs.platform.network.DatagramConnection;
import com.orangelabs.rcs.platform.network.NetworkFactory;
import com.orangelabs.rcs.utils.logger.Logger;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Vector;

/**
 * RTCP packet
receiver
 *
 * @author jexa7410
 */
public class RtcpPacketReceiver extends Thread {
    /**
     * Datagram connection
     */
    public DatagramConnection datagramConnection = null;

    /**
     * Statistics
     */
    private RtcpStatisticsReceiver stats = new RtcpStatisticsReceiver();

    /**
     * RTCP event listeners
     */
    private Vector listeners = new Vector();

    /**
     * RTCP Session
     */
    private RtcpSession rtcpSession = null;

    /**
     * The logger
     */
    private final Logger logger = Logger.getLogger(this.getClass().getName());

    /**
     * Constructor
     *
     * @param port Listening port
     * @param rtcpSession the RTCP session
     * @throws IOException
     */
    public RtcpPacketReceiver(int port, RtcpSession rtcpSession) throws IOException {
        super();

        this.rtcpSession = rtcpSession;

        // Create the UDP server
        datagramConnection = NetworkFactory.getFactory().createDatagramConnection();
        datagramConnection.open(port);

        if (logger.isActivated()) {
            logger.debug("RTCP receiver created at port " + port);
        }
    }

    /**
     * Close the receiver
     *
     * @throws IOException
     */
    public void close() throws IOException {
        // Interrupt the current thread processing; failures are best-effort.
        try {
            interrupt();
        } catch(Exception e) {}

        // Close the datagram connection; nulling it also stops run()'s loop.
        if (datagramConnection != null) {
            datagramConnection.close();
            datagramConnection = null;
        }
    }

    /**
     * Background processing: blocks on the UDP socket, wraps each datagram in
     * a Packet and hands it to handlePacket() until the connection is closed.
     */
    public void run() {
        try {
            while (datagramConnection != null) {
                // Wait a packet
                byte[] data = datagramConnection.receive();

                // Create a packet object
                Packet packet = new Packet();
                packet.data = data;
                packet.length = data.length;
                packet.offset = 0;
                packet.receivedAt = System.currentTimeMillis();

                // Process the received packet
                handlePacket(packet);
            }
        } catch (Exception e) {
            // Also reached on close(): the blocked receive() fails once the
            // connection is shut down.
            if (logger.isActivated()) {
                logger.error("Datagram socket server failed", e);
            }
        }
    }

    /**
     * Handle the received packet
     *
     * @param packet Packet
     * @return RTCP packet, or null when the datagram could not be parsed
     */
    public RtcpPacket handlePacket(Packet p) {
        // Update statistics
        stats.numRtcpPkts++;
        stats.numRtcpBytes += p.length;

        // Parse the RTCP packet
        RtcpPacket result;
        try {
            result = parseRtcpPacket(p);
        } catch (Exception e) {
            stats.numBadRtcpPkts++;
            return null;
        }
        return result;
    }

    /**
     * Parse the RTCP packet. Walks the compound packet sub-packet by
     * sub-packet (RFC 3550 framing: 4-byte header, length in 32-bit words
     * minus one, optional padding flagged on the last sub-packet), dispatches
     * each recognized type to its event listeners and collects them into a
     * RtcpCompoundPacket. Any framing violation returns null.
     *
     * @param packet RTCP packet not yet parsed
     * @return RTCP packet, or null on any malformed input
     */
    public RtcpPacket parseRtcpPacket(Packet packet) {
        RtcpCompoundPacket compoundPacket = new RtcpCompoundPacket(packet);
        Vector subpackets = new Vector();
        DataInputStream in = new DataInputStream(
                new ByteArrayInputStream(compoundPacket.data,
                        compoundPacket.offset,
                        compoundPacket.length));
        try {
            rtcpSession.updateavgrtcpsize(compoundPacket.length);
            int length = 0;
            for (int offset = 0; offset < compoundPacket.length; offset += length) {
                // Read first byte: version (2 bits), padding (1 bit),
                // item count (5 bits). Version must be 2.
                int firstbyte = in.readUnsignedByte();
                if ((firstbyte & 0xc0) != 128) {
                    if (logger.isActivated()) {
                        logger.error("Bad RTCP packet version");
                    }
                    return null;
                }

                // Read type of subpacket
                int type = in.readUnsignedByte();

                // Read length of subpacket and convert from
                // "32-bit words minus one" to bytes.
                length = in.readUnsignedShort();
                length = length + 1 << 2;

                int padlen = 0;
                if (offset + length > compoundPacket.length) {
                    if (logger.isActivated()) {
                        logger.error("Bad RTCP packet length");
                    }
                    return null;
                }
                if (offset + length == compoundPacket.length) {
                    // Only the last sub-packet may carry the padding bit; the
                    // pad count sits in the very last byte of the datagram.
                    if ((firstbyte & 0x20) != 0) {
                        padlen = compoundPacket.data[compoundPacket.offset
                                + compoundPacket.length - 1] & 0xff;
                        if (padlen == 0) {
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP packet format");
                            }
                            return null;
                        }
                    }
                } else if ((firstbyte & 0x20) != 0) {
                    if (logger.isActivated()) {
                        logger.error("Bad RTCP packet format (P != 0)");
                    }
                    return null;
                }

                // Payload length excluding padding; firstbyte now holds only
                // the 5-bit report/source count.
                int inlength = length - padlen;
                firstbyte &= 0x1f;

                // Parse subpacket
                RtcpPacket subpacket;
                switch (type) {
                    // RTCP SR event
                    case RtcpPacket.RTCP_SR:
                        stats.numSrPkts++;
                        // Fixed 28-byte SR part + one 24-byte report block per count.
                        if (inlength != 28 + 24 * firstbyte) {
                            stats.numMalformedRtcpPkts++;
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP SR packet format");
                            }
                            return null;
                        }
                        RtcpSenderReportPacket srp = new RtcpSenderReportPacket(compoundPacket);
                        subpacket = srp;
                        srp.ssrc = in.readInt();
                        // 32-bit fields widened to unsigned longs.
                        srp.ntptimestampmsw = (long) in.readInt() & 0xffffffffL;
                        srp.ntptimestamplsw = (long) in.readInt() & 0xffffffffL;
                        srp.rtptimestamp = (long) in.readInt() & 0xffffffffL;
                        srp.packetcount = (long) in.readInt() & 0xffffffffL;
                        srp.octetcount = (long) in.readInt() & 0xffffffffL;
                        srp.reports = new RtcpReport[firstbyte];
                        RtpSource sourceSR = rtcpSession.getMySource();
                        if (sourceSR != null)
                            sourceSR.timeOfLastRTCPArrival = rtcpSession.currentTime();
                        for (int i = 0; i < srp.reports.length; i++) {
                            RtcpReport report = new RtcpReport();
                            srp.reports[i] = report;
                            report.ssrc = in.readInt();
                            // One word: fraction lost (high byte) +
                            // cumulative packets lost (low 24 bits).
                            long val = in.readInt();
                            val &= 0xffffffffL;
                            report.fractionlost = (int) (val >> 24);
                            report.packetslost = (int) (val & 0xffffffL);
                            report.lastseq = (long) in.readInt() & 0xffffffffL;
                            report.jitter = in.readInt();
                            report.lsr = (long) in.readInt() & 0xffffffffL;
                            report.dlsr = (long) in.readInt() & 0xffffffffL;
                        }
                        // Notify event listeners
                        notifyRtcpListeners(new RtcpSenderReportEvent(srp));
                        break;

                    // RTCP RR event
                    case RtcpPacket.RTCP_RR:
                        // Fixed 8-byte RR part + one 24-byte report block per count.
                        if (inlength != 8 + 24 * firstbyte) {
                            stats.numMalformedRtcpPkts++;
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP RR packet format");
                            }
                            return null;
                        }
                        RtcpReceiverReportPacket rrp = new RtcpReceiverReportPacket(compoundPacket);
                        subpacket = rrp;
                        rrp.ssrc = in.readInt();
                        rrp.reports = new RtcpReport[firstbyte];
                        RtpSource sourceRR = rtcpSession.getMySource();
                        if (sourceRR != null)
                            sourceRR.timeOfLastRTCPArrival = rtcpSession.currentTime();
                        for (int i = 0; i < rrp.reports.length; i++) {
                            RtcpReport report = new RtcpReport();
                            rrp.reports[i] = report;
                            report.ssrc = in.readInt();
                            long val = in.readInt();
                            val &= 0xffffffffL;
                            report.fractionlost = (int) (val >> 24);
                            report.packetslost = (int) (val & 0xffffffL);
                            report.lastseq = (long) in.readInt() & 0xffffffffL;
                            report.jitter = in.readInt();
                            report.lsr = (long) in.readInt() & 0xffffffffL;
                            report.dlsr = (long) in.readInt() & 0xffffffffL;
                        }
                        // Notify event listeners
                        notifyRtcpListeners(new RtcpReceiverReportEvent(rrp));
                        break;

                    // RTCP SDES event
                    case RtcpPacket.RTCP_SDES:
                        RtcpSdesPacket sdesp = new RtcpSdesPacket(compoundPacket);
                        subpacket = sdesp;
                        sdesp.sdes = new RtcpSdesBlock[firstbyte];
                        // sdesoff tracks the consumed byte count to validate
                        // against inlength at the end.
                        int sdesoff = 4;
                        for (int i = 0; i < sdesp.sdes.length; i++) {
                            RtcpSdesBlock chunk = new RtcpSdesBlock();
                            sdesp.sdes[i] = chunk;
                            chunk.ssrc = in.readInt();
                            sdesoff += 5;
                            Vector items = new Vector();
                            boolean gotcname = false;
                            int j;
                            // Items are type/length/value triples, terminated
                            // by a zero type byte. Valid types are 1..8 and a
                            // CNAME (type 1) is mandatory per chunk.
                            while ((j = in.readUnsignedByte()) != 0) {
                                if (j < 1 || j > 8) {
                                    stats.numMalformedRtcpPkts++;
                                    if (logger.isActivated()) {
                                        logger.error("Bad RTCP SDES packet format");
                                    }
                                    return null;
                                }
                                if (j == 1) {
                                    gotcname = true;
                                }
                                RtcpSdesItem item = new RtcpSdesItem();
                                items.addElement(item);
                                item.type = j;
                                int sdeslen = in.readUnsignedByte();
                                item.data = new byte[sdeslen];
                                in.readFully(item.data);
                                sdesoff += 2 + sdeslen;
                            }
                            if (!gotcname) {
                                stats.numMalformedRtcpPkts++;
                                if (logger.isActivated()) {
                                    logger.error("Bad RTCP SDES packet format");
                                }
                                return null;
                            }
                            chunk.items = new RtcpSdesItem[items.size()];
                            items.copyInto(chunk.items);
                            // Skip chunk padding up to the next word boundary.
                            if ((sdesoff & 3) != 0) {
                                if (in.skip(4 - (sdesoff & 3)) != 4 - (sdesoff & 3)) {
                                    if (logger.isActivated()) {
                                        logger.error("Bad RTCP SDES packet format");
                                    }
                                    return null;
                                }
                                sdesoff = sdesoff + 3 & -4;
                            }
                        }
                        if (inlength != sdesoff) {
                            stats.numMalformedRtcpPkts++;
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP SDES packet format");
                            }
                            return null;
                        }
                        // Notify event listeners
                        notifyRtcpListeners(new RtcpSdesEvent(sdesp));
                        break;

                    // RTCP BYE event
                    case RtcpPacket.RTCP_BYE:
                        RtcpByePacket byep = new RtcpByePacket(compoundPacket);
                        subpacket = byep;
                        byep.ssrc = new int[firstbyte];
                        for (int i = 0; i < byep.ssrc.length; i++) {
                            byep.ssrc[i] = in.readInt();
                        }
                        // Optional length-prefixed reason text after the SSRCs;
                        // reasonlen counts prefix + text, rounded to a word.
                        int reasonlen;
                        if (inlength > 4 + 4 * firstbyte) {
                            reasonlen = in.readUnsignedByte();
                            byep.reason = new byte[reasonlen];
                            reasonlen++;
                        } else {
                            reasonlen = 0;
                            byep.reason = new byte[0];
                        }
                        reasonlen = reasonlen + 3 & -4;
                        if (inlength != 4 + 4 * firstbyte + reasonlen) {
                            stats.numMalformedRtcpPkts++;
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP BYE packet format");
                            }
                            return null;
                        }
                        in.readFully(byep.reason);
                        // Skip the zero padding after the reason text.
                        int skipBye = reasonlen - byep.reason.length;
                        if (in.skip(skipBye) != skipBye) {
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP BYE packet format");
                            }
                            return null;
                        }
                        // Notify event listeners
                        notifyRtcpListeners(new RtcpByeEvent(byep));
                        break;

                    // RTCP APP event
                    case RtcpPacket.RTCP_APP:
                        // 12-byte minimum: header + SSRC + name.
                        if (inlength < 12) {
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP APP packet format");
                            }
                            return null;
                        }
                        RtcpAppPacket appp = new RtcpAppPacket(compoundPacket);
                        subpacket = appp;
                        appp.ssrc = in.readInt();
                        appp.name = in.readInt();
                        appp.subtype = firstbyte;
                        appp.data = new byte[inlength - 12];
                        in.readFully(appp.data);
                        // skipApp is always 0 here (data covers inlength - 12);
                        // kept verbatim from the original.
                        int skipApp = inlength - 12 - appp.data.length;
                        if (in.skip(skipApp) != skipApp) {
                            if (logger.isActivated()) {
                                logger.error("Bad RTCP APP packet format");
                            }
                            return null;
                        }
                        // Notify event listeners
                        notifyRtcpListeners(new RtcpApplicationEvent(appp));
                        break;

                    // RTCP unknown event
                    default:
                        stats.numUnknownTypes++;
                        if (logger.isActivated()) {
                            logger.error("Bad RTCP packet format");
                        }
                        return null;
                }
                subpacket.offset = offset;
                subpacket.length = length;
                subpackets.addElement(subpacket);
                // Consume the trailing pad bytes of the last sub-packet.
                if (in.skipBytes(padlen) != padlen) {
                    if (logger.isActivated()) {
                        logger.error("Bad RTCP packet format");
                    }
                    return null;
                }
            }
        } catch (Exception e) {
            if (logger.isActivated()) {
                logger.error("RTCP packet parsing error", e);
            }
            return null;
        }
        compoundPacket.packets = new RtcpPacket[subpackets.size()];
        subpackets.copyInto(compoundPacket.packets);
        return compoundPacket;
    }

    /**
     * Add a RTCP event listener
     *
     * @param listener Listener
     */
    public void addRtcpListener(RtcpEventListener listener) {
        if (logger.isActivated()) {
            logger.debug("Add a RTCP event listener");
        }
        listeners.addElement(listener);
    }

    /**
     * Remove a RTCP event listener
     *
     * @param listener Listener
     */
    public void removeRtcpListener(RtcpEventListener listener) {
        if (logger.isActivated()) {
            logger.debug("Remove a RTCP event listener");
        }
        listeners.removeElement(listener);
    }

    /**
     * Notify RTCP event listeners
     *
     * @param event RTCP event
     */
    public void notifyRtcpListeners(RtcpEvent event) {
        for(int i=0; i < listeners.size(); i++) {
            RtcpEventListener listener = (RtcpEventListener)listeners.elementAt(i);
            listener.receiveRtcpEvent(event);
        }
    }

    /**
     * Returns the statistics of RTCP reception
     *
     * @return Statistics
     */
    public RtcpStatisticsReceiver getRtcpReceptionStats() {
        return stats;
    }

    /**
     * Returns the DatagramConnection of RTCP
     *
     * @return DatagramConnection
     */
    public DatagramConnection getConnection() {
        return datagramConnection;
    }
}


================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacketTransmitter.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

import com.orangelabs.rcs.platform.network.DatagramConnection;
import com.orangelabs.rcs.platform.network.NetworkFactory;
import com.orangelabs.rcs.utils.logger.Logger;

import java.io.IOException;
import java.util.Random;
import java.util.Vector;

/**
 * RTCP packet transmitter.
 *
 * Background thread that periodically assembles and sends compound RTCP
 * packets (SR or RR, followed by SDES, plus a BYE when the session is being
 * closed) to the remote peer over UDP.
 *
 * @author jexa7410
 */
public class RtcpPacketTransmitter extends Thread {
    /**
     * Remote address
     */
    private String remoteAddress;

    /**
     * Remote port
     */
    private int remotePort;

    /**
     * Statistics of the packets sent by this transmitter
     */
    private RtcpStatisticsTransmitter stats = new RtcpStatisticsTransmitter();

    /**
     * Datagram connection used to send RTCP datagrams
     */
    public DatagramConnection datagramConnection = null;

    /**
     * RTCP Session (shared timing state: report interval, last-sent times)
     */
    private RtcpSession rtcpSession = null;

    /**
     * Flag used to determine when to terminate after sending a BYE
     */
    private boolean waitingForByeBackoff = false;

    /**
     * Flag used to properly close (set by close(); stops further transmission)
     */
    private boolean closed = false;

    /**
     * Random value used for the RTP timestamp offset in SR packets
     */
    private Random rand = new Random();

    /**
     * The logger
     */
    private final Logger logger = Logger.getLogger(this.getClass().getName());

    /**
     * Constructor. Opens a new datagram connection towards the remote peer.
     *
     * @param address Remote address
     * @param port Remote port
     * @param rtcpSession the RTCP session
     * @throws IOException
     */
    public RtcpPacketTransmitter(String address, int port, RtcpSession rtcpSession)
            throws IOException {
        super();
        this.remoteAddress = address;
        this.remotePort = port;
        this.rtcpSession = rtcpSession;

        // Open the connection
        datagramConnection = NetworkFactory.getFactory().createDatagramConnection();
        datagramConnection.open();

        if (logger.isActivated()) {
            logger.debug("RTCP transmitter connected to " + remoteAddress + ":" + remotePort);
        }
    }

    /**
     * Constructor - used for SYMETRIC_RTP.
     *
     * Reuses the receiver's datagram connection when one is supplied, so RTCP
     * is sent from the same local port it is received on; otherwise opens a
     * fresh connection.
     *
     * @param address Remote address
     * @param port Remote port
     * @param rtcpSession the RTCP session
     * @param connection datagram connection of the RtpPacketReceiver (may be null)
     * @throws IOException
     */
    public RtcpPacketTransmitter(String address, int port, RtcpSession rtcpSession,
            DatagramConnection connection) throws IOException {
        super();
        this.remoteAddress = address;
        this.remotePort = port;
        this.rtcpSession = rtcpSession;

        // Open the connection, reusing the receiver's one if provided
        if (connection != null) {
            this.datagramConnection = connection;
        } else {
            this.datagramConnection = NetworkFactory.getFactory().createDatagramConnection();
            this.datagramConnection.open();
        }

        if (logger.isActivated()) {
            logger.debug("RTCP transmitter connected to " + remoteAddress + ":" + remotePort);
        }
    }

    /**
     * Close the transmitter.
     *
     * Requests a BYE on the session, stops further transmission and closes
     * the datagram connection.
     *
     * @throws IOException
     */
    public void close() throws IOException {
        rtcpSession.isByeRequested = true;
        closed = true;

        // Close the datagram connection
        if (datagramConnection != null) {
            datagramConnection.close();
        }
        if (logger.isActivated()) {
            logger.debug("RTCP transmitter closed");
        }
    }

    /**
     * Background processing.
     *
     * Loops until terminated: sleeps for the computed RTCP report interval,
     * then either emits a compound report or, once a BYE has been requested,
     * winds the session down. An InterruptedException is treated as a BYE
     * request (waitingForByeBackoff delays termination by one cycle so the
     * BYE can actually be sent).
     */
    public void run() {
        try {
            // Send a SDES packet
            // sendSdesPacket();

            boolean terminate = false;
            while (!terminate) {
                try {
                    // Wait the RTCP report interval.
                    Thread.sleep((long)rtcpSession.getReportInterval());

                    // Right time to send a RTCP packet or reschedule ?
                    if ((rtcpSession.timeOfLastRTCPSent + rtcpSession.T) <= rtcpSession.currentTime()) {
                        // We know that it is time to send a RTCP packet, is it
                        // a BYE packet
                        if ((rtcpSession.isByeRequested && waitingForByeBackoff)) {
                            // If it is bye then did we ever sent anything
                            if (rtcpSession.timeOfLastRTCPSent > 0 && rtcpSession.timeOfLastRTPSent > 0) {
                                rtcpSession.getMySource().activeSender = false;
                                rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();
                            } else {
                                // We never sent anything and we have to quit :(
                                // do not send BYE
                                terminate = true;
                            }
                        } else {
                            if (!closed) {
                                transmit(assembleRtcpPacket());
                                if (rtcpSession.isByeRequested && !waitingForByeBackoff) {
                                    // We have sent a BYE packet, so terminate
                                    terminate = true;
                                } else {
                                    rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();
                                }
                            } else {
                                terminate = true;
                            }
                        }
                    }
                    waitingForByeBackoff = false;
                } catch (InterruptedException e) {
                    // NOTE(review): an interrupt is interpreted as a BYE
                    // request — confirm which thread interrupts this one.
                    waitingForByeBackoff = true;
                    rtcpSession.isByeRequested = true;
                }
            }
        } catch (Exception e) {
            if (logger.isActivated()) {
                logger.error("Can't send the RTCP packet", e);
            }
        }
    }

    /**
     * Assemble a compound RTCP packet: SR (if we are an active sender that has
     * sent RTP since the last report) or RR, then SDES, then a BYE if one has
     * been requested.
     */
    private byte[] assembleRtcpPacket() {
        byte data[] = new byte[0];

        // Sender or receiver packet
        RtpSource s = rtcpSession.getMySource();
        if ((s.activeSender) && (rtcpSession.timeOfLastRTCPSent < rtcpSession.timeOfLastRTPSent)) {
            data = RtcpPacketUtils.append(data, assembleSenderReportPacket());
        } else {
            data = RtcpPacketUtils.append(data, assembleReceiverReportPacket());
        }

        // SDES packets
        Vector repvec = makereports();
        for (int i = 0; i < repvec.size(); i++) {
            if (repvec.elementAt(i).data != null)
                data = RtcpPacketUtils.append(data, repvec.elementAt(i).data);
        }

        // BYE packet
        RtcpByePacket byepacket = null;
        if (rtcpSession.isByeRequested) {
            int ssrc[] = {rtcpSession.SSRC};
            byepacket = new RtcpByePacket(ssrc, null);
            data = RtcpPacketUtils.append(data, byepacket.data);
        }
        return data;
    }

    /**
     * Assemble a RTCP SR packet.
     *
     * Builds the fixed header (V/P/RC, PT, length), SSRC, sender info (NTP
     * and RTP timestamps, packet/octet counts) and appends the reception
     * report blocks.
     *
     * NOTE(review): the "NTP" timestamp is written as currentTime() — i.e.
     * milliseconds since the Unix epoch, not NTP wall-clock format — confirm
     * peers tolerate this.
     *
     * @return packet data
     */
    private byte[] assembleSenderReportPacket() {
        final int FIXED_HEADER_SIZE = 4;
        // Version 2, no padding; reception-report count is OR-ed in below
        byte V_P_RC = (byte)((RtcpPacket.VERSION << 6) | (RtcpPacket.PADDING << 5) | (0x00));
        byte ss[] = RtcpPacketUtils.longToBytes(rtcpSession.SSRC, 4);
        byte PT[] = RtcpPacketUtils.longToBytes((long)RtcpPacket.RTCP_SR, 1);
        byte NTP_TimeStamp[] = RtcpPacketUtils.longToBytes(rtcpSession.currentTime(), 8);
        // Small random offset added to the RTP timestamp
        short randomOffset = (short)Math.abs(rand.nextInt() & 0x000000FF);
        byte RTP_TimeStamp[] = RtcpPacketUtils.longToBytes((long)rtcpSession.tc + randomOffset, 4);
        byte SenderPacketCount[] = RtcpPacketUtils.longToBytes(rtcpSession.packetCount, 4);
        byte SenderOctetCount[] = RtcpPacketUtils.longToBytes(rtcpSession.octetCount, 4);

        // Report block (each block is 24 bytes)
        byte receptionReportBlocks[] = new byte[0];
        receptionReportBlocks = RtcpPacketUtils.append(receptionReportBlocks,
                assembleRTCPReceptionReport());
        byte receptionReports = (byte)(receptionReportBlocks.length / 24);
        V_P_RC = (byte)(V_P_RC | (byte)(receptionReports & 0x1F));

        // Length is 32 bit words contained in the packet -1
        byte length[] = RtcpPacketUtils.longToBytes((FIXED_HEADER_SIZE + ss.length
                + NTP_TimeStamp.length + RTP_TimeStamp.length + SenderPacketCount.length
                + SenderOctetCount.length + receptionReportBlocks.length) / 4 - 1, 2);

        // Build RTCP SR Packet
        byte rtcpSRPacket[] = new byte[1];
        rtcpSRPacket[0] = V_P_RC;
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, PT);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, length);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, ss);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, NTP_TimeStamp);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, RTP_TimeStamp);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, SenderPacketCount);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, SenderOctetCount);
        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, receptionReportBlocks);
        return rtcpSRPacket;
    }

    /**
     * Assemble a RTCP RR packet: fixed header, SSRC and reception report
     * blocks (no sender info section).
     *
     * @return packet data
     */
    private byte[] assembleReceiverReportPacket() {
        final int FIXED_HEADER_SIZE = 4;
        byte V_P_RC = (byte)((RtcpPacket.VERSION << 6) | (RtcpPacket.PADDING << 5) | (0x00));
        byte ss[] = RtcpPacketUtils.longToBytes(rtcpSession.SSRC, 4);
        byte PT[] = RtcpPacketUtils.longToBytes((long)RtcpPacket.RTCP_RR, 1);

        // Report block
        byte receptionReportBlocks[] = new byte[0];
        receptionReportBlocks = RtcpPacketUtils.append(receptionReportBlocks,
                assembleRTCPReceptionReport());
        byte receptionReports = (byte)(receptionReportBlocks.length / 24);
        V_P_RC = (byte)(V_P_RC | (byte)(receptionReports & 0x1F));
        byte length[] = RtcpPacketUtils.longToBytes(
                (FIXED_HEADER_SIZE + ss.length + receptionReportBlocks.length) / 4 - 1, 2);

        // Build RTCP RR Packet
        byte RRPacket[] = new byte[1];
        RRPacket[0] = V_P_RC;
        RRPacket = RtcpPacketUtils.append(RRPacket, PT);
        RRPacket = RtcpPacketUtils.append(RRPacket, length);
        RRPacket = RtcpPacketUtils.append(RRPacket, ss);
        RRPacket = RtcpPacketUtils.append(RRPacket, receptionReportBlocks);
        return RRPacket;
    }

    /**
     * Assemble a RTCP reception report block (24 bytes) from the statistics
     * of our own RTP source.
     *
     * @return report data
     */
    private byte[] assembleRTCPReceptionReport() {
        byte reportBlock[] = new byte[0];
        RtpSource source = rtcpSession.getMySource();
        // Refresh loss/jitter counters before serializing them
        source.updateStatistics();
        byte SSRC[] = RtcpPacketUtils.longToBytes((long)source.SSRC, 4);
        byte fraction_lost[] = RtcpPacketUtils.longToBytes((long)source.fraction, 1);
        byte pkts_lost[] = RtcpPacketUtils.longToBytes((long)source.lost, 3);
        byte last_seq[] = RtcpPacketUtils.longToBytes((long)source.last_seq, 4);
        byte jitter[] = RtcpPacketUtils.longToBytes((long)source.jitter, 4);
        byte lst[] = RtcpPacketUtils.longToBytes((long)source.lst, 4);
        byte dlsr[] = RtcpPacketUtils.longToBytes((long)source.dlsr, 4);

        reportBlock = RtcpPacketUtils.append(reportBlock, SSRC);
        reportBlock = RtcpPacketUtils.append(reportBlock, fraction_lost);
        reportBlock = RtcpPacketUtils.append(reportBlock, pkts_lost);
        reportBlock = RtcpPacketUtils.append(reportBlock, last_seq);
        reportBlock = RtcpPacketUtils.append(reportBlock, jitter);
        reportBlock = RtcpPacketUtils.append(reportBlock, lst);
        reportBlock = RtcpPacketUtils.append(reportBlock, dlsr);
        return reportBlock;
    }

    /**
     * Send a BYE packet (SDES report + BYE in one compound packet), and mark
     * our source as no longer an active sender.
     */
    public void sendByePacket() {
        // Create a report
        Vector repvec = makereports();
        RtcpPacket[] packets = new RtcpPacket[repvec.size() + 1];
        repvec.copyInto(packets);

        // Create a RTCP bye packet
        int ssrc[] = {rtcpSession.SSRC};
        RtcpByePacket rtcpbyepacket = new RtcpByePacket(ssrc, null);
        packets[packets.length - 1] = rtcpbyepacket;

        // Create a RTCP compound packet
        RtcpCompoundPacket cp = new RtcpCompoundPacket(packets);
        rtcpSession.getMySource().activeSender = false;

        // Send the RTCP packet
        transmit(cp);
    }

    /**
     * Generate a RTCP report: a single SDES packet carrying our SSRC and
     * CNAME item.
     *
     * @return Vector of report packets
     */
    public Vector makereports() {
        Vector packets = new Vector();
        RtcpSdesPacket rtcpsdespacket = new RtcpSdesPacket(new RtcpSdesBlock[1]);
        rtcpsdespacket.sdes[0] = new RtcpSdesBlock();
        rtcpsdespacket.sdes[0].ssrc = rtcpSession.SSRC;
        Vector vector = new Vector();
        vector.addElement(new RtcpSdesItem(1, RtpSource.CNAME));
        rtcpsdespacket.sdes[0].items = new RtcpSdesItem[vector.size()];
        vector.copyInto(rtcpsdespacket.sdes[0].items);
        packets.addElement(rtcpsdespacket);
        return packets;
    }

    /**
     * Transmit a RTCP compound packet to the remote destination.
     *
     * Updates transmission statistics and the session's average-RTCP-size
     * estimate before sending. Send failures are logged, not rethrown.
     *
     * @param packet Compound packet to be sent
     */
    private void transmit(RtcpCompoundPacket packet) {
        // Prepare data to be sent: compact into a fresh array when the
        // payload does not start at offset 0 (arraycopy reads the old array
        // reference before the inline reassignment of 'data' takes effect)
        byte[] data = packet.data;
        if (packet.offset > 0) {
            System.arraycopy(data, packet.offset, data = new byte[packet.length], 0, packet.length);
        }

        // Update statistics
        stats.numBytes += packet.length;
        stats.numPackets++;
        rtcpSession.updateavgrtcpsize(packet.length);
        rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();

        // Send data over UDP
        try {
            datagramConnection.send(remoteAddress, remotePort, data);
        } catch(IOException e) {
            if (logger.isActivated()) {
                logger.error("Can't send the RTCP packet", e);
            }
        }
    }

    /**
     * Transmit a RTCP compound packet (raw bytes) to the remote destination.
     *
     * @param packet Compound packet to be sent
     */
    private void transmit(byte packet[]) {
        // Update statistics
        stats.numBytes += packet.length;
        stats.numPackets++;
        rtcpSession.updateavgrtcpsize(packet.length);
        rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();

        // Send data over UDP
        try {
            datagramConnection.send(remoteAddress, remotePort, packet);
        } catch (IOException e) {
            if (logger.isActivated()) {
                logger.error("Can't send the RTCP packet", e);
            }
        }
    }

    /**
     * Returns the statistics of RTCP transmission
     *
     * @return Statistics
     */
    public RtcpStatisticsTransmitter getStatistics() {
        return stats;
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacketUtils.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

/**
 * RTCP utils.
 *
 * @author hlxn7157
 */
public class RtcpPacketUtils {
    /**
     * Convert 64 bit long to n bytes (big-endian, truncating to the low-order
     * n bytes of the value).
     *
     * @param data data
     * @param n desired number of bytes to convert the long to.
     * @return buffer
     */
    public static byte[] longToBytes(long data, int n) {
        byte buf[] = new byte[n];
        for (int i = n - 1; i >= 0; i--) {
            buf[i] = (byte)data;
            data = data >> 8;
        }
        return buf;
    }

    /**
     * Append two byte arrays.
     *
     * @param pck1 first packet.
* @param pck2 second packet. * @return concatenated packet. */ public static byte[] append(byte[] pck1, byte[] pck2) { byte packet[] = new byte[pck1.length + pck2.length]; for (int i = 0; i < pck1.length; i++) packet[i] = pck1[i]; for (int i = 0; i < pck2.length; i++) packet[i + pck1.length] = pck2[i]; return packet; } }; ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpReceiverReportPacket.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.io.DataOutputStream; import java.io.IOException; /** * RCTP RR packet * * @author jexa7410 */ public class RtcpReceiverReportPacket extends RtcpPacket { public int ssrc; public RtcpReport[] reports; public RtcpReceiverReportPacket(int i, RtcpReport[] rtcpreportblocks) { ssrc = i; reports = rtcpreportblocks; if (rtcpreportblocks.length > 31) throw new IllegalArgumentException("Too many reports"); } public RtcpReceiverReportPacket(RtcpPacket rtcppacket) { super(rtcppacket); type = 201; } public void assemble(DataOutputStream dataoutputstream) throws IOException { dataoutputstream.writeByte(128 + reports.length); dataoutputstream.writeByte(201); dataoutputstream.writeShort(1 + reports.length * 6); dataoutputstream.writeInt(ssrc); for (int i = 0; i < reports.length; i++) { dataoutputstream.writeInt(reports[i].ssrc); dataoutputstream.writeInt((reports[i].packetslost & 0xffffff) + (reports[i].fractionlost << 24)); dataoutputstream.writeInt((int) reports[i].lastseq); dataoutputstream.writeInt(reports[i].jitter); dataoutputstream.writeInt((int) reports[i].lsr); dataoutputstream.writeInt((int) reports[i].dlsr); } } public int calcLength() { return 8 + reports.length * 24; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpReport.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; /** * RTCP report * * @author jexa7410 */ public class RtcpReport { public int ssrc; public int fractionlost; public int packetslost; public long lastseq; public int jitter; public long lsr; public long dlsr; public long receiptTime; public long getDLSR() { return dlsr; } public int getFractionLost() { return fractionlost; } public long getJitter() { return (long) jitter; } public long getLSR() { return lsr; } public long getNumLost() { return (long) packetslost; } public long getSSRC() { return (long) ssrc; } public long getXtndSeqNum() { return lastseq; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSdesBlock.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; /** * RCTP SDES block * * @author jexa7410 */ public class RtcpSdesBlock { public int ssrc; public RtcpSdesItem[] items; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSdesItem.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; /** * RCTP SDES item * * @author jexa7410 */ public class RtcpSdesItem { public int type; public byte[] data; public RtcpSdesItem() { } public RtcpSdesItem(int i, String string) { type = i; data = string.getBytes(); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSdesPacket.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.io.DataOutputStream; import java.io.IOException; /** * RCTP SDES packet * * @author jexa7410 */ public class RtcpSdesPacket extends RtcpPacket { public RtcpSdesBlock sdes[]; public RtcpSdesPacket(RtcpPacket parent) { super(parent); super.type = 202; } public RtcpSdesPacket(RtcpSdesBlock sdes[]) { this.sdes = sdes; if (sdes.length > 31) { throw new IllegalArgumentException("Too many SDESs"); } else { return; } } public int calcLength() { int len = 4; for (int i = 0; i < sdes.length; i++) { int sublen = 5; for (int j = 0; j < sdes[i].items.length; j++) { sublen += 2 + sdes[i].items[j].data.length; } sublen = sublen + 3 & -4; len += sublen; } return len; } public void assemble(DataOutputStream out) throws IOException { out.writeByte(128 + sdes.length); out.writeByte(202); out.writeShort(calcLength() - 4 >> 2); for (int i = 0; i < sdes.length; i++) { out.writeInt(sdes[i].ssrc); int sublen = 0; for (int j = 0; j < sdes[i].items.length; j++) { out.writeByte(sdes[i].items[j].type); out.writeByte(sdes[i].items[j].data.length); out.write(sdes[i].items[j].data); sublen += 2 + sdes[i].items[j].data.length; } for (int j = (sublen + 4 & -4) - sublen; j > 0; j--) { out.writeByte(0); } } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSenderReportPacket.java ================================================ 
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

import java.io.DataOutputStream;
import java.io.IOException;

/**
 * RTCP SR (Sender Report) packet: sender info (NTP/RTP timestamps,
 * packet/octet counts) plus up to 31 reception report blocks.
 *
 * @author jexa7410
 */
public class RtcpSenderReportPacket extends RtcpPacket {
    public int ssrc;
    public long ntptimestampmsw;
    public long ntptimestamplsw;
    public long rtptimestamp;
    public long packetcount;
    public long octetcount;
    public RtcpReport[] reports;

    /**
     * Builds an SR packet for transmission.
     *
     * @param senderSsrc SSRC of the sender
     * @param blocks reception report blocks (max 31)
     */
    public RtcpSenderReportPacket(int senderSsrc, RtcpReport[] blocks) {
        ssrc = senderSsrc;
        reports = blocks;
        if (blocks.length > 31) {
            throw new IllegalArgumentException("Too many reports");
        }
    }

    /**
     * Wraps a received packet as an SR (payload type 200).
     */
    public RtcpSenderReportPacket(RtcpPacket parent) {
        super(parent);
        type = 200;
    }

    /**
     * Serializes the SR: header (V=2, report count, PT=200, length in 32-bit
     * words minus one), SSRC, sender info, then one 24-byte block per report.
     */
    public void assemble(DataOutputStream out) throws IOException {
        out.writeByte(128 + reports.length);
        out.writeByte(200);
        out.writeShort(6 + reports.length * 6);
        out.writeInt(ssrc);
        out.writeInt((int) ntptimestampmsw);
        out.writeInt((int) ntptimestamplsw);
        out.writeInt((int) rtptimestamp);
        out.writeInt((int) packetcount);
        out.writeInt((int) octetcount);
        for (RtcpReport report : reports) {
            out.writeInt(report.ssrc);
            // Fraction lost in the top byte, cumulative loss in the low 24 bits
            out.writeInt((report.packetslost & 0xffffff) + (report.fractionlost << 24));
            out.writeInt((int) report.lastseq);
            out.writeInt(report.jitter);
            out.writeInt((int) report.lsr);
            out.writeInt((int) report.dlsr);
        }
    }

    /**
     * @return total serialized size in bytes (28-byte header and sender info
     *         + 24 per report block)
     */
    public int calcLength() {
        return 28 + reports.length * 24;
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSession.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.util.Date; import java.util.Random; /** * RTCP Session Information * * @author hlxn7157 */ public class RtcpSession { /** * minimum time between RTCP message (ms) */ private static final int RTCP_MIN_TIME = 5000; /** * fraction of RTCP sender messages */ private static final double RTCP_SENDER_BW_FRACTION = 0.25; /** * fraction of RTCP receiver messages */ private static final double RTCP_RCVR_BW_FRACTION = 0.75; /** * bandwidth */ private double bandwidth; /** * rtcp bandwidth */ private double rtcp_bandwidth; /** * minimum time between RTCP message (ms) */ private int rtcp_min_time; /** * RTCP average packet size */ private int avgrtcpsize; /** * no of members */ private int members; /** * no of senders */ private int senders; /** * initial state */ private Boolean initial; /** * is sender ? */ private Boolean isSender; /** * True if session instantiator requested a close. */ public boolean isByeRequested = false; /** * Time this source last sent an RTP Packet */ public double timeOfLastRTPSent = 0; /** * The last time an RTCP packet was transmitted. */ public double timeOfLastRTCPSent = 0; /** * The startup time for the application. */ public long appStartupTime; /** * Ramdomized time interval for next RTCP transmission. */ public double T = 0; /** * Synchronization Source identifier for this source. */ public int SSRC; /** * RTP Source */ RtpSource rtpSource; /** * The current time. */ public double tc = 0; /** * Total Number of RTP data packets sent out by this source since starting transmission. */ public long packetCount; /** * Total Number of payload octets (i.e not including header or padding) * sent out by this source since starting transmission. */ public long octetCount; /** * Initialize the Random Number Generator. */ private Random rnd = new Random(); /** * Constructor. 
* * @param isSender is sender * @param bandwidth bandwidth (can set 16000 (16kops 128kbps)) */ public RtcpSession(boolean isSender, double bandwidth) { this.isSender = isSender; members = 2; senders = 1; this.bandwidth = bandwidth; rtcp_bandwidth = 0.05 * bandwidth; rtcp_min_time = RTCP_MIN_TIME; avgrtcpsize = 128; initial = true; // Initialize the Session level variables appStartupTime = currentTime(); timeOfLastRTCPSent = appStartupTime; tc = appStartupTime; SSRC = rnd.nextInt(); packetCount = 0; octetCount = 0; // Init RTP source rtpSource = new RtpSource(SSRC); } /** * Setter of members * * @param members no of members */ public void setMembers(int members) { this.members = members; } /** * Setter of senders * * @param senders no of senders */ public void setSenders(int senders) { this.senders = senders; } /** * Get the interval of RTCP message * * @return interval */ public double getReportInterval() { // Interval double t; // no. of members for computation double n; // initial half the min delay for quicker notification if (initial) { initial = false; rtcp_min_time /= 2; } // If there were active senders, give them at least a minimum share of // the RTCP bandwidth. Otherwise all participants share the RTCP // bandwidth equally. n = members; if (senders > 0 && senders < members * RTCP_SENDER_BW_FRACTION) { if (isSender) { rtcp_bandwidth *= RTCP_SENDER_BW_FRACTION; n = senders; } else { rtcp_bandwidth *= RTCP_RCVR_BW_FRACTION; n -= senders; } } // get interval t = (double)avgrtcpsize * n / bandwidth; if (t < rtcp_min_time) t = rtcp_min_time; // add noise to avoid traffic bursts t *= (Math.random() + 0.5); T = t; return t; } /** * Update the average RTCP packet size * * @param size */ public void updateavgrtcpsize(int size) { avgrtcpsize = (int)(0.0625 * (double)size + 0.9375 * (double)avgrtcpsize); } /** * Returns a self source object. * * @return My source object. 
*/ public RtpSource getMySource() { return rtpSource; } /** * Returns current time from the Date().getTime() function. * * @return The current time. */ public long currentTime() { tc = (new Date()).getTime(); return (long)tc; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpStatisticsReceiver.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

/**
 * RTCP packet statistics receiver.
 *
 * Plain counter holder; fields are incremented directly by the receiving
 * code.
 *
 * @author jexa7410
 */
public class RtcpStatisticsReceiver {
    /**
     * Number of RTCP packets received
     */
    public int numRtcpPkts = 0;

    /**
     * Number of RTCP bytes received
     */
    public int numRtcpBytes = 0;

    /**
     * Number of RTCP SR packets received
     */
    public int numSrPkts = 0;

    /**
     * Number of bad RTCP packets received
     */
    public int numBadRtcpPkts = 0;

    /**
     * Number of unknown RTCP packets received
     */
    public int numUnknownTypes = 0;

    /**
     * Number of malformed RTCP packets received
     */
    public int numMalformedRtcpPkts = 0;
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpStatisticsTransmitter.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.core;

/**
 * RTCP packet statistics transmitter.
 *
 * Plain counter holder; fields are incremented directly by
 * RtcpPacketTransmitter.transmit().
 *
 * @author jexa7410
 */
public class RtcpStatisticsTransmitter {
    /**
     * Total number of packets sent
     */
    public int numPackets = 0;

    /**
     * Total number of bytes sent
     */
    public int numBytes = 0;
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpPacket.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet; /** * Abstract RTP packet * * @author jexa7410 */ public class RtpPacket extends Packet { public Packet base; public int marker; public int payloadType; public int seqnum; public long timestamp; public int ssrc; public int payloadoffset; public int payloadlength; public RtpPacket() { super(); } public RtpPacket(Packet packet) { super(packet); base = packet; } public void assemble(int length) throws IOException { this.length = length; this.offset = 0; ByteArrayOutputStream bytearrayoutputstream = new ByteArrayOutputStream(length); DataOutputStream dataoutputstream = new DataOutputStream(bytearrayoutputstream); dataoutputstream.writeByte(128); int i = payloadType; if (marker == 1) { i = payloadType | 0x80; } dataoutputstream.writeByte((byte) i); dataoutputstream.writeShort(seqnum); dataoutputstream.writeInt((int) timestamp); dataoutputstream.writeInt(ssrc); dataoutputstream.write(base.data, payloadoffset, payloadlength); data = bytearrayoutputstream.toByteArray(); } public int calcLength() { return payloadlength + 12; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpPacketReceiver.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.io.IOException; import com.orangelabs.rcs.platform.network.DatagramConnection; import com.orangelabs.rcs.platform.network.NetworkFactory; import com.orangelabs.rcs.utils.logger.Logger; /** * RTP packet receiver * * @author jexa7410 */ public class RtpPacketReceiver { /** * Max datagram packet size */ private static int DEFAULT_DATAGRAM_SIZE = 4096; /** * Statistics */ private RtpStatisticsReceiver stats = new RtpStatisticsReceiver(); /** * Flag that indicates if the received buffer size has been set or not */ private boolean recvBufSizeSet = false; /** * Buffer size needed to received RTP packet */ private int bufferSize = DEFAULT_DATAGRAM_SIZE; /** * Datagram connection */ public DatagramConnection datagramConnection = null; /** * RTCP Session */ private RtcpSession rtcpSession = null; /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param port Listenning port * @throws IOException */ public RtpPacketReceiver(int port, RtcpSession rtcpSession) throws IOException { this.rtcpSession = rtcpSession; // Create the UDP server datagramConnection = NetworkFactory.getFactory().createDatagramConnection(); datagramConnection.open(port); if (logger.isActivated()) { logger.debug("RTP receiver created on port " + port); } } /** * Close the receiver */ public void close() { // Close the datagram connection if (datagramConnection != null) { try { datagramConnection.close(); 
} catch(Exception e) { if (logger.isActivated()) { logger.warn("Can't close correctly the datagram connection"); } } datagramConnection = null; } } /** * Read a RTP packet (blocking method) * * @return RTP packet */ public RtpPacket readRtpPacket() { try { // Wait a new packet byte[] data = datagramConnection.receive(bufferSize); // Parse the RTP packet RtpPacket pkt = parseRtpPacket(data); if (pkt.payloadType != 12) { // Update statistics stats.numPackets++; stats.numBytes += data.length; RtpSource s = rtcpSession.getMySource(); s.activeSender = true; s.timeOfLastRTPArrival = rtcpSession.currentTime(); s.updateSeq(pkt.seqnum); if (s.noOfRTPPacketsRcvd == 0) s.base_seq = pkt.seqnum; s.noOfRTPPacketsRcvd++; return pkt; } else { // Drop the keep-alive packets (payload 12) return readRtpPacket(); } } catch (Exception e) { if (logger.isActivated()) { logger.error("Can't parse the RTP packet", e); } stats.numBadRtpPkts++; return null; } } /** * Set the size of the received buffer * * @param size New buffer size */ public void setRecvBufSize(int size) { this.bufferSize = size; } /** * Parse the RTP packet * * @param data RTP packet not yet parsed * @return RTP packet */ private RtpPacket parseRtpPacket(byte[] data) { RtpPacket packet = new RtpPacket(); try { // Read RTP packet length packet.length = data.length; // Set received timestamp packet.receivedAt = System.currentTimeMillis(); // Read marker if ((byte)((data[1] & 0xff) & 0x80) == (byte) 0x80){ packet.marker = 1; }else{ packet.marker = 0; } // Read payload type packet.payloadType = (byte) ((data[1] & 0xff) & 0x7f); // Read seq number packet.seqnum = (short)((data[2] << 8) | (data[3] & 0xff)); // Read timestamp packet.timestamp = (((data[4] & 0xff) << 24) | ((data[5] & 0xff) << 16) | ((data[6] & 0xff) << 8) | (data[7] & 0xff)); // Read SSRC packet.ssrc = (((data[8] & 0xff) << 24) | ((data[9] & 0xff) << 16) | ((data[10] & 0xff) << 8) | (data[11] & 0xff)); // Read media data after the 12 byte header which is constant 
packet.payloadoffset = 12; packet.payloadlength = packet.length - packet.payloadoffset; packet.data = new byte[packet.payloadlength]; System.arraycopy(data, packet.payloadoffset, packet.data, 0, packet.payloadlength); // Update the buffer size if (!recvBufSizeSet) { recvBufSizeSet = true; switch (packet.payloadType) { case 14: case 26: case 34: case 42: setRecvBufSize(64000); break; case 31: setRecvBufSize(0x1f400); break; case 32: setRecvBufSize(0x1f400); break; default: if ((packet.payloadType >= 96) && (packet.payloadType <= 127)) { setRecvBufSize(64000); } break; } } } catch (Exception e) { if (logger.isActivated()) { logger.error("RTP packet parsing error", e); } return null; } return packet; } /** * Returns the statistics of RTP reception * * @return Statistics */ public RtpStatisticsReceiver getRtpReceptionStats() { return stats; } /** * Returns the DatagramConnection of RTP * * @return DatagramConnection */ public DatagramConnection getConnection() { return datagramConnection; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpPacketTransmitter.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.core; import java.io.IOException; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet; import com.orangelabs.rcs.platform.network.DatagramConnection; import com.orangelabs.rcs.platform.network.NetworkFactory; import com.orangelabs.rcs.utils.logger.Logger; import de.kp.net.rtp.RtpSender; /** * RTP packet transmitter * * @author jexa7410 */ public class RtpPacketTransmitter { /** * Sequence number */ private int seqNumber = 0; /** * Remote address */ private String remoteAddress; /** * Remote port */ private int remotePort; /** * Statistics */ private RtpStatisticsTransmitter stats = new RtpStatisticsTransmitter(); /** * Datagram connection */ private DatagramConnection datagramConnection = null; /** * RTCP Session */ private RtcpSession rtcpSession = null; /** * The logger */ private final Logger logger = Logger.getLogger(this.getClass().getName()); // TODO: use Transmitter for its buildRtpPacket functionality public RtpPacketTransmitter(RtcpSession rtcpSession) { this.rtcpSession = rtcpSession; if (logger.isActivated()) { logger.debug("RTP broadcast transmitter initiated with SSCR: " + this.rtcpSession.SSRC); } } /** * Constructor * * @param address Remote address * @param port Remote port * @throws IOException */ public RtpPacketTransmitter(String address, int port, RtcpSession rtcpSession) throws IOException { this.remoteAddress = address; this.remotePort = port; this.rtcpSession = rtcpSession; datagramConnection = NetworkFactory.getFactory().createDatagramConnection(); datagramConnection.open(); if (logger.isActivated()) { logger.debug("RTP transmitter connected to " + remoteAddress + ":" + remotePort); } } /** * Constructor - used for SYMETRIC_RTP * * @param address Remote address * @param port Remote port * @param DatagramConnection datagram connection of the 
RtpPacketReceiver * @throws IOException */ public RtpPacketTransmitter(String address, int port, RtcpSession rtcpSession, DatagramConnection connection) throws IOException { this.remoteAddress = address; this.remotePort = port; this.rtcpSession = rtcpSession; if (connection != null) { this.datagramConnection = connection; } else { this.datagramConnection = NetworkFactory.getFactory().createDatagramConnection(); this.datagramConnection.open(); } if (logger.isActivated()) { logger.debug("RTP transmitter connected to " + remoteAddress + ":" + remotePort); } } /** * Close the transmitter * * @throws IOException */ public void close() throws IOException { // Close the datagram connection if (datagramConnection != null) { datagramConnection.close(); } if (logger.isActivated()) { logger.debug("RTP transmitter closed"); } } /** * Send a RTP packet * * @param buffer Input buffer * @throws IOException */ public void sendRtpPacket(Buffer buffer) throws IOException { // Build a RTP packet RtpPacket packet = buildRtpPacket(buffer); if (packet == null) { return; } // Assemble RTP packet int size = packet.calcLength(); packet.assemble(size); // Send the RTP packet to the remote destination transmit(packet); } /** * Build a RTP packet * * @param buffer Input buffer * @return RTP packet */ private RtpPacket buildRtpPacket(Buffer buffer) { byte data[] = (byte[])buffer.getData(); if (data == null) { return null; } Packet packet = new Packet(); packet.data = data; packet.offset = 0; packet.length = buffer.getLength(); RtpPacket rtppacket = new RtpPacket(packet); if ((buffer.getFlags() & 0x800) != 0) { rtppacket.marker = 1; } else { rtppacket.marker = 0; } rtppacket.payloadType = buffer.getFormat().getPayload(); rtppacket.seqnum = seqNumber++; rtppacket.timestamp = buffer.getTimeStamp(); rtppacket.ssrc = rtcpSession.SSRC; rtppacket.payloadoffset = buffer.getOffset(); rtppacket.payloadlength = buffer.getLength(); return rtppacket; } /** * Transmit a RTCP compound packet to the remote 
destination * * @param packet RTP packet * @throws IOException */ private void transmit(Packet packet) { // Prepare data to be sent byte[] data = packet.data; if (packet.offset > 0) { System.arraycopy(data, packet.offset, data = new byte[packet.length], 0, packet.length); } // broadcast data try { RtpSender.getInstance().send(data); } catch (IOException e1) { // TODO Auto-generated catch block e1.printStackTrace(); if (logger.isActivated()) { logger.error("Can't broadcast the RTP packet", e1); } } // // Update statistics // stats.numBytes += packet.length; // stats.numPackets++; // // // Send data over UDP // try { // datagramConnection.send(remoteAddress, remotePort, data); // // RtpSource s = rtcpSession.getMySource(); // s.activeSender = true; // rtcpSession.timeOfLastRTPSent = rtcpSession.currentTime(); // rtcpSession.packetCount++; // rtcpSession.octetCount += data.length; // } catch (IOException e) { // if (logger.isActivated()) { // logger.error("Can't send the RTP packet", e); // } // } } /** * Returns the statistics of RTP transmission * * @return Statistics */ public RtpStatisticsTransmitter getStatistics() { return stats; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpSource.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/**
 * RTP source: per-SSRC reception state used to build RTCP reception
 * reports (packet loss, sequence tracking, inter-arrival statistics).
 *
 * @author jexa7410
 */
public class RtpSource {
    /**
     * CNAME value
     */
    public static String CNAME = "anonymous@127.0.0.1";

    /**
     * SSRC
     */
    public int SSRC;

    /**
     * Fraction of RTP data packets from source SSRC lost since the previous
     * SR or RR packet was sent, expressed as a fixed point number with the
     * binary point at the left edge of the field. To get the actual fraction
     * multiply by 256 and take the integral part
     */
    public double fraction;

    /**
     * Cumulative number of packets lost (signed 24bits).
     */
    public long lost;

    /**
     * Extended highest sequence number received.
     */
    public long last_seq;

    /**
     * Interarrival jitter.
     */
    public long jitter;

    /**
     * Last SR Packet from this source.
     */
    public long lst;

    /**
     * Delay since last SR packet.
     */
    public double dlsr;

    /**
     * Is this source an ActiveSender.
     */
    public boolean activeSender;

    /**
     * Time the last RTCP Packet was received from this source.
     */
    public double timeOfLastRTCPArrival;

    /**
     * Time the last RTP Packet was received from this source.
     */
    public double timeOfLastRTPArrival;

    /**
     * Time the last Sender Report RTCP Packet was received from this source.
     */
    public double timeofLastSRRcvd;

    /**
     * Total Number of RTP Packets Received from this source
     */
    public int noOfRTPPacketsRcvd;

    /**
     * Sequence Number of the first RTP packet received from this source
     */
    public long base_seq;

    /**
     * Number of RTP Packets Expected from this source
     */
    public long expected;

    /**
     * No of RTP Packets expected last time a Reception Report was sent
     */
    public long expected_prior;

    /**
     * No of RTP Packets received last time a Reception Report was sent
     */
    public long received_prior;

    /**
     * Highest Sequence number received from this source
     */
    public long max_seq;

    /**
     * Keep track of the wrapping around of RTP sequence numbers, since RTP
     * Seq No. are only 16 bits
     */
    public long cycles;

    /**
     * Since Packets lost is a 24 bit number, it should be clamped at
     * WRAPMAX = 0xFFFFFFFF.
     *
     * The literal must carry the L suffix: the int literal 0xFFFFFFFF is -1
     * and, widened to long, made every updateSeq() comparison against
     * 0.5 * WRAPMAX compare with -0.5, corrupting the cycle count on any
     * out-of-order packet.
     */
    public long WRAPMAX = 0xFFFFFFFFL;

    /**
     * Constructor requires an SSRC for it to be a valid source. The constructor
     * initializes all the source class members to a default value
     *
     * @param sourceSSRC SSRC of the new source
     */
    RtpSource(int sourceSSRC) {
        long time = currentTime();

        SSRC = sourceSSRC;
        fraction = 0;
        lost = 0;
        last_seq = 0;
        jitter = 0;
        lst = 0;
        dlsr = 0;
        activeSender = false;
        timeOfLastRTCPArrival = time;
        timeOfLastRTPArrival = time;
        timeofLastSRRcvd = time;
        noOfRTPPacketsRcvd = 0;
        base_seq = 0;
        expected_prior = 0;
        received_prior = 0;
    }

    /**
     * Returns the extended maximum sequence for a source considering that
     * sequences cycle.
     *
     * @return Sequence Number
     */
    public long getExtendedMax() {
        return (cycles + max_seq);
    }

    /**
     * This safe sequence update function will try to determine if seq has
     * wrapped over resulting in a new cycle. It sets the cycle -- source level
     * variable which keeps track of wraparounds.
     *
     * NOTE(review): with a 16-bit sequence number the difference can never
     * exceed 0.5 * WRAPMAX (~2^31), so the wrap branch is effectively dead;
     * the heuristic probably intended a 16-bit threshold. Left as-is to
     * preserve existing report values.
     *
     * @param seq Sequence Number
     */
    public void updateSeq(long seq) {
        if (max_seq == 0)
            max_seq = seq;
        else {
            if (max_seq - seq > 0.5 * WRAPMAX)
                cycles += WRAPMAX;
            max_seq = seq;
        }
    }

    /**
     * Updates the various statistics for this source e.g. Packets Lost,
     * Fraction lost, Delay since last SR etc, according to the data gathered
     * since a last SR or RR was sent out. This method is called prior to
     * sending a Sender Report (SR) or a Receiver Report (RR) which will
     * include a Reception Report block about this source.
     *
     * @return always 0
     */
    public int updateStatistics() {
        // Highest (extended) sequence number received from this source
        last_seq = getExtendedMax();

        // Packets lost = packets expected - packets actually received
        expected = getExtendedMax() - base_seq + 1;
        lost = expected - noOfRTPPacketsRcvd;

        // Clamp the 24-bit cumulative-loss field
        if (lost > 0xffffff)
            lost = 0xffffff;

        // Fraction lost over the interval since the previous report
        long expected_interval = expected - expected_prior;
        expected_prior = expected;

        long received_interval = noOfRTPPacketsRcvd - received_prior;
        received_prior = noOfRTPPacketsRcvd;

        long lost_interval = expected_interval - received_interval;
        if (expected_interval == 0 || lost_interval <= 0)
            fraction = 0;
        else
            fraction = (lost_interval << 8) / (double) expected_interval;

        // dlsr - expressed in units of 1/65536 seconds.
        // NOTE(review): (last SR time - now) is negative; the sign looks
        // inverted relative to the field's definition — verify before use.
        dlsr = (timeofLastSRRcvd - currentTime()) / 65536;

        return 0;
    }

    /**
     * Returns current wall-clock time in milliseconds.
     *
     * @return The current time.
     */
    private static long currentTime() {
        return (long) ((new Date()).getTime());
    }
}
/**
 * Counters for incoming RTP traffic.
 *
 * @author jexa7410
 */
public class RtpStatisticsReceiver {
    /** Number of RTP packets received so far. */
    public int numPackets = 0;

    /** Number of RTP bytes received so far. */
    public int numBytes = 0;

    /** Number of malformed RTP packets received so far. */
    public int numBadRtpPkts = 0;
}
/**
 * Counters for outgoing RTP traffic.
 *
 * @author jexa7410
 */
public class RtpStatisticsTransmitter {
    /** Total number of RTP packets sent so far. */
    public int numPackets = 0;

    /** Total number of RTP bytes sent so far. */
    public int numBytes = 0;
}
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpAppPacket; /** * RTCP application event * * @author jexa7410 */ public class RtcpApplicationEvent extends RtcpEvent { /** * Constructor * * @param packet RTCP APP packet */ public RtcpApplicationEvent(RtcpAppPacket packet) { super(packet); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpByeEvent.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpByePacket; /** * RTCP bye event * * @author jexa7410 */ public class RtcpByeEvent extends RtcpEvent { /** * Constructor * * @param packet RTCP BYE packet */ public RtcpByeEvent(RtcpByePacket packet) { super(packet); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpEvent.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpPacket; /** * Abstract RTCP event * * @author jexa7410 */ public abstract class RtcpEvent { /** * RTCP packet */ private RtcpPacket packet; /** * Constructor * * @param packet RTCP packet */ public RtcpEvent(RtcpPacket packet) { this.packet = packet; } /** * Returns the RTCP packet * * @return Packet */ public RtcpPacket getPacket() { return packet; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpEventListener.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; /** * RTCP events listener interface * * @author jexa7410 */ public interface RtcpEventListener { /** * Receive RTCP event * * @param event RTCP event */ void receiveRtcpEvent(RtcpEvent event); } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpReceiverReportEvent.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpReceiverReportPacket; /** * RTCP receiver report event * * @author jexa7410 */ public class RtcpReceiverReportEvent extends RtcpEvent { /** * Constructor * * @param packet RTCP RR packet */ public RtcpReceiverReportEvent(RtcpReceiverReportPacket packet) { super(packet); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpSdesEvent.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSdesPacket; /** * RTCP session description event * * @author jexa7410 */ public class RtcpSdesEvent extends RtcpEvent { /** * Constructor * * @param packet RTCP SDES packet */ public RtcpSdesEvent(RtcpSdesPacket packet) { super(packet); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpSenderReportEvent.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.event; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSenderReportPacket; /** * RTCP sender report event * * @author jexa7410 */ public class RtcpSenderReportEvent extends RtcpEvent { /** * Constructor * * @param packet RTCP SR packet */ public RtcpSenderReportEvent(RtcpSenderReportPacket packet) { super(packet); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/DummyFormat.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.format; /** * Dummy format * * @author jexa7410 */ public class DummyFormat extends Format { /** * Encoding name */ public static final String ENCODING = "dummy"; /** * Payload type */ public static final int PAYLOAD = 12; /** * Constructor */ public DummyFormat() { super(ENCODING, PAYLOAD); } /** * Get the size of a chunk of data from the source * * @return The minimum size of the buffer needed to read a chunk of data */ public int getDataChunkSize() { return 0; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/Format.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
/**
 * Abstract media format: a codec name paired with its RTP payload type.
 *
 * @author jexa7410
 */
public abstract class Format {
    /** Payload type value meaning "unknown". */
    public static final int UNKNOWN_PAYLOAD = -1;

    /** Codec name (set once at construction). */
    private final String codec;

    /** RTP payload type (set once at construction). */
    private final int payload;

    /**
     * Constructor
     *
     * @param codec Codec
     * @param payload Payload type
     */
    public Format(String codec, int payload) {
        this.codec = codec;
        this.payload = payload;
    }

    /**
     * Get the codec name
     *
     * @return Name
     */
    public String getCodec() {
        return codec;
    }

    /**
     * Get the type of payload
     *
     * @return Payload type
     */
    public int getPayload() {
        return payload;
    }
}
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.format.audio;

import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;

/**
 * Audio format base class.
 */
public class AudioFormat extends Format {
    /**
     * Constructor.
     *
     * @param codec Codec
     * @param payload Payload type
     */
    public AudioFormat(String codec, int payload) {
        super(codec, payload);
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/audio/PcmuAudioFormat.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.format.audio;

/**
 * G711 PCMU audio format (static RTP payload type 0).
 *
 * @author jexa7410
 */
public class PcmuAudioFormat extends AudioFormat {
    /** Encoding name. */
    public static final String ENCODING = "pcmu";

    /** Payload type. */
    public static final int PAYLOAD = 0;

    /**
     * Constructor.
     */
    public PcmuAudioFormat() {
        super(ENCODING, PAYLOAD);
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/video/H263VideoFormat.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.format.video;

/**
 * H263-2000 (h263++) video format (dynamic RTP payload type 97).
 *
 * @author jexa7410
 */
public class H263VideoFormat extends VideoFormat {
    /** Encoding name. */
    public static final String ENCODING = "h263-2000";

    /** Payload type. */
    public static final int PAYLOAD = 97;

    /**
     * Constructor.
     */
    public H263VideoFormat() {
        super(ENCODING, PAYLOAD);
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/video/H264VideoFormat.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.format.video;

/**
 * H264 video format (dynamic RTP payload type 96).
 *
 * @author jexa7410
 */
public class H264VideoFormat extends VideoFormat {
    /** Encoding name. */
    public static final String ENCODING = "h264";

    /** Payload type. */
    public static final int PAYLOAD = 96;

    /**
     * Constructor.
     */
    public H264VideoFormat() {
        super(ENCODING, PAYLOAD);
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/video/VideoFormat.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.format.video; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; /** * Video format */ public class VideoFormat extends Format { /** * Constructor * * @param codec Codec * @param payload Payload type */ public VideoFormat(String codec, int payload) { super(codec, payload); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaException.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.media; /** * Media exception * * @author JM. 
Auffret */ public class MediaException extends java.lang.Exception { static final long serialVersionUID = 1L; /** * Constructor * * @param error Error message */ public MediaException(String error) { super(error); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaInput.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.media; /** * Media input (e.g. camera, microphone) * * @author jexa7410 */ public interface MediaInput { /** * Open the player * * @throws MediaException */ public void open() throws MediaException; /** * Close the player */ public void close(); /** * Read a media sample (blocking method) * * @return Media sample * @throws MediaException */ public MediaSample readSample() throws MediaException; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaOutput.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.media; /** * Media output (e.g. screen, headset) * * @author jexa7410 */ public interface MediaOutput { /** * Open the renderer * * @throws MediaException */ public void open() throws MediaException; /** * Close the renderer */ public void close(); /** * Write a media sample * * @param sample Media sample * @throws MediaException */ public void writeSample(MediaSample sample) throws MediaException; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaSample.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.media;

/**
 * Media sample: a chunk of raw media data with its capture time stamp.
 *
 * @author jexa7410
 */
public class MediaSample {
    /** Data. */
    private byte[] data;

    /** Time stamp. */
    private long time;

    /**
     * Constructor.
     *
     * @param data Data
     * @param time Time stamp
     */
    public MediaSample(byte[] data, long time) {
        this.data = data;
        this.time = time;
    }

    /**
     * Returns the data sample.
     *
     * @return Byte array
     */
    public byte[] getData() {
        return data;
    }

    /**
     * Returns the length of the data sample.
     *
     * @return Data sample length (0 when no data is attached)
     */
    public int getLength() {
        return (data != null) ? data.length : 0;
    }

    /**
     * Returns the time stamp of the sample.
     *
     * @return Time in microseconds
     */
    public long getTimeStamp() {
        return time;
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/DummyPacketSourceStream.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.stream; import com.orangelabs.rcs.core.ims.protocol.rtp.format.DummyFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.core.ims.protocol.rtp.util.SystemTimeBase; import com.orangelabs.rcs.utils.FifoBuffer; import com.orangelabs.rcs.utils.logger.Logger; /** * Dummy packet source stream (used to pass NAT) * * @author jexa7410 */ public class DummyPacketSourceStream extends Thread implements ProcessorInputStream { /** * Source period (in seconds) */ public static int DUMMY_SOURCE_PERIOD = 15; /** * Input format */ private DummyFormat format = new DummyFormat(); /** * Time base */ private SystemTimeBase systemTimeBase = new SystemTimeBase(); /** * Sequence number */ private long seqNo = 0; /** * Message buffer */ private FifoBuffer fifo = new FifoBuffer(); /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Interruption flag */ private boolean interrupted = false; /** * Constructor */ public DummyPacketSourceStream() { } /** * Open the input stream * * @throws Exception */ public void open() throws Exception { start(); if (logger.isActivated()) { logger.debug("Dummy source stream openned"); } } /** * Close the input stream */ public void close() { interrupted = true; try { fifo.close(); } catch(Exception e) { // Intentionally blank } if (logger.isActivated()) { logger.debug("Dummy source stream closed"); } } /** * Format of the data provided by the source stream * * @return Format */ public Format getFormat() { return format; } /** * Background processing */ public void run() { while(!interrupted) { try { // Build a new dummy packet Buffer packet = new Buffer(); packet.setData(new byte[0]); packet.setLength(0); packet.setFormat(format); packet.setSequenceNumber(seqNo++); 
packet.setFlags(Buffer.FLAG_SYSTEM_TIME | Buffer.FLAG_LIVE_DATA); packet.setTimeStamp(systemTimeBase.getTime()); // Post the packet in the FIFO fifo.addObject(packet); // Make a pause Thread.sleep(DUMMY_SOURCE_PERIOD * 1000); } catch(Exception e) { if (logger.isActivated()) { logger.error("Dummy packet source has failed", e); } } } } /** * Read from the stream * * @return Buffer * @throws Exception */ public Buffer read() throws Exception { // Read the FIFO the buffer Buffer buffer = (Buffer)fifo.getObject(); return buffer; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/MediaCaptureStream.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.stream; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaInput; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.utils.logger.Logger; /** * Media capture stream * * @author jexa7410 */ public class MediaCaptureStream implements ProcessorInputStream { /** * Media player */ private MediaInput player; /** * Media format */ private Format format; /** * Sequence number */ private long seqNo = 0; /** * Input buffer */ private Buffer buffer = new Buffer(); /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param format Input format * @param player Media player */ public MediaCaptureStream(Format format, MediaInput player) { this.format = format; this.player = player; } /** * Open the input stream * * @throws Exception */ public void open() throws Exception { try { player.open(); if (logger.isActivated()) { logger.debug("Media capture stream openned"); } } catch(Exception e) { if (logger.isActivated()) { logger.error("Media capture stream failed", e); } throw e; } } /** * Close the input stream */ public void close() { player.close(); if (logger.isActivated()) { logger.debug("Media capture stream closed"); } } /** * Format of the data provided by the source stream * * @return Format */ public Format getFormat() { return format; } /** * Read from the stream * * @return Buffer * @throws Exception */ public Buffer read() throws Exception { // Read a new sample from the media player MediaSample sample = player.readSample(); if (sample == null) { return null; } // Create a buffer buffer.setData(sample.getData()); buffer.setLength(sample.getLength()); buffer.setFormat(format); buffer.setSequenceNumber(seqNo++); 
buffer.setFlags(Buffer.FLAG_SYSTEM_TIME | Buffer.FLAG_LIVE_DATA); buffer.setTimeStamp(sample.getTimeStamp()); return buffer; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/MediaRendererStream.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.stream; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaOutput; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.utils.logger.Logger; /** * Media renderer stream * * @author jexa7410 */ public class MediaRendererStream implements ProcessorOutputStream { /** * Media renderer */ private MediaOutput renderer; /** * The logger */ private final Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param renderer Media renderer */ public MediaRendererStream(MediaOutput renderer) { this.renderer = renderer; } /** * Open the output stream * * @throws Exception */ public void open() throws Exception { try { renderer.open(); if (logger.isActivated()) { logger.debug("Media renderer stream openned"); } } catch(Exception e) { if (logger.isActivated()) { logger.error("Media renderer stream failed", e); } throw e; } } /** * Close the output stream */ public void close() { renderer.close(); if (logger.isActivated()) { logger.debug("Media renderer stream closed"); } } /** * Write to the stream without blocking * * @param buffer Input buffer * @throws Exception */ public void write(Buffer buffer) throws Exception { MediaSample sample = new MediaSample((byte[])buffer.getData(), buffer.getTimeStamp()); renderer.writeSample(sample); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/ProcessorInputStream.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.stream; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; /** * Processor input stream */ public interface ProcessorInputStream { /** * Open the input stream * * @throws Exception */ public void open() throws Exception; /** * Close the input stream */ public void close(); /** * Read from the input stream without blocking * * @return Buffer * @throws Exception */ public Buffer read() throws Exception; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/ProcessorOutputStream.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.stream;

import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;

/**
 * Processor output stream
 */
public interface ProcessorOutputStream {
    /**
     * Open the output stream
     *
     * @throws Exception
     */
    public void open() throws Exception;

    /**
     * Close from the output stream
     */
    public void close();

    /**
     * Write to the stream without blocking
     *
     * @param buffer Input buffer
     * @throws Exception
     */
    public void write(Buffer buffer) throws Exception;
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/RtpInputStream.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.stream; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpPacketReceiver; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSession; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtpPacket; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtpPacketReceiver; import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.utils.logger.Logger; /** * RTP input stream * * @author jexa7410 */ public class RtpInputStream implements ProcessorInputStream { /** * Local port */ private int localPort; /** * RTP receiver */ private RtpPacketReceiver rtpReceiver = null; /** * RTCP receiver */ private RtcpPacketReceiver rtcpReceiver = null; /** * Input buffer */ private Buffer buffer = new Buffer(); /** * Input format */ private Format inputFormat = null; /** * RTCP Session */ private RtcpSession rtcpSession = null; /** * The logger */ private final Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor * * @param localPort Local port * @param inputFormat Input format */ public RtpInputStream(int localPort, Format inputFormat) { this.localPort = localPort; this.inputFormat = inputFormat; rtcpSession = new RtcpSession(false, 16000); } /** * Open the input stream * * @throws Exception */ public void open() throws Exception { // Create the RTP receiver rtpReceiver = new RtpPacketReceiver(localPort, rtcpSession); // Create the RTCP receiver rtcpReceiver = new RtcpPacketReceiver(localPort + 1, rtcpSession); rtcpReceiver.start(); } /** * Close the input stream */ public void close() { try { // Close the RTP receiver if (rtpReceiver != null) { rtpReceiver.close(); } // Close the RTCP receiver if (rtcpReceiver != null) { rtcpReceiver.close(); } } catch(Exception e) { if (logger.isActivated()) { logger.error("Can't close 
correctly RTP ressources", e); } } } /** * Returns the RTP receiver * * @return RTP receiver */ public RtpPacketReceiver getRtpReceiver() { return rtpReceiver; } /** * Returns the RTCP receiver * * @return RTCP receiver */ public RtcpPacketReceiver getRtcpReceiver() { return rtcpReceiver; } /** * Read from the input stream without blocking * * @return Buffer * @throws Exception */ public Buffer read() throws Exception { // Wait and read a RTP packet RtpPacket rtpPacket = rtpReceiver.readRtpPacket(); if (rtpPacket == null) { return null; } // Create a buffer buffer.setData(rtpPacket.data); buffer.setLength(rtpPacket.payloadlength); buffer.setOffset(0); buffer.setFormat(inputFormat); buffer.setSequenceNumber(rtpPacket.seqnum); buffer.setFlags(Buffer.FLAG_RTP_MARKER | Buffer.FLAG_RTP_TIME); buffer.setRTPMarker(rtpPacket.marker!=0); buffer.setTimeStamp(rtpPacket.timestamp); // Set inputFormat back to null inputFormat = null; return buffer; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/util/Buffer.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/
package com.orangelabs.rcs.core.ims.protocol.rtp.util;

import com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;

/**
 * Buffer: a chunk of media data plus its timing, format and flag metadata.
 *
 * @author jexa7410
 */
public class Buffer {
    /** Indicates that this buffer marks the end of media for the data stream. */
    public final static int FLAG_EOM = (1 << 0);

    /** Indicates that the media data should be ignored. */
    public final static int FLAG_DISCARD = (1 << 1);

    /**
     * Indicates that the buffer carries a time stamp that's relative to the
     * SystemTimeBase. This flag is generally set for data transferred from a
     * hardware capture source that uses the system clock.
     */
    public final static int FLAG_SYSTEM_TIME = (1 << 7);

    /** This is a marker bit for RTP. */
    public final static int FLAG_RTP_MARKER = (1 << 11);

    /** Indicates that the buffer carries a time stamp in RTP (NTP) time units. */
    public final static int FLAG_RTP_TIME = (1 << 12);

    /** Indicates that the data is arriving from a live (real-time) source. */
    public final static int FLAG_LIVE_DATA = (1 << 15);

    /** Default value if the time stamp of the media is not known. */
    public final static long TIME_UNKNOWN = -1L;

    /** Default value if the sequence number is not known. */
    public final static long SEQUENCE_UNKNOWN = Long.MAX_VALUE - 1;

    /** The time stamp of the data in nanoseconds. */
    protected long timeStamp = TIME_UNKNOWN;

    /** The format of the data chunk. */
    protected Format format = null;

    /** States how many samples are valid in the array of data. */
    protected int length = 0;

    /** Starting point (offset) into the array where the valid data begins. */
    protected int offset = 0;

    /** A flag mask that describes the boolean attributes of the buffer. */
    protected int flags = 0;

    /** The duration of the data in the buffer in nanoseconds. */
    protected long duration = TIME_UNKNOWN;

    /** Media data chunk. */
    protected Object data = null;

    /** The sequence number. */
    protected long sequenceNumber = SEQUENCE_UNKNOWN;

    /**
     * Get the data format.
     *
     * @return Format
     */
    public Format getFormat() {
        return format;
    }

    /**
     * Set the data format.
     *
     * @param format New format
     */
    public void setFormat(Format format) {
        this.format = format;
    }

    /**
     * Get the flag mask.
     *
     * @return Flag
     */
    public int getFlags() {
        return flags;
    }

    /**
     * Set the flag mask.
     *
     * @param flags New flags
     */
    public void setFlags(int flags) {
        this.flags = flags;
    }

    /**
     * Check if it's the end of the media stream.
     *
     * @return Boolean
     */
    public boolean isEOM() {
        return (flags & FLAG_EOM) != 0;
    }

    /**
     * Set the EOM flag.
     *
     * @param eom EOM status flag
     */
    public void setEOM(boolean eom) {
        if (eom) {
            flags |= FLAG_EOM;
        } else {
            flags &= ~FLAG_EOM;
        }
    }

    /**
     * Check if the RTP marker is set.
     *
     * @return Boolean
     */
    public boolean isRTPMarkerSet() {
        return (flags & FLAG_RTP_MARKER) != 0;
    }

    /**
     * Set the RTP marker.
     *
     * @param marker RTP marker flag
     */
    public void setRTPMarker(boolean marker) {
        if (marker) {
            flags |= FLAG_RTP_MARKER;
        } else {
            flags &= ~FLAG_RTP_MARKER;
        }
    }

    /**
     * Check whether or not this buffer is to be discarded.
     *
     * @return Boolean
     */
    public boolean isDiscard() {
        return (flags & FLAG_DISCARD) != 0;
    }

    /**
     * Set the discard flag.
     *
     * @param discard Discard flag.
     */
    public void setDiscard(boolean discard) {
        if (discard) {
            flags |= FLAG_DISCARD;
        } else {
            flags &= ~FLAG_DISCARD;
        }
    }

    /**
     * Get the internal data that holds the media chunk.
     *
     * @return Data
     */
    public Object getData() {
        return data;
    }

    /**
     * Set the internal data that holds the media chunk.
     *
     * @param data Data
     */
    public void setData(Object data) {
        this.data = data;
    }

    /**
     * Get the length of the valid data in the buffer.
     *
     * @return The length of the valid data
     */
    public int getLength() {
        return length;
    }

    /**
     * Set the length of the valid data stored in the buffer.
     *
     * @param length The length of the valid data
     */
    public void setLength(int length) {
        this.length = length;
    }

    /**
     * Get the offset into the data array where the valid data begins.
     *
     * @return Offset
     */
    public int getOffset() {
        return offset;
    }

    /**
     * Set the offset.
     *
     * @param offset The starting point for the valid data
     */
    public void setOffset(int offset) {
        this.offset = offset;
    }

    /**
     * Get the time stamp.
     *
     * @return Time stamp in nanoseconds
     */
    public long getTimeStamp() {
        return timeStamp;
    }

    /**
     * Set the time stamp.
     *
     * @param timeStamp Time stamp in nanoseconds
     */
    public void setTimeStamp(long timeStamp) {
        this.timeStamp = timeStamp;
    }

    /**
     * Get the duration.
     *
     * @return Duration in nanoseconds
     */
    public long getDuration() {
        return duration;
    }

    /**
     * Set the duration.
     *
     * @param duration Duration
     */
    public void setDuration(long duration) {
        this.duration = duration;
    }

    /**
     * Set the sequence number.
     *
     * @param number Sequence number
     */
    public void setSequenceNumber(long number) {
        sequenceNumber = number;
    }

    /**
     * Gets the sequence number.
     *
     * @return Sequence number
     */
    public long getSequenceNumber() {
        return sequenceNumber;
    }
}

================================================
FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/util/Packet.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.util; /** * Generic packet * * @author jexa7410 */ public class Packet { /** * Data */ public byte[] data; /** * Packet length */ public int length; /** * Offset */ public int offset; /** * Received at */ public long receivedAt; /** * Constructor */ public Packet() { } /** * Constructor * * @param packet Packet */ public Packet(Packet packet) { data = packet.data; length = packet.length; offset = packet.offset; receivedAt = packet.receivedAt; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/util/SystemTimeBase.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.core.ims.protocol.rtp.util; /** * Time base */ public class SystemTimeBase { /** * Offset time (start-up time) */ private static long offset = System.currentTimeMillis() * 1000000L; /** * Returns a time base value in nanoseconds * * @return Time */ public long getTime() { return (System.currentTimeMillis() * 1000000L) - offset; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/AndroidFactory.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform; import android.content.Context; // import com.orangelabs.rcs.platform.file.FileFactory; import com.orangelabs.rcs.platform.network.NetworkFactory; import com.orangelabs.rcs.platform.registry.RegistryFactory; /** * Android platform * * @author jexa7410 */ public class AndroidFactory { /** * Android application context */ private static Context context = null; /** * Returns the application context * * @return Context */ public static Context getApplicationContext() { return context; } /** * Load factory * * @param context Context */ public static void setApplicationContext(Context context) { AndroidFactory.context = context; try { NetworkFactory.loadFactory("com.orangelabs.rcs.platform.network.AndroidNetworkFactory"); RegistryFactory.loadFactory("com.orangelabs.rcs.platform.registry.AndroidRegistryFactory"); // FileFactory.loadFactory("com.orangelabs.rcs.platform.file.AndroidFileFactory"); } catch(FactoryException e) { e.printStackTrace(); } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/FactoryException.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform; /** * Factory exception * * @author JM. Auffret */ public class FactoryException extends java.lang.Exception { static final long serialVersionUID = 1L; /** * Constructor * * @param error Error message */ public FactoryException(String error) { super(error); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/file/FileDescription.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.file; /** * File description * * @author jexa7410 */ public class FileDescription { /** * Name */ private String name; /** * Size */ private long size = -1; /** * Directory */ private boolean directory = false; /** * Constructor */ public FileDescription(String name, long size) { this.name = name; this.size = size; } /** * Constructor */ public FileDescription(String name, long size, boolean directory) { this.name = name; this.size = size; this.directory = directory; } /** * Returns the size of the file * * @return File size */ public long getSize() { return size; } /** * Returns the name of the file * * @return File name */ public String getName() { return name; } /** * Is a directory * * @return Boolean */ public boolean isDirectory() { return directory; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/file/FileFactory.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.file; import com.orangelabs.rcs.platform.FactoryException; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; /** * File factory * * @author jexa7410 */ public abstract class FileFactory { /** * Current platform factory */ private static FileFactory factory = null; /** * Load the factory * * @param classname Factory classname * @throws Exception */ public static void loadFactory(String classname) throws FactoryException { if (factory != null) { return; } try { factory = (FileFactory)Class.forName(classname).newInstance(); } catch(Exception e) { throw new FactoryException("Can't load the factory " + classname); } } /** * Returns the current factory * * @return Factory */ public static FileFactory getFactory() { return factory; } /** * Open a file input stream * * @param url URL * @return Input stream * @throws IOException */ public abstract InputStream openFileInputStream(String url) throws IOException; /** * Open a file output stream * * @param url URL * @return Output stream * @throws IOException */ public abstract OutputStream openFileOutputStream(String url) throws IOException; /** * Returns the description of a file * * @param url URL of the file * @return File description * @throws IOException */ public abstract FileDescription getFileDescription(String url) throws IOException; /** * Returns the root directory for photos * * @return Directory path */ public abstract String getPhotoRootDirectory(); /** * Returns the root directory for videos * * @return Directory path */ public abstract String getVideoRootDirectory(); /** * Returns the root directory for files * * @return Directory path */ public abstract String getFileRootDirectory(); /** * Update the media storage * * @param url New URL to be added */ public abstract void updateMediaStorage(String url); /** * Returns whether a file exists or 
not * * @param url Url of the file to check * @return File existence */ public abstract boolean fileExists(String url); /** * Create a directory if not already exist * * @param path Directory path * @return true if the directory exists or is created */ public static boolean createDirectory(String path) { File dir = new File(path); if (!dir.exists()) { if (!dir.mkdirs()) { return false; } } return true; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/logger/AndroidAppender.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.logger; import android.util.Log; import com.orangelabs.rcs.utils.logger.Appender; import com.orangelabs.rcs.utils.logger.Logger; /** * Android appender * * @author jexa7410 */ public class AndroidAppender extends Appender { /** * Constructor */ public AndroidAppender() { super(); } /** * Print a trace * * @param classname Classname * @param level Trace level * @param trace Trace */ public synchronized void printTrace(String classname, int level, String trace) { classname = "[RCS][" + classname + "]"; if (level == Logger.INFO_LEVEL) { Log.i(classname, trace); } else if (level == Logger.WARN_LEVEL) { Log.w(classname, trace); } else if (level == Logger.ERROR_LEVEL) { Log.e(classname, trace); } else if (level == Logger.FATAL_LEVEL) { Log.e(classname, trace); } else { Log.v(classname, trace); } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidDatagramConnection.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetAddress; /** * Android datagram server connection * * @author jexa7410 */ public class AndroidDatagramConnection implements DatagramConnection { /** * Datagram connection */ private DatagramSocket connection = null; /** * Constructor */ public AndroidDatagramConnection() { } /** * Open the datagram connection * * @throws IOException */ public void open() throws IOException { connection = new DatagramSocket(); } /** * Open the datagram connection * * @param port Local port * @throws IOException */ public void open(int port) throws IOException { connection = new DatagramSocket(port); } /** * Close the datagram connection * * @throws IOException */ public void close() throws IOException { if (connection != null) { connection.close(); connection = null; } } /** * Receive data with a specific buffer size * * @param bufferSize Buffer size * @return Byte array * @throws IOException */ public byte[] receive(int bufferSize) throws IOException { if (connection != null) { byte[] buf = new byte[bufferSize]; DatagramPacket packet = new DatagramPacket(buf, buf.length); connection.receive(packet); int packetLength = packet.getLength(); byte[] bytes = packet.getData(); byte[] data = new byte[packetLength]; System.arraycopy(bytes, 0, data, 0, packetLength); return data; } else { throw new IOException("Connection not openned"); } } /** * Receive data * * @return Byte array * @throws IOException */ public byte[] receive() throws IOException { return receive(DatagramConnection.DEFAULT_DATAGRAM_SIZE); } /** * Send data * * @param remoteAddr Remote address * @param remotePort Remote port * @param data Data as byte array * @throws IOException */ public void send(String remoteAddr, int remotePort, byte[] data) throws IOException { if (data == null) { 
return; } if (connection != null) { InetAddress address = InetAddress.getByName(remoteAddr); DatagramPacket packet = new DatagramPacket(data, data.length, address, remotePort); connection.send(packet); } else { throw new IOException("Connection not openned"); } } /** * Returns the local address * * @return Address * @throws IOException */ public String getLocalAddress() throws IOException { if (connection != null) { return connection.getLocalAddress().getHostAddress(); } else { throw new IOException("Connection not openned"); } } /** * Returns the local port * * @return Port * @throws IOException */ public int getLocalPort() throws IOException { if (connection != null) { return connection.getLocalPort(); } else { throw new IOException("Connection not openned"); } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidHttpConnection.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; /** * Android HTTP connection * * @author jexa7410 */ public class AndroidHttpConnection implements HttpConnection { /** * HTTP connection */ private HttpURLConnection connection = null; /** * Open the HTTP connection * * @param url Remote URL * @throws IOException */ public void open(String url) throws IOException { URL urlConn = new URL(url); connection = (HttpURLConnection)urlConn.openConnection(); connection.connect(); } /** * Close the HTTP connection * * @throws IOException */ public void close() throws IOException { if (connection != null) { connection.disconnect(); } } /** * HTTP GET request * * @return Response * @throws IOException */ public ByteArrayOutputStream get() throws IOException { if (connection != null) { return sendHttpRequest(HttpConnection.GET_METHOD); } else { throw new IOException("Connection not openned"); } } /** * HTTP POST request * * @return Response * @throws IOException */ public ByteArrayOutputStream post() throws IOException { if (connection != null) { return sendHttpRequest(HttpConnection.POST_METHOD); } else { throw new IOException("Connection not openned"); } } /** * Send HTTP request * * @param method HTTP method * @return Response * @throws IOException */ private ByteArrayOutputStream sendHttpRequest(String method) throws IOException { connection.setRequestMethod(method); int rc = connection.getResponseCode(); if (rc != HttpURLConnection.HTTP_OK) { throw new IOException("HTTP error " + rc); } InputStream inputStream = connection.getInputStream(); ByteArrayOutputStream result = new ByteArrayOutputStream(); int ch; while((ch = inputStream.read()) != -1) { result.write(ch); } inputStream.close(); return result; } } 
================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidNetworkFactory.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.net.InetAddress; import java.net.NetworkInterface; import java.util.Enumeration; /** * Android network factory * * @author jexa7410 */ public class AndroidNetworkFactory extends NetworkFactory { /** * Returns the local IP address * * @return IP address */ public String getLocalIpAddress() { try { for (Enumeration en = NetworkInterface.getNetworkInterfaces(); en.hasMoreElements();) { NetworkInterface intf = (NetworkInterface)en.nextElement(); for (Enumeration addr = intf.getInetAddresses(); addr.hasMoreElements();) { InetAddress inetAddress = (InetAddress)addr.nextElement(); if (!inetAddress.isLoopbackAddress() && !inetAddress.isLinkLocalAddress()) { return inetAddress.getHostAddress().toString(); } } } return null; } catch(Exception e) { return null; } } /** * Create a datagram connection * * @return Datagram connection */ public DatagramConnection createDatagramConnection() { return new AndroidDatagramConnection(); } /** * Create a socket client connection * * 
@return Socket connection */ public SocketConnection createSocketClientConnection() { return new AndroidSocketConnection(); } /** * Create a socket server connection * * @return Socket server connection */ public SocketServerConnection createSocketServerConnection() { return new AndroidSocketServerConnection(); } /** * Create an HTTP connection * * @return HTTP connection */ public HttpConnection createHttpConnection() { return new AndroidHttpConnection(); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidSocketConnection.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; /** * Android socket connection * * @author jexa7410 */ public class AndroidSocketConnection implements SocketConnection { /** * Socket connection */ private Socket socket = null; /** * Constructor */ public AndroidSocketConnection() { } /** * Constructor * * @param socket Socket */ public AndroidSocketConnection(Socket socket) { this.socket = socket; } /** * Open the socket * * @param remoteAddr Remote address * @param remotePort Remote port * @throws IOException */ public void open(String remoteAddr, int remotePort) throws IOException { socket = new Socket(remoteAddr, remotePort); } /** * Close the socket * * @throws IOException */ public void close() throws IOException { if (socket != null) { socket.close(); socket = null; } } /** * Returns the socket input stream * * @return Input stream * @throws IOException */ public InputStream getInputStream() throws IOException { if (socket != null) { return socket.getInputStream(); } else { throw new IOException("Connection not openned"); } } /** * Returns the socket output stream * * @return Output stream * @throws IOException */ public OutputStream getOutputStream() throws IOException { if (socket != null) { return socket.getOutputStream(); } else { throw new IOException("Connection not openned"); } } /** * Returns the remote address of the connection * * @return Address * @throws IOException */ public String getRemoteAddress() throws IOException { if (socket != null) { return socket.getInetAddress().getHostAddress(); } else { throw new IOException("Connection not openned"); } } /** * Returns the remote port of the connection * * @return Port * @throws IOException */ public int getRemotePort() throws IOException { if (socket != null) { return socket.getPort(); } else { throw new 
IOException("Connection not openned"); } } /** * Returns the local address of the connection * * @return Address * @throws IOException */ public String getLocalAddress() throws IOException { if (socket != null) { return socket.getLocalAddress().getHostAddress(); } else { throw new IOException("Connection not openned"); } } /** * Returns the local port of the connection * * @return Port * @throws IOException */ public int getLocalPort() throws IOException { if (socket != null) { return socket.getLocalPort(); } else { throw new IOException("Connection not openned"); } } /** * Get the timeout for this socket during which a reading * operation shall block while waiting for data * * @return Timeout in milliseconds * @throws IOException */ public int getSoTimeout() throws IOException { if (socket != null) { return socket.getSoTimeout(); } else { throw new IOException("Connection not openned"); } } /** * Set the timeout for this socket during which a reading * operation shall block while waiting for data * * @param timeout Timeout in milliseconds * @throws IOException */ public void setSoTimeout(int timeout) throws IOException { if (socket != null) { socket.setSoTimeout(timeout); } else { throw new IOException("Connection not openned"); } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidSocketServerConnection.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.IOException; import java.net.ServerSocket; import java.net.Socket; import com.orangelabs.rcs.utils.logger.Logger; /** * Android socket connection * * @author jexa7410 */ public class AndroidSocketServerConnection implements SocketServerConnection { /** * Socket server connection */ private ServerSocket acceptSocket = null; /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Constructor */ public AndroidSocketServerConnection() { } /** * Open the socket * * @param port Local port * @throws IOException */ public void open(int port) throws IOException { acceptSocket = new ServerSocket(port); } /** * Close the socket * * @throws IOException */ public void close() throws IOException { if (acceptSocket != null) { acceptSocket.close(); acceptSocket = null; } } /** * Accept connection * * @return Socket connection * @throws IOException */ public SocketConnection acceptConnection() throws IOException { if (acceptSocket != null) { if (logger.isActivated()) { logger.debug("Socket serverSocket is waiting for incoming connection"); } Socket socket = acceptSocket.accept(); return new AndroidSocketConnection(socket); } else { throw new IOException("Connection not openned"); } } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/DatagramConnection.java ================================================ 
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.orangelabs.rcs.platform.network;

import java.io.IOException;

/**
 * Datagram (UDP-style) connection abstraction implemented per platform.
 *
 * @author jexa7410
 */
public interface DatagramConnection {
    /**
     * Default datagram packet size in bytes (32 KiB).
     * Interface fields are implicitly final; this is a constant.
     */
    public static int DEFAULT_DATAGRAM_SIZE = 4096 * 8;

    /**
     * Open the datagram connection on an ephemeral local port
     *
     * @throws IOException
     */
    public void open() throws IOException;

    /**
     * Open the datagram connection on a given local port
     *
     * @param port Local port
     * @throws IOException
     */
    public void open(int port) throws IOException;

    /**
     * Close the datagram connection
     *
     * @throws IOException
     */
    public void close() throws IOException;

    /**
     * Send data to a remote endpoint
     *
     * @param remoteAddr Remote address
     * @param remotePort Remote port
     * @param data Data as byte array
     * @throws IOException
     */
    public void send(String remoteAddr, int remotePort, byte[] data) throws IOException;

    /**
     * Receive data using the default datagram size
     *
     * @return Byte array
     * @throws IOException
     */
    public byte[] receive() throws IOException;

    /**
     * Receive data with a specific buffer size
     *
     * @param bufferSize Buffer size
     * @return Byte array
     * @throws IOException
     */
    public byte[] receive(int bufferSize) throws IOException;

    /**
     * Returns the local address
     *
     * @return Address
     * @throws IOException
     */
    public String getLocalAddress() throws IOException;

    /**
     * Returns the local port
     *
     * @return Port
     * @throws IOException
     */
    public int getLocalPort() throws IOException;
}
================================================
FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/HttpConnection.java
================================================
/*******************************************************************************
 * Software Name : RCS IMS Stack
 *
 * Copyright (C) 2010 France Telecom S.A.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.ByteArrayOutputStream; import java.io.IOException; /** * HTTP connection * * @author jexa7410 */ public interface HttpConnection { /** * GET method */ public final static String GET_METHOD = "GET"; /** * POST method */ public final static String POST_METHOD = "POST"; /** * Open the HTTP connection * * @param url Remote URL * @throws IOException */ public void open(String url) throws IOException; /** * Close the HTTP connection * * @throws IOException */ public void close() throws IOException; /** * HTTP GET request * * @return Response * @throws IOException */ public ByteArrayOutputStream get() throws IOException; /** * HTTP POST request * * @return Response * @throws IOException */ public ByteArrayOutputStream post() throws IOException; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/NetworkFactory.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.network; import com.orangelabs.rcs.platform.FactoryException; /** * Network factory * * @author jexa7410 */ public abstract class NetworkFactory { /** * Current platform factory */ private static NetworkFactory factory = null; /** * Load the factory * * @param classname Factory classname * @throws Exception */ public static void loadFactory(String classname) throws FactoryException { if (factory != null) { return; } try { factory = (NetworkFactory)Class.forName(classname).newInstance(); } catch(Exception e) { throw new FactoryException("Can't load the factory " + classname); } } /** * Returns the current factory * * @return Factory */ public static NetworkFactory getFactory() { return factory; } /** * Returns the local IP address * * @return Address */ public abstract String getLocalIpAddress(); /** * Create a datagram connection * * @return Datagram connection */ public abstract DatagramConnection createDatagramConnection(); /** * Create a socket client connection * * @return Socket connection */ public abstract SocketConnection createSocketClientConnection(); /** * Create a socket server connection * * @return Socket server connection */ public abstract SocketServerConnection createSocketServerConnection(); /** * Create an HTTP connection * * @return HTTP connection */ public abstract HttpConnection createHttpConnection(); } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/SocketConnection.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; /** * Socket client connection * * @author jexa7410 */ public interface SocketConnection { /** * Open the socket * * @param remoteAddr Remote address * @param remotePort Remote port * @throws IOException */ public void open(String remoteAddr, int remotePort) throws IOException; /** * Close the socket * * @throws IOException */ public void close() throws IOException; /** * Returns the socket input stream * * @return Input stream * @throws IOException */ public InputStream getInputStream() throws IOException; /** * Returns the socket output stream * * @return Output stream * @throws IOException */ public OutputStream getOutputStream() throws IOException; /** * Returns the remote address of the connection * * @return Address * @throws IOException */ public String getRemoteAddress() throws IOException; /** * Returns the remote port of the connection * * @return Port * @throws IOException */ public int getRemotePort() throws IOException; /** * Returns the local address of the connection * * @return Address * @throws IOException */ public String getLocalAddress() throws IOException; /** * Returns the local port of the connection * * @return Port * @throws IOException */ public int getLocalPort() throws IOException; /** * Get the timeout for this socket during which a reading * operation shall block while waiting for data * * @return Milliseconds * @throws 
IOException */ public int getSoTimeout() throws IOException; /** * Set the timeout for this socket during which a reading * operation shall block while waiting for data * * @param timeout Timeout in milliseconds * @throws IOException */ public void setSoTimeout(int timeout) throws IOException; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/network/SocketServerConnection.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.network; import java.io.IOException; /** * Socket server connection * * @author jexa7410 */ public interface SocketServerConnection { /** * Open the socket * * @param port Local port * @throws IOException */ public void open(int port) throws IOException; /** * Close the socket * * @throws IOException */ public void close() throws IOException; /** * Accept connection * * @return Socket connection * @throws IOException */ public SocketConnection acceptConnection() throws IOException; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/registry/AndroidRegistryFactory.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.platform.registry; import com.orangelabs.rcs.core.CoreException; import com.orangelabs.rcs.platform.AndroidFactory; import android.app.Activity; import android.content.SharedPreferences; /** * Android registry factory * * @author jexa7410 */ public class AndroidRegistryFactory extends RegistryFactory { /** * RCS registry name */ public static final String RCS_PREFS = "RCS"; /** * Shared preference */ private SharedPreferences preferences; /** * Constructor * * @throws CoreException */ public AndroidRegistryFactory() throws CoreException { super(); if (AndroidFactory.getApplicationContext() == null) { throw new CoreException("Application context not initialized"); } preferences = AndroidFactory.getApplicationContext().getSharedPreferences(RCS_PREFS, Activity.MODE_PRIVATE); } /** * Read a string value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return String */ public String readString(String key, String defaultValue) { return preferences.getString(key, defaultValue); } /** * Write a string value in the registry * * @param key Key name to be updated * @param value New value */ public void writeString(String key, String value) { SharedPreferences.Editor editor = preferences.edit(); editor.putString(key, value); editor.commit(); } /** * Read an integer value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return Integer */ public int readInteger(String key, int defaultValue) { return preferences.getInt(key, defaultValue); } /** * Write an integer value in the registry * * @param key Key name to be updated * @param value New value */ public void writeInteger(String key, int value) { SharedPreferences.Editor editor = preferences.edit(); editor.putInt(key, value); editor.commit(); } /** * Read a long value in the registry * * @param key Key name to be read * @param defaultValue 
Default value * @return Long */ public long readLong(String key, long defaultValue) { return preferences.getLong(key, defaultValue); } /** * Write a long value in the registry * * @param key Key name to be updated * @param value New value */ public void writeLong(String key, long value) { SharedPreferences.Editor editor = preferences.edit(); editor.putLong(key, value); editor.commit(); } /** * Read a boolean value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return Boolean */ public boolean readBoolean(String key, boolean defaultValue) { return preferences.getBoolean(key, defaultValue); } /** * Write a boolean value in the registry * * @param key Key name to be updated * @param value New value */ public void writeBoolean(String key, boolean value) { SharedPreferences.Editor editor = preferences.edit(); editor.putBoolean(key, value); editor.commit(); } /** * Remove a parameter in the registry * * @param key Key name to be removed */ public void removeParameter(String key) { SharedPreferences.Editor editor = preferences.edit(); editor.remove(key); editor.commit(); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/platform/registry/RegistryFactory.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.platform.registry; import com.orangelabs.rcs.platform.FactoryException; /** * Application registry factory * * @author jexa7410 */ public abstract class RegistryFactory { /** * Current platform factory */ private static RegistryFactory factory = null; /** * Load the factory * * @param classname Factory classname * @throws Exception */ public static void loadFactory(String classname) throws FactoryException { if (factory != null) { return; } try { factory = (RegistryFactory)Class.forName(classname).newInstance(); } catch(Exception e) { throw new FactoryException("Can't load the factory " + classname); } } /** * Returns the current factory * * @return Factory */ public static RegistryFactory getFactory() { return factory; } /** * Read a string value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return String */ public abstract String readString(String key, String defaultValue); /** * Write a string value in the registry * * @param key Key name to be updated * @param value New value */ public abstract void writeString(String key, String value); /** * Read an integer value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return Integer */ public abstract int readInteger(String key, int defaultValue); /** * Write an integer value in the registry * * @param key Key name to be updated * @param value New value */ public abstract void writeInteger(String key, int value); /** * Read a long value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return Long */ public abstract long readLong(String key, long defaultValue); /** * Write a long value in the registry * * @param key Key name to be updated * @param value New value */ public abstract void 
writeLong(String key, long value); /** * Read a boolean value in the registry * * @param key Key name to be read * @param defaultValue Default value * @return Boolean */ public abstract boolean readBoolean(String key, boolean defaultValue); /** * Write a boolean value in the registry * * @param key Key name to be updated * @param value New value */ public abstract void writeBoolean(String key, boolean value); /** * Remove a parameter in the registry * * @param key Key name to be removed */ public abstract void removeParameter(String key); } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/provider/settings/RcsSettings.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.provider.settings; import com.orangelabs.rcs.service.api.client.capability.Capabilities; import android.content.ContentResolver; import android.content.ContentValues; import android.content.Context; import android.database.Cursor; import android.net.Uri; /** * RCS settings * * @author jexa7410 */ public class RcsSettings { /** * Current instance */ private static RcsSettings instance = null; /** * Content resolver */ private ContentResolver cr; /** * Database URI */ private Uri databaseUri = RcsSettingsData.CONTENT_URI; /** * Create instance * * @param ctx Context */ public static synchronized void createInstance(Context ctx) { if (instance == null) { instance = new RcsSettings(ctx); } } /** * Returns instance * * @return Instance */ public static RcsSettings getInstance() { return instance; } /** * Constructor * * @param ctx Application context */ private RcsSettings(Context ctx) { super(); this.cr = ctx.getContentResolver(); } /** * Read a parameter * * @param key Key * @return Value */ public String readParameter(String key) { String result = null; Cursor c = cr.query(databaseUri, null, RcsSettingsData.KEY_KEY + "='" + key + "'", null, null); if ((c != null) && (c.getCount() > 0)) { if (c.moveToFirst()) { result = c.getString(2); } c.close(); } return result; } /** * Write a parameter * * @param key Key * @param value Value */ public void writeParameter(String key, String value) { ContentValues values = new ContentValues(); values.put(RcsSettingsData.KEY_VALUE, value); String where = RcsSettingsData.KEY_KEY + "='" + key + "'"; cr.update(databaseUri, values, where, null); } /** * Is RCS service activated * * @return Boolean */ public boolean isServiceActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.SERVICE_ACTIVATED)); } return result; } /** * Set the RCS service activation state * * 
@param state State */ public void setServiceActivationState(boolean state) { if (instance != null) { writeParameter(RcsSettingsData.SERVICE_ACTIVATED, Boolean.toString(state)); } } /** * Is RCS service authorized in roaming * * @return Boolean */ public boolean isRoamingAuthorized() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.ROAMING_AUTHORIZED)); } return result; } /** * Set the roaming authorization state * * @param state State */ public void setRoamingAuthorizationState(boolean state) { if (instance != null) { writeParameter(RcsSettingsData.ROAMING_AUTHORIZED, Boolean.toString(state)); } } /** * Get the ringtone for presence invitation * * @return Ringtone URI or null if there is no ringtone */ public String getPresenceInvitationRingtone() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.PRESENCE_INVITATION_RINGTONE); } return result; } /** * Set the presence invitation ringtone * * @param uri Ringtone URI */ public void setPresenceInvitationRingtone(String uri) { if (instance != null) { writeParameter(RcsSettingsData.PRESENCE_INVITATION_RINGTONE, uri); } } /** * Is phone vibrate for presence invitation * * @return Boolean */ public boolean isPhoneVibrateForPresenceInvitation() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.PRESENCE_INVITATION_VIBRATE)); } return result; } /** * Set phone vibrate for presence invitation * * @param vibrate Vibrate state */ public void setPhoneVibrateForPresenceInvitation(boolean vibrate) { if (instance != null) { writeParameter(RcsSettingsData.PRESENCE_INVITATION_VIBRATE, Boolean.toString(vibrate)); } } /** * Get the ringtone for CSh invitation * * @return Ringtone URI or null if there is no ringtone */ public String getCShInvitationRingtone() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.CSH_INVITATION_RINGTONE); } return 
result; } /** * Set the CSh invitation ringtone * * @param uri Ringtone URI */ public void setCShInvitationRingtone(String uri) { if (instance != null) { writeParameter(RcsSettingsData.CSH_INVITATION_RINGTONE, uri); } } /** * Is phone vibrate for CSh invitation * * @return Boolean */ public boolean isPhoneVibrateForCShInvitation() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CSH_INVITATION_VIBRATE)); } return result; } /** * Set phone vibrate for CSh invitation * * @param vibrate Vibrate state */ public void setPhoneVibrateForCShInvitation(boolean vibrate) { if (instance != null) { writeParameter(RcsSettingsData.CSH_INVITATION_VIBRATE, Boolean.toString(vibrate)); } } /** * Is phone beep if the CSh available * * @return Boolean */ public boolean isPhoneBeepIfCShAvailable() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CSH_AVAILABLE_BEEP)); } return result; } /** * Set phone beep if CSh available * * @param beep Beep state */ public void setPhoneBeepIfCShAvailable(boolean beep) { if (instance != null) { writeParameter(RcsSettingsData.CSH_AVAILABLE_BEEP, Boolean.toString(beep)); } } /** * Get the CSh video format * * @return Video format as string */ public String getCShVideoFormat() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.CSH_VIDEO_FORMAT); } return result; } /** * Set the CSh video format * * @param fmt Video format */ public void setCShVideoFormat(String fmt) { if (instance != null) { writeParameter(RcsSettingsData.CSH_VIDEO_FORMAT, fmt); } } /** * Get the CSh video size * * @return Size (e.g. 
QCIF, QVGA) */ public String getCShVideoSize() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.CSH_VIDEO_SIZE); } return result; } /** * Set the CSh video size * * @param size Video size */ public void setCShVideoSize(String size) { if (instance != null) { writeParameter(RcsSettingsData.CSH_VIDEO_SIZE, size); } } /** * Get the ringtone for file transfer invitation * * @return Ringtone URI or null if there is no ringtone */ public String getFileTransferInvitationRingtone() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.FILETRANSFER_INVITATION_RINGTONE); } return result; } /** * Set the file transfer invitation ringtone * * @param uri Ringtone URI */ public void setFileTransferInvitationRingtone(String uri) { if (instance != null) { writeParameter(RcsSettingsData.FILETRANSFER_INVITATION_RINGTONE, uri); } } /** * Is phone vibrate for file transfer invitation * * @return Boolean */ public boolean isPhoneVibrateForFileTransferInvitation() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.FILETRANSFER_INVITATION_VIBRATE)); } return result; } /** * Set phone vibrate for file transfer invitation * * @param vibrate Vibrate state */ public void setPhoneVibrateForFileTransferInvitation(boolean vibrate) { if (instance != null) { writeParameter(RcsSettingsData.FILETRANSFER_INVITATION_VIBRATE, Boolean.toString(vibrate)); } } /** * Get the ringtone for chat invitation * * @return Ringtone URI or null if there is no ringtone */ public String getChatInvitationRingtone() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.CHAT_INVITATION_RINGTONE); } return result; } /** * Set the chat invitation ringtone * * @param uri Ringtone URI */ public void setChatInvitationRingtone(String uri) { if (instance != null) { writeParameter(RcsSettingsData.CHAT_INVITATION_RINGTONE, uri); } } /** * Is phone vibrate for chat 
invitation * * @return Boolean */ public boolean isPhoneVibrateForChatInvitation() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CHAT_INVITATION_VIBRATE)); } return result; } /** * Set phone vibrate for chat invitation * * @param vibrate Vibrate state */ public void setPhoneVibrateForChatInvitation(boolean vibrate) { if (instance != null) { writeParameter(RcsSettingsData.CHAT_INVITATION_VIBRATE, Boolean.toString(vibrate)); } } /** * Is auto accept mode for chat invitations activated * * @return Boolean */ public boolean isAutoAcceptModeForChatInvitation(){ boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CHAT_INVITATION_AUTO_ACCEPT)); } return result; } /** * Set auto accept mode for chat invitations * * @param auto Auto accept mode */ public void setAutoAcceptModeForChatInvitation(boolean auto) { if (instance != null) { writeParameter(RcsSettingsData.CHAT_INVITATION_AUTO_ACCEPT, Boolean.toString(auto)); } } /** * Get the pre-defined freetext 1 * * @return String */ public String getPredefinedFreetext1() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.FREETEXT1); } return result; } /** * Set the pre-defined freetext 1 * * @param txt Text */ public void setPredefinedFreetext1(String txt) { if (instance != null) { writeParameter(RcsSettingsData.FREETEXT1, txt); } } /** * Get the pre-defined freetext 2 * * @return String */ public String getPredefinedFreetext2() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.FREETEXT2); } return result; } /** * Set the pre-defined freetext 2 * * @param txt Text */ public void setPredefinedFreetext2(String txt) { if (instance != null) { writeParameter(RcsSettingsData.FREETEXT2, txt); } } /** * Get the pre-defined freetext 3 * * @return String */ public String getPredefinedFreetext3() { String result = null; if (instance != null) { result 
= readParameter(RcsSettingsData.FREETEXT3); } return result; } /** * Set the pre-defined freetext 3 * * @param txt Text */ public void setPredefinedFreetext3(String txt) { if (instance != null) { writeParameter(RcsSettingsData.FREETEXT3, txt); } } /** * Get the pre-defined freetext 4 * * @return String */ public String getPredefinedFreetext4() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.FREETEXT4); } return result; } /** * Set the pre-defined freetext 4 * * @param txt Text */ public void setPredefinedFreetext4(String txt) { if (instance != null) { writeParameter(RcsSettingsData.FREETEXT4, txt); } } /** * Get user profile username (i.e. username part of the IMPU) * * @return Username part of SIP-URI */ public String getUserProfileImsUserName() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.USERPROFILE_IMS_USERNAME); } return result; } /** * Set user profile IMS username (i.e. username part of the IMPU) * * @param value Value */ public void setUserProfileImsUserName(String value) { if (instance != null) { writeParameter(RcsSettingsData.USERPROFILE_IMS_USERNAME, value); } } /** * Get user profile IMS display name associated to IMPU * * @return String */ public String getUserProfileImsDisplayName() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.USERPROFILE_IMS_DISPLAY_NAME); } return result; } /** * Set user profile IMS display name associated to IMPU * * @param value Value */ public void setUserProfileImsDisplayName(String value) { if (instance != null) { writeParameter(RcsSettingsData.USERPROFILE_IMS_DISPLAY_NAME, value); } } /** * Get user profile IMS private Id (i.e. IMPI) * * @return SIP-URI */ public String getUserProfileImsPrivateId() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.USERPROFILE_IMS_PRIVATE_ID); } return result; } /** * Set user profile IMS private Id (i.e. 
IMPI) * * @param uri SIP-URI */ public void setUserProfileImsPrivateId(String uri) { if (instance != null) { writeParameter(RcsSettingsData.USERPROFILE_IMS_PRIVATE_ID, uri); } } /** * Get user profile IMS password * * @return String */ public String getUserProfileImsPassword() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.USERPROFILE_IMS_PASSWORD); } return result; } /** * Set user profile IMS password * * @param pwd Password */ public void setUserProfileImsPassword(String pwd) { if (instance != null) { writeParameter(RcsSettingsData.USERPROFILE_IMS_PASSWORD, pwd); } } /** * Get user profile IMS home domain * * @return Domain */ public String getUserProfileImsDomain() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.USERPROFILE_IMS_HOME_DOMAIN); } return result; } /** * Set user profile IMS home domain * * @param domain Domain */ public void setUserProfileImsDomain(String domain) { if (instance != null) { writeParameter(RcsSettingsData.USERPROFILE_IMS_HOME_DOMAIN, domain); } } /** * Get IMS proxy address for mobile access * * @return Address */ public String getImsProxyAddrForMobile() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.IMS_PROXY_ADDR_MOBILE); } return result; } /** * Set IMS proxy address for mobile access * * @param addr Address */ public void setImsProxyAddrForMobile(String addr) { if (instance != null) { writeParameter(RcsSettingsData.IMS_PROXY_ADDR_MOBILE, addr); } } /** * Get IMS proxy port for mobile access * * @return Port */ public int getImsProxyPortForMobile() { int result = 5060; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.IMS_PROXY_PORT_MOBILE)); } catch(Exception e) {} } return result; } /** * Set IMS proxy port for mobile access * * @param port Port number */ public void setImsProxyPortForMobile(int port) { if (instance != null) { 
writeParameter(RcsSettingsData.IMS_PROXY_PORT_MOBILE, "" + port); } } /** * Get IMS proxy address for Wi-Fi access * * @return Address */ public String getImsProxyAddrForWifi() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.IMS_PROXY_ADDR_WIFI); } return result; } /** * Set IMS proxy address for Wi-Fi access * * @param addr Address */ public void setImsProxyAddrForWifi(String addr) { if (instance != null) { writeParameter(RcsSettingsData.IMS_PROXY_ADDR_WIFI, addr); } } /** * Get IMS proxy port for Wi-Fi access * * @return Port */ public int getImsProxyPortForWifi() { int result = 5060; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.IMS_PROXY_PORT_WIFI)); } catch(Exception e) {} } return result; } /** * Set IMS proxy port for Wi-Fi access * * @param port Port number */ public void setImsProxyPortForWifi(int port) { if (instance != null) { writeParameter(RcsSettingsData.IMS_PROXY_PORT_WIFI, "" + port); } } /** * Get XDM server address * * @return Address as :/ */ public String getXdmServer() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.XDM_SERVER); } return result; } /** * Set XDM server address * * @param addr Address as :/ */ public void setXdmServer(String addr) { if (instance != null) { writeParameter(RcsSettingsData.XDM_SERVER, addr); } } /** * Get XDM server login * * @return String value */ public String getXdmLogin() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.XDM_LOGIN); } return result; } /** * Set XDM server login * * @param value Value */ public void setXdmLogin(String value) { if (instance != null) { writeParameter(RcsSettingsData.XDM_LOGIN, value); } } /** * Get XDM server password * * @return String value */ public String getXdmPassword() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.XDM_PASSWORD); } return result; } /** * Set XDM server password * * 
@param value Value */ public void setXdmPassword(String value) { if (instance != null) { writeParameter(RcsSettingsData.XDM_PASSWORD, value); } } /** * Get IM conference URI * * @return SIP-URI */ public String getImConferenceUri() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.IM_CONF_URI); } return result; } /** * Set IM conference URI * * @param uri SIP-URI */ public void setImConferenceUri(String uri) { if (instance != null) { writeParameter(RcsSettingsData.IM_CONF_URI, uri); } } /** * Get end user confirmation request URI * * @return SIP-URI */ public String getEndUserConfirmationRequestUri() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.ENDUSER_CONFIRMATION_URI); } return result; } /** * Set end user confirmation request * * @param uri SIP-URI */ public void setEndUserConfirmationRequestUri(String uri) { if (instance != null) { writeParameter(RcsSettingsData.ENDUSER_CONFIRMATION_URI, uri); } } /** * Get country code * * @return Country code */ public String getCountryCode() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.COUNTRY_CODE); } return result; } /** * Set country code * * @param code Country code */ public void setCountryCode(String code) { if (instance != null) { writeParameter(RcsSettingsData.COUNTRY_CODE, code); } } /** * Get country area code * * @return Area code */ public String getCountryAreaCode() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.COUNTRY_AREA_CODE); } return result; } /** * Set country area code * * @param code Area code */ public void setCountryAreaCode(String code) { if (instance != null) { writeParameter(RcsSettingsData.COUNTRY_AREA_CODE, code); } } /** * Get my capabilities * * @return capability */ public Capabilities getMyCapabilities(){ Capabilities capabilities = new Capabilities(); // Add default capabilities capabilities.setCsVideoSupport(isCsVideoSupported()); 
capabilities.setFileTransferSupport(isFileTransferSupported()); capabilities.setImageSharingSupport(isImageSharingSupported()); capabilities.setImSessionSupport(isImSessionSupported()); capabilities.setPresenceDiscoverySupport(isPresenceDiscoverySupported()); capabilities.setSocialPresenceSupport(isSocialPresenceSupported()); capabilities.setVideoSharingSupport(isVideoSharingSupported()); capabilities.setTimestamp(System.currentTimeMillis()); // Add extensions String exts = getSupportedRcsExtensions(); if ((exts != null) && (exts.length() > 0)) { String[] ext = exts.split(","); for(int i=0; i < ext.length; i++) { capabilities.addSupportedExtension(ext[i]); } } return capabilities; } /** * Get max photo-icon size * * @return Size in kilobytes */ public int getMaxPhotoIconSize() { int result = 256; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_PHOTO_ICON_SIZE)); } catch(Exception e) {} } return result; } /** * Get max freetext length * * @return Number of char */ public int getMaxFreetextLength() { int result = 100; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_FREETXT_LENGTH)); } catch(Exception e) {} } return result; } /** * Get max number of participants in a group chat * * @return Number of participants */ public int getMaxChatParticipants() { int result = 5; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_PARTICIPANTS)); } catch(Exception e) {} } return result; } /** * Get max length of a chat message * * @return Number of char */ public int getMaxChatMessageLength() { int result = 100; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_MSG_LENGTH)); } catch(Exception e) {} } return result; } /** * Get idle duration of a chat session * * @return Duration in seconds */ public int getChatIdleDuration() { int result = 120; if (instance != null) { try { result = 
Integer.parseInt(readParameter(RcsSettingsData.CHAT_IDLE_DURATION)); } catch(Exception e) {} } return result; } /** * Get max file transfer size * * @return Size in kilobytes */ public int getMaxFileTransferSize() { int result = 2048; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_FILE_TRANSFER_SIZE)); } catch(Exception e) {} } return result; } /** * Get warning threshold for max file transfer size * * @return Size in kilobytes */ public int getWarningMaxFileTransferSize() { int result = 2048; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.WARN_FILE_TRANSFER_SIZE)); } catch(Exception e) {} } return result; } /** * Get max image share size * * @return Size in kilobytes */ public int getMaxImageSharingSize() { int result = 2048; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_IMAGE_SHARE_SIZE)); } catch(Exception e) {} } return result; } /** * Get max duration of a video share * * @return Duration in seconds */ public int getMaxVideoShareDuration() { int result = 600; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_VIDEO_SHARE_DURATION)); } catch(Exception e) {} } return result; } /** * Get max number of simultaneous chat sessions * * @return Number of sessions */ public int getMaxChatSessions() { int result = 1; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_SESSIONS)); } catch(Exception e) {} } return result; } /** * Get max number of simultaneous file transfer sessions * * @return Number of sessions */ public int getMaxFileTransferSessions() { int result = 1; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_FILE_TRANSFER_SESSIONS)); } catch(Exception e) {} } return result; } /** * Is SMS fallback service activated * * @return Boolean */ public boolean isSmsFallbackServiceActivated() { boolean result = false; if 
(instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.SMS_FALLBACK_SERVICE)); } return result; } /** * Is Store & Forward service warning activated * * @return Boolean */ public boolean isStoreForwardWarningActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.WARN_SF_SERVICE)); } return result; } /** * Get IM session start mode * * @return Integer (1: The 200 OK is sent when the receiver starts to type a message back * in the chat window. 2: The 200 OK is sent when the receiver sends a message) */ public int getImSessionStartMode() { int result = 1; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.IM_SESSION_START)); } catch(Exception e) {} } return result; } /** * Get max number of entries per contact in the chat log * * @return Number */ public int getMaxChatLogEntriesPerContact() { int result = 200; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_LOG_ENTRIES)); } catch(Exception e) {} } return result; } /** * Get max number of entries per contact in the richcall log * * @return Number */ public int getMaxRichcallLogEntriesPerContact() { int result = 200; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MAX_RICHCALL_LOG_ENTRIES)); } catch(Exception e) {} } return result; } /** * Get polling period used before each IMS service check (e.g. 
test subscription state for presence service) * * @return Period in seconds */ public int getImsServicePollingPeriod(){ int result = 300; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.IMS_SERVICE_POLLING_PERIOD)); } catch(Exception e) {} } return result; } /** * Get default SIP listening port * * @return Port */ public int getSipListeningPort() { int result = 5060; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SIP_DEFAULT_PORT)); } catch(Exception e) {} } return result; } /** * Get default SIP protocol for mobile * * @return Protocol (udp | tcp | tls) */ public String getSipDefaultProtocolForMobile() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.SIP_DEFAULT_PROTOCOL_FOR_MOBILE); } return result; } /** * Get default SIP protocol for wifi * * @return Protocol (udp | tcp | tls) */ public String getSipDefaultProtocolForWifi() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.SIP_DEFAULT_PROTOCOL_FOR_WIFI); } return result; } /** * Get TLS Certificate root * * @return Path of the certificate */ public String getTlsCertificateRoot() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.TLS_CERTIFICATE_ROOT); } return result; } /** * Get TLS Certificate intermediate * * @return Path of the certificate */ public String getTlsCertificateIntermediate() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.TLS_CERTIFICATE_INTERMEDIATE); } return result; } /** * Get SIP transaction timeout used to wait SIP response * * @return Timeout in seconds */ public int getSipTransactionTimeout() { int result = 30; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SIP_TRANSACTION_TIMEOUT)); } catch(Exception e) {} } return result; } /** * Get default MSRP port * * @return Port */ public int getDefaultMsrpPort() { int result = 20000; 
if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MSRP_DEFAULT_PORT)); } catch(Exception e) {} } return result; } /** * Get default RTP port * * @return Port */ public int getDefaultRtpPort() { int result = 10000; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.RTP_DEFAULT_PORT)); } catch(Exception e) {} } return result; } /** * Get MSRP transaction timeout used to wait MSRP response * * @return Timeout in seconds */ public int getMsrpTransactionTimeout() { int result = 5; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.MSRP_TRANSACTION_TIMEOUT)); } catch(Exception e) {} } return result; } /** * Get default expire period for REGISTER * * @return Period in seconds */ public int getRegisterExpirePeriod() { int result = 3600; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.REGISTER_EXPIRE_PERIOD)); } catch(Exception e) {} } return result; } /** * Get registration retry base time * * @return Time in seconds */ public int getRegisterRetryBaseTime() { int result = 30; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.REGISTER_RETRY_BASE_TIME)); } catch(Exception e) {} } return result; } /** * Get registration retry max time * * @return Time in seconds */ public int getRegisterRetryMaxTime() { int result = 1800; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.REGISTER_RETRY_MAX_TIME)); } catch(Exception e) {} } return result; } /** * Get default expire period for PUBLISH * * @return Period in seconds */ public int getPublishExpirePeriod() { int result = 3600; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.PUBLISH_EXPIRE_PERIOD)); } catch(Exception e) {} } return result; } /** * Get revoke timeout before to unrevoke a revoked contact * * @return Timeout in seconds */ public int getRevokeTimeout() { int result = 
300; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.REVOKE_TIMEOUT)); } catch(Exception e) {} } return result; } /** * Get IMS authentication procedure for mobile access * * @return Authentication procedure */ public String getImsAuhtenticationProcedureForMobile() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.IMS_AUTHENT_PROCEDURE_MOBILE); } return result; } /** * Get IMS authentication procedure for Wi-Fi access * * @return Authentication procedure */ public String getImsAuhtenticationProcedureForWifi() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.IMS_AUTHENT_PROCEDURE_WIFI); } return result; } /** * Is Tel-URI format used * * @return Boolean */ public boolean isTelUriFormatUsed() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.TEL_URI_FORMAT)); } return result; } /** * Get ringing period * * @return Period in seconds */ public int getRingingPeriod() { int result = 120; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.RINGING_SESSION_PERIOD)); } catch(Exception e) {} } return result; } /** * Get default expire period for SUBSCRIBE * * @return Period in seconds */ public int getSubscribeExpirePeriod() { int result = 3600; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SUBSCRIBE_EXPIRE_PERIOD)); } catch(Exception e) {} } return result; } /** * Get "Is-composing" timeout for chat service * * @return Timer in seconds */ public int getIsComposingTimeout() { int result = 15; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.IS_COMPOSING_TIMEOUT)); } catch(Exception e) {} } return result; } /** * Get default expire period for INVITE (session refresh) * * @return Period in seconds */ public int getSessionRefreshExpirePeriod() { int result = 3600; if (instance != null) { try { 
result = Integer.parseInt(readParameter(RcsSettingsData.SESSION_REFRESH_EXPIRE_PERIOD)); } catch(Exception e) {} } return result; } /** * Is permanente state mode activated * * @return Boolean */ public boolean isPermanentStateModeActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.PERMANENT_STATE_MODE)); } return result; } /** * Is trace activated * * @return Boolean */ public boolean isTraceActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.TRACE_ACTIVATED)); } return result; } /** * Get trace level * * @return trace level */ public String getTraceLevel() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.TRACE_LEVEL); } return result; } /** * Is media trace activated * * @return Boolean */ public boolean isSipTraceActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.SIP_TRACE_ACTIVATED)); } return result; } /** * Get SIP trace file * * @return SIP trace file */ public String getSipTraceFile() { String result = "/sdcard/sip.txt"; if (instance != null) { try { result = readParameter(RcsSettingsData.SIP_TRACE_FILE); } catch(Exception e) {} } return result; } /** * Is media trace activated * * @return Boolean */ public boolean isMediaTraceActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.MEDIA_TRACE_ACTIVATED)); } return result; } /** * Get capability refresh timeout used to avoid too many requests in a short time * * @return Timeout in seconds */ public int getCapabilityRefreshTimeout() { int result = 1; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.CAPABILITY_REFRESH_TIMEOUT)); } catch(Exception e) {} } return result; } /** * Get capability expiry timeout used to decide when to refresh contact capabilities * * 
@return Timeout in seconds */ public int getCapabilityExpiryTimeout() { int result = 3600; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.CAPABILITY_EXPIRY_TIMEOUT)); } catch(Exception e) {} } return result; } /** * Get capability polling period used to refresh contacts capabilities * * @return Timeout in seconds */ public int getCapabilityPollingPeriod() { int result = 3600; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.CAPABILITY_POLLING_PERIOD)); } catch(Exception e) {} } return result; } /** * Is CS video supported * * @return Boolean */ public boolean isCsVideoSupported() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_CS_VIDEO)); } return result; } /** * Is file transfer supported * * @return Boolean */ public boolean isFileTransferSupported() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_FILE_TRANSFER)); } return result; } /** * Is IM session supported * * @return Boolean */ public boolean isImSessionSupported() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_IM_SESSION)); } return result; } /** * Is image sharing supported * * @return Boolean */ public boolean isImageSharingSupported() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_IMAGE_SHARING)); } return result; } /** * Is video sharing supported * * @return Boolean */ public boolean isVideoSharingSupported() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_VIDEO_SHARING)); } return result; } /** * Is presence discovery supported * * @return Boolean */ public boolean isPresenceDiscoverySupported() { boolean result = false; if (instance != null) { result = 
Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_PRESENCE_DISCOVERY)); } return result; } /** * Is social presence supported * * @return Boolean */ public boolean isSocialPresenceSupported() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_SOCIAL_PRESENCE)); } return result; } /** * Get supported RCS extensions * * @return List of extensions (semicolon separated) */ public String getSupportedRcsExtensions() { String result = null; if (instance != null) { return readParameter(RcsSettingsData.CAPABILITY_RCS_EXTENSIONS); } return result; } /** * Set supported RCS extensions * * @param extensions List of extensions (semicolon separated) */ public void setSupportedRcsExtensions(String extensions) { if (instance != null) { writeParameter(RcsSettingsData.CAPABILITY_RCS_EXTENSIONS, extensions); } } /** * Is IM always-on thanks to the Store & Forward functionality * * @return Boolean */ public boolean isImAlwaysOn() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.IM_CAPABILITY_ALWAYS_ON)); } return result; } /** * Is IM reports activated * * @return Boolean */ public boolean isImReportsActivated() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.IM_USE_REPORTS)); } return result; } /** * Get network access * * @return Network type */ public int getNetworkAccess() { int result = RcsSettingsData.ANY_ACCESS; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.NETWORK_ACCESS)); } catch(Exception e) {} } return result; } /** * Get SIP timer T1 * * @return Timer in milliseconds */ public int getSipTimerT1() { int result = 2000; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SIP_TIMER_T1)); } catch(Exception e) {} } return result; } /** * Get SIP timer T2 * * @return Timer in milliseconds */ public int 
getSipTimerT2() { int result = 16000; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SIP_TIMER_T2)); } catch(Exception e) {} } return result; } /** * Get SIP timer T4 * * @return Timer in milliseconds */ public int getSipTimerT4() { int result = 17000; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SIP_TIMER_T4)); } catch(Exception e) {} } return result; } /** * Is SIP keep-alive enabled * * @return Boolean */ public boolean isSipKeepAliveEnabled() { boolean result = true; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.SIP_KEEP_ALIVE)); } return result; } /** * Get SIP keep-alive period * * @return Period in seconds */ public int getSipKeepAlivePeriod() { int result = 60; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.SIP_KEEP_ALIVE_PERIOD)); } catch(Exception e) {} } return result; } /** * Get APN used to connect to RCS platform * * @return APN (null means any APN may be used to connect to RCS) */ public String getNetworkApn() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.RCS_APN); } return result; } /** * Get operator authorized to connect to RCS platform * * @return SIM operator name (null means any SIM operator is authorized to connect to RCS) */ public String getNetworkOperator() { String result = null; if (instance != null) { result = readParameter(RcsSettingsData.RCS_OPERATOR); } return result; } /** * Is GRUU supported * * @return Boolean */ public boolean isGruuSupported() { boolean result = true; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.GRUU)); } return result; } /** * Is CPU Always_on activated * * @return Boolean */ public boolean isCpuAlwaysOn() { boolean result = false; if (instance != null) { result = Boolean.parseBoolean(readParameter(RcsSettingsData.CPU_ALWAYS_ON)); } return result; } /** * Get auto configuration 
mode * * @return Mode */ public int getAutoConfigMode() { int result = RcsSettingsData.NO_AUTO_CONFIG; if (instance != null) { try { result = Integer.parseInt(readParameter(RcsSettingsData.AUTO_CONFIG_MODE)); } catch(Exception e) {} } return result; } /** * Remove user profile information */ public void removeUserProfile() { setServiceActivationState(false); setUserProfileImsUserName(""); setUserProfileImsDomain(""); setUserProfileImsPassword(""); setImsProxyAddrForMobile(""); setImsProxyPortForMobile(5060); setImsProxyAddrForWifi(""); setImsProxyPortForWifi(5060); setUserProfileImsDisplayName(""); setUserProfileImsPrivateId(""); setXdmLogin(""); setXdmPassword(""); setXdmServer(""); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/provider/settings/RcsSettingsData.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.provider.settings; import android.net.ConnectivityManager; import android.net.Uri; /** * RCS settings data constants * * @author jexa7410 */ public class RcsSettingsData { /** * Database URI */ static final Uri CONTENT_URI = Uri.parse("content://com.orangelabs.rcs.settings/settings"); /** * Column name */ static final String KEY_ID = "_id"; /** * Column name */ static final String KEY_KEY = "key"; /** * Column name */ static final String KEY_VALUE = "value"; // --------------------------------------------------------------------------- // Constants // --------------------------------------------------------------------------- /** * Boolean value "true" */ public static final String TRUE = Boolean.toString(true); /** * Boolean value "false" */ public static final String FALSE = Boolean.toString(false); /** * GIBA authentication */ public static final String GIBA_AUTHENT = "GIBA"; /** * HTTP Digest authentication */ public static final String DIGEST_AUTHENT = "DIGEST"; /** * Any access */ public static final int ANY_ACCESS = -1; /** * Mobile access */ public static final int MOBILE_ACCESS = ConnectivityManager.TYPE_MOBILE; /** * Wi-Fi access */ public static final int WIFI_ACCESS = ConnectivityManager.TYPE_WIFI; /** * Folder path for certificate */ public static final String CERTIFICATE_FOLDER_PATH = "/sdcard/"; /** * File type for certificate */ public static final String CERTIFICATE_FILE_TYPE = ".crt"; /** * No auto config mode */ public static final int NO_AUTO_CONFIG = 0; /** * HTTPS auto config mode */ public static final int HTTPS_AUTO_CONFIG = 1; // --------------------------------------------------------------------------- // UI settings // --------------------------------------------------------------------------- /** * Activate or not the RCS service */ public static final String SERVICE_ACTIVATED = "ServiceActivated"; /** * Roaming authorization 
parameter which indicates if the RCS service may be used or not in roaming */ public static final String ROAMING_AUTHORIZED = "RoamingAuthorized"; /** * Ringtone which is played when a social presence sharing invitation is received */ public static final String PRESENCE_INVITATION_RINGTONE = "PresenceInvitationRingtone"; /** * Vibrate or not when a social presence sharing invitation is received */ public static final String PRESENCE_INVITATION_VIBRATE = "PresenceInvitationVibrate"; /** * Ringtone which is played when a content sharing invitation is received */ public static final String CSH_INVITATION_RINGTONE = "CShInvitationRingtone"; /** * Vibrate or not when a content sharing invitation is received */ public static final String CSH_INVITATION_VIBRATE = "CShInvitationVibrate"; /** * Make a beep or not when content sharing is available during a call */ public static final String CSH_AVAILABLE_BEEP = "CShAvailableBeep"; /** * Video format for video share */ public static final String CSH_VIDEO_FORMAT = "CShVideoFormat"; /** * Video size for video share */ public static final String CSH_VIDEO_SIZE = "CShVideoSize"; /** * Ringtone which is played when a file transfer invitation is received */ public static final String FILETRANSFER_INVITATION_RINGTONE = "FileTransferInvitationRingtone"; /** * Vibrate or not when a file transfer invitation is received */ public static final String FILETRANSFER_INVITATION_VIBRATE = "FileTransferInvitationVibrate"; /** * Ringtone which is played when a chat invitation is received */ public static final String CHAT_INVITATION_RINGTONE = "ChatInvitationRingtone"; /** * Vibrate or not when a chat invitation is received */ public static final String CHAT_INVITATION_VIBRATE = "ChatInvitationVibrate"; /** * Auto-accept mode for chat invitation */ public static final String CHAT_INVITATION_AUTO_ACCEPT = "ChatInvitationAutoAccept"; /** * Predefined freetext */ public static final String FREETEXT1 = "Freetext1"; /** * Predefined freetext */ 
public static final String FREETEXT2 = "Freetext2"; /** * Predefined freetext */ public static final String FREETEXT3 = "Freetext3"; /** * Predefined freetext */ public static final String FREETEXT4 = "Freetext4"; // --------------------------------------------------------------------------- // Service settings // --------------------------------------------------------------------------- /** * Max photo-icon size */ public static final String MAX_PHOTO_ICON_SIZE = "MaxPhotoIconSize"; /** * Max length of the freetext */ public static final String MAX_FREETXT_LENGTH = "MaxFreetextLength"; /** * Max number of participants in a group chat */ public static final String MAX_CHAT_PARTICIPANTS = "MaxChatParticipants"; /** * Max length of a chat message */ public static final String MAX_CHAT_MSG_LENGTH = "MaxChatMessageLength"; /** * Idle duration of a chat session */ public static final String CHAT_IDLE_DURATION = "ChatIdleDuration"; /** * Max size of a file transfer */ public static final String MAX_FILE_TRANSFER_SIZE = "MaxFileTransferSize"; /** * Warning threshold for file transfer size */ public static final String WARN_FILE_TRANSFER_SIZE = "WarnFileTransferSize"; /** * Max size of an image share */ public static final String MAX_IMAGE_SHARE_SIZE = "MaxImageShareSize"; /** * Max duration of a video share */ public static final String MAX_VIDEO_SHARE_DURATION = "MaxVideoShareDuration"; /** * Max number of simultaneous chat sessions */ public static final String MAX_CHAT_SESSIONS = "MaxChatSessions"; /** * Max number of simultaneous file transfer sessions */ public static final String MAX_FILE_TRANSFER_SESSIONS = "MaxFileTransferSessions"; /** * Activate or not SMS fallback service */ public static final String SMS_FALLBACK_SERVICE = "SmsFallbackService"; /** * Display a warning if Store & Forward service is activated */ public static final String WARN_SF_SERVICE = "StoreForwardServiceWarning"; /** * Define when the chat receiver sends the 200 OK back to the sender */ 
public static final String IM_SESSION_START = "ImSessionStart"; /** * Max entries for chat log */ public static final String MAX_CHAT_LOG_ENTRIES = "MaxChatLogEntries"; /** * Max entries for richcall log */ public static final String MAX_RICHCALL_LOG_ENTRIES = "MaxRichcallLogEntries"; // --------------------------------------------------------------------------- // User profile settings // --------------------------------------------------------------------------- /** * IMS username or username part of the IMPU (for HTTP Digest only) */ public static final String USERPROFILE_IMS_USERNAME = "ImsUsername"; /** * IMS display name */ public static final String USERPROFILE_IMS_DISPLAY_NAME = "ImsDisplayName"; /** * IMS private URI or IMPI (for HTTP Digest only) */ public static final String USERPROFILE_IMS_PRIVATE_ID = "ImsPrivateId"; /** * IMS password (for HTTP Digest only) */ public static final String USERPROFILE_IMS_PASSWORD = "ImsPassword"; /** * IMS home domain (for HTTP Digest only) */ public static final String USERPROFILE_IMS_HOME_DOMAIN = "ImsHomeDomain"; /** * P-CSCF or outbound proxy address for mobile access */ public static final String IMS_PROXY_ADDR_MOBILE = "ImsOutboundProxyAddrForMobile"; /** * P-CSCF or outbound proxy port for mobile access */ public static final String IMS_PROXY_PORT_MOBILE = "ImsOutboundProxyPortForMobile"; /** * P-CSCF or outbound proxy address for Wi-Fi access */ public static final String IMS_PROXY_ADDR_WIFI = "ImsOutboundProxyAddrForWifi"; /** * P-CSCF or outbound proxy port for Wi-Fi access */ public static final String IMS_PROXY_PORT_WIFI = "ImsOutboundProxyPortForWifi"; /** * XDM server address & port */ public static final String XDM_SERVER = "XdmServerAddr"; /** * XDM server login (for HTTP Digest only) */ public static final String XDM_LOGIN= "XdmServerLogin"; /** * XDM server password (for HTTP Digest only) */ public static final String XDM_PASSWORD = "XdmServerPassword"; /** * IM conference URI for group chat session */ 
public static final String IM_CONF_URI = "ImConferenceUri"; /** * End user confirmation request URI for terms and conditions */ public static final String ENDUSER_CONFIRMATION_URI = "EndUserConfReqUri"; /** * Country code */ public static final String COUNTRY_CODE = "CountryCode"; /** * Country area code */ public static final String COUNTRY_AREA_CODE = "CountryAreaCode"; // --------------------------------------------------------------------------- // Stack settings // --------------------------------------------------------------------------- /** * Polling period used before each IMS service check (e.g. test subscription state for presence service) */ public static final String IMS_SERVICE_POLLING_PERIOD = "ImsServicePollingPeriod"; /** * Default SIP port */ public static final String SIP_DEFAULT_PORT = "SipListeningPort"; /** * Default SIP protocol */ public static final String SIP_DEFAULT_PROTOCOL_FOR_MOBILE = "SipDefaultProtocolForMobile"; /** * Default SIP protocol */ public static final String SIP_DEFAULT_PROTOCOL_FOR_WIFI = "SipDefaultProtocolForWifi"; /** * TLS Certifcate root */ public static final String TLS_CERTIFICATE_ROOT = "TlsCertificateRoot"; /** * TLS Certifcate intermediate */ public static final String TLS_CERTIFICATE_INTERMEDIATE = "TlsCertificateIntermediate"; /** * SIP transaction timeout used to wait a SIP response */ public static final String SIP_TRANSACTION_TIMEOUT = "SipTransactionTimeout"; /** * Default TCP port for MSRP session */ public static final String MSRP_DEFAULT_PORT = "DefaultMsrpPort"; /** * Default UDP port for RTP session */ public static final String RTP_DEFAULT_PORT = "DefaultRtpPort"; /** * MSRP transaction timeout used to wait MSRP response */ public static final String MSRP_TRANSACTION_TIMEOUT = "MsrpTransactionTimeout"; /** * Registration expire period */ public static final String REGISTER_EXPIRE_PERIOD = "RegisterExpirePeriod"; /** * Registration retry base time */ public static final String REGISTER_RETRY_BASE_TIME 
= "RegisterRetryBaseTime"; /** * Registration retry max time */ public static final String REGISTER_RETRY_MAX_TIME = "RegisterRetryMaxTime"; /** * Publish expire period */ public static final String PUBLISH_EXPIRE_PERIOD = "PublishExpirePeriod"; /** * Revoke timeout */ public static final String REVOKE_TIMEOUT = "RevokeTimeout"; /** * IMS authentication procedure for mobile access */ public static final String IMS_AUTHENT_PROCEDURE_MOBILE = "ImsAuhtenticationProcedureForMobile"; /** * IMS authentication procedure for Wi-Fi access */ public static final String IMS_AUTHENT_PROCEDURE_WIFI = "ImsAuhtenticationProcedureForWifi"; /** * Activate or not Tel-URI format */ public static final String TEL_URI_FORMAT = "TelUriFormat"; /** * Ringing session period. At the end of the period the session is cancelled */ public static final String RINGING_SESSION_PERIOD = "RingingPeriod"; /** * Subscribe expiration timeout */ public static final String SUBSCRIBE_EXPIRE_PERIOD = "SubscribeExpirePeriod"; /** * "Is-composing" timeout for chat service */ public static final String IS_COMPOSING_TIMEOUT = "IsComposingTimeout"; /** * SIP session refresh expire period */ public static final String SESSION_REFRESH_EXPIRE_PERIOD = "SessionRefreshExpirePeriod"; /** * Activate or not permanent state mode */ public static final String PERMANENT_STATE_MODE = "PermanentState"; /** * Activate or not the traces */ public static final String TRACE_ACTIVATED = "TraceActivated"; /** * Logger trace level */ public static final String TRACE_LEVEL = "TraceLevel"; /** * Activate or not the SIP trace */ public static final String SIP_TRACE_ACTIVATED = "SipTraceActivated"; /** * SIP trace file */ public static final String SIP_TRACE_FILE = "SipTraceFile"; /** * Activate or not the media trace */ public static final String MEDIA_TRACE_ACTIVATED = "MediaTraceActivated"; /** * Capability refresh timeout used to avoid too many requests in a short time */ public static final String CAPABILITY_REFRESH_TIMEOUT = 
"CapabilityRefreshTimeout"; /** * Capability refresh timeout used to decide when to refresh contact capabilities */ public static final String CAPABILITY_EXPIRY_TIMEOUT = "CapabilityExpiryTimeout"; /** * Polling period used to decide when to refresh contacts capabilities */ public static final String CAPABILITY_POLLING_PERIOD = "CapabilityPollingPeriod"; /** * CS video capability */ public static final String CAPABILITY_CS_VIDEO = "CapabilityCsVideo"; /** * Image sharing capability */ public static final String CAPABILITY_IMAGE_SHARING = "CapabilityImageShare"; /** * Video sharing capability */ public static final String CAPABILITY_VIDEO_SHARING = "CapabilityVideoShare"; /** * Instant Messaging session capability */ public static final String CAPABILITY_IM_SESSION = "CapabilityImSession"; /** * File transfer capability */ public static final String CAPABILITY_FILE_TRANSFER = "CapabilityFileTransfer"; /** * Presence discovery capability */ public static final String CAPABILITY_PRESENCE_DISCOVERY = "CapabilityPresenceDiscovery"; /** * Social presence capability */ public static final String CAPABILITY_SOCIAL_PRESENCE = "CapabilitySocialPresence"; /** * RCS extensions capability */ public static final String CAPABILITY_RCS_EXTENSIONS = "CapabilityRcsExtensions"; /** * Instant messaging is always on (Store & Forward server) */ public static final String IM_CAPABILITY_ALWAYS_ON = "ImAlwaysOn"; /** * Instant messaging use report */ public static final String IM_USE_REPORTS = "ImUseReports"; /** * Network access authorized */ public static final String NETWORK_ACCESS = "NetworkAccess"; /** * SIP stack timer T1 */ public static final String SIP_TIMER_T1 = "SipTimerT1"; /** * SIP stack timer T2 */ public static final String SIP_TIMER_T2 = "SipTimerT2"; /** * SIP stack timer T4 */ public static final String SIP_TIMER_T4 = "SipTimerT4"; /** * Enable SIP keep alive */ public static final String SIP_KEEP_ALIVE = "SipKeepAlive"; /** * SIP keep alive period */ public static final 
String SIP_KEEP_ALIVE_PERIOD = "SipKeepAlivePeriod"; /** * RCS APN */ public static final String RCS_APN = "RcsApn"; /** * RCS operator */ public static final String RCS_OPERATOR = "RcsOperator"; /** * GRUU support */ public static final String GRUU = "GRUU"; /** * CPU always_on support */ public static final String CPU_ALWAYS_ON = "CpuAlwaysOn"; /** * Auto configuration mode */ public static final String AUTO_CONFIG_MODE = "Autoconfig"; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/service/api/client/capability/Capabilities.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.service.api.client.capability; import java.util.ArrayList; import android.os.Parcel; import android.os.Parcelable; /** * Capabilities * * @author jexa7410 */ public class Capabilities implements Parcelable { /** * Image sharing support */ private boolean imageSharing = false; /** * Video sharing support */ private boolean videoSharing = false; /** * IM session support */ private boolean imSession = false; /** * File transfer support */ private boolean fileTransfer = false; /** * CS video support */ private boolean csVideo = false; /** * Presence discovery support */ private boolean presenceDiscovery = false; /** * Social presence support */ private boolean socialPresence = false; /** * List of supported extensions */ private ArrayList extensions = new ArrayList(); /** * Last capabilities update */ private long timestamp = System.currentTimeMillis(); /** * Constructor */ public Capabilities() { } /** * Constructor * * @param source Parcelable source */ public Capabilities(Parcel source) { this.imageSharing = source.readInt() != 0; this.videoSharing = source.readInt() != 0; this.imSession = source.readInt() != 0; this.fileTransfer = source.readInt() != 0; this.csVideo = source.readInt() != 0; this.presenceDiscovery = source.readInt() != 0; this.socialPresence = source.readInt() != 0; this.timestamp = source.readLong(); source.readStringList(this.extensions); } /** * Describe the kinds of special objects contained in this Parcelable's * marshalled representation * * @return Integer */ public int describeContents() { return 0; } /** * Write parcelable object * * @param dest The Parcel in which the object should be written * @param flags Additional flags about how the object should be written */ public void writeToParcel(Parcel dest, int flags) { dest.writeInt(imageSharing ? 1 : 0); dest.writeInt(videoSharing ? 1 : 0); dest.writeInt(imSession ? 
1 : 0); dest.writeInt(fileTransfer ? 1 : 0); dest.writeInt(csVideo ? 1 : 0); dest.writeInt(presenceDiscovery ? 1 : 0); dest.writeInt(socialPresence ? 1 : 0); dest.writeLong(timestamp); if (extensions!=null && extensions.size()>0){ dest.writeStringList(extensions); } } /** * Parcelable creator */ public static final Parcelable.Creator CREATOR = new Parcelable.Creator() { public Capabilities createFromParcel(Parcel source) { return new Capabilities(source); } public Capabilities[] newArray(int size) { return new Capabilities[size]; } }; /** * Is image sharing supported * * @return Boolean */ public boolean isImageSharingSupported() { return imageSharing; } /** * Set the image sharing support * * @param supported Supported */ public void setImageSharingSupport(boolean supported) { this.imageSharing = supported; } /** * Is video sharing supported * * @return Boolean */ public boolean isVideoSharingSupported() { return videoSharing; } /** * Set the video sharing support * * @param supported Supported */ public void setVideoSharingSupport(boolean supported) { this.videoSharing = supported; } /** * Is IM session supported * * @return Boolean */ public boolean isImSessionSupported() { return imSession; } /** * Set the IM session support * * @param supported Supported */ public void setImSessionSupport(boolean supported) { this.imSession = supported; } /** * Is file transfer supported * * @return Boolean */ public boolean isFileTransferSupported() { return fileTransfer; } /** * Set the file transfer support * * @param supported Supported */ public void setFileTransferSupport(boolean supported) { this.fileTransfer = supported; } /** * Is CS video supported * * @return Boolean */ public boolean isCsVideoSupported() { return csVideo; } /** * Set the CS video support * * @param supported Supported */ public void setCsVideoSupport(boolean supported) { this.csVideo = supported; } /** * Is presence discovery supported * * @return Boolean */ public boolean 
isPresenceDiscoverySupported() { return presenceDiscovery; } /** * Set the presence discovery support * * @param supported Supported */ public void setPresenceDiscoverySupport(boolean supported) { this.presenceDiscovery = supported; } /** * Is social presence supported * * @return Boolean */ public boolean isSocialPresenceSupported() { return socialPresence; } /** * Set the social presence support * * @param supported Supported */ public void setSocialPresenceSupport(boolean supported) { this.socialPresence = supported; } /** * Add supported extension * * @param tag Feature tag */ public void addSupportedExtension(String tag) { extensions.add(tag); } /** * Get list of supported extensions * * @return List */ public ArrayList getSupportedExtensions() { return extensions; } /** * Get the capabilities timestamp * * @return Timestamp (in milliseconds) */ public long getTimestamp() { return timestamp; } /** * Set capabilities timestamp * * @param Timestamp */ public void setTimestamp(long timestamp) { this.timestamp = timestamp; } /** * Returns a string representation of the object * * @return String */ public String toString() { return "Image_share=" + imageSharing + ", Video_share=" + videoSharing + ", FT=" + fileTransfer + ", IM=" + imSession + ", CS_video=" + csVideo + ", Presence_discovery=" + presenceDiscovery + ", Social_presence=" + socialPresence + ", Timestamp=" + timestamp; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/service/api/client/media/IMediaEventListener.aidl ================================================ package com.orangelabs.rcs.service.api.client.media; /** * Media event listener */ interface IMediaEventListener { // Media is opened void mediaOpened(); // Media is closed void mediaClosed(); // Media is started void mediaStarted(); // Media is stopped void mediaStopped(); // Media has failed void mediaError(in String error); } ================================================ FILE: 
RtspCamera/src/com/orangelabs/rcs/service/api/client/media/IMediaPlayer.aidl ================================================ package com.orangelabs.rcs.service.api.client.media; import com.orangelabs.rcs.service.api.client.media.IMediaEventListener; import com.orangelabs.rcs.service.api.client.media.MediaCodec; /** * Media RTP player */ interface IMediaPlayer { // Open the player void open(in String remoteHost, in int remotePort); // Close the player void close(); // Start the player void start(); // Stop the player void stop(); // Returns the local RTP port int getLocalRtpPort(); // Add a media listener void addListener(in IMediaEventListener listener); // Remove media listeners void removeAllListeners(); // Get supported media codecs MediaCodec[] getSupportedMediaCodecs(); // Get media codec MediaCodec getMediaCodec(); // Set media codec void setMediaCodec(in MediaCodec mediaCodec); } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/service/api/client/media/IMediaRenderer.aidl ================================================ package com.orangelabs.rcs.service.api.client.media; import com.orangelabs.rcs.service.api.client.media.IMediaEventListener; import com.orangelabs.rcs.service.api.client.media.MediaCodec; /** * Media RTP renderer */ interface IMediaRenderer { // Open the renderer void open(in String remoteHost, in int remotePort); // Close the renderer void close(); // Start the renderer void start(); // Stop the renderer void stop(); // Returns the local RTP port int getLocalRtpPort(); // Add a media listener void addListener(in IMediaEventListener listener); // Remove media listeners void removeAllListeners(); // Get supported media codecs MediaCodec[] getSupportedMediaCodecs(); // Get media codec MediaCodec getMediaCodec(); // Set media codec void setMediaCodec(in MediaCodec mediaCodec); } ================================================ FILE: 
RtspCamera/src/com/orangelabs/rcs/service/api/client/media/MediaCodec.aidl ================================================ package com.orangelabs.rcs.service.api.client.media; parcelable MediaCodec; ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/service/api/client/media/MediaCodec.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.service.api.client.media; import android.os.Bundle; import android.os.Parcel; import android.os.Parcelable; import java.util.Enumeration; import java.util.Hashtable; import java.util.Iterator; import java.util.Set; /** * Media Codec * * @author hlxn7157 */ public class MediaCodec implements Parcelable { /** * Codec name */ private String codecName; /** * Codec parameters */ private Hashtable parameters = new Hashtable(); /** * Constructor * * @param codecName Codec name */ public MediaCodec(String codecName) { this.codecName = codecName; } /** * Constructor * * @param source Parcelable source */ public MediaCodec(Parcel source) { this.codecName = source.readString(); Bundle parametersBundle = source.readBundle(); Set keys = parametersBundle.keySet(); Iterator i = keys.iterator(); while (i.hasNext()) { String key = i.next().toString(); String value = parametersBundle.getString(key); this.parameters.put(key, value); } } /** * Describe the kinds of special objects contained in this Parcelable's * marshalled representation * * @return Integer */ public int describeContents() { return 0; } /** * Write parcelable object * * @param dest The Parcel in which the object should be written * @param flags Additional flags about how the object should be written */ public void writeToParcel(Parcel dest, int flags) { Bundle parametersBundle = new Bundle(); Enumeration e = parameters.keys(); while (e.hasMoreElements()) { String key = e.nextElement().toString(); parametersBundle.putString(key, parameters.get(key)); } dest.writeString(codecName); dest.writeBundle(parametersBundle); } /** * Parcelable creator */ public static final Parcelable.Creator CREATOR = new Parcelable.Creator() { public MediaCodec createFromParcel(Parcel source) { return new MediaCodec(source); } public MediaCodec[] newArray(int size) { return new MediaCodec[size]; } }; /** * Get codec name * * @return 
Codec name */ public String getCodecName() { return codecName; } /** * Set codec name * * @param codecName Codec name */ public void setCodecName(String codecName) { this.codecName = codecName; } /** * Get a codec parameter as string * * @param key Parameter key * @return Parameter value */ public String getStringParam(String key) { if (key != null) { return parameters.get(key); } else { return null; } } /** * Get a codec parameter as integer * * @param key Parameter key * @param defaultValue default value * @return Parameter value */ public int getIntParam(String key, int defaultValue) { String value = getStringParam(key); try { return Integer.parseInt(value); } catch(Exception e) { return defaultValue; } } /** * Set a codec parameter * * @param key Parameter key * @param value Parameter value */ public void setParam(String key, String value) { parameters.put(key, value); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/service/api/client/media/video/VideoCodec.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.service.api.client.media.video; import com.orangelabs.rcs.service.api.client.media.MediaCodec; /** * Video codec * * @author hlxn7157 */ public class VideoCodec { /** * Media codec */ private MediaCodec mediaCodec; /** * Payload key */ private static final String PAYLOAD = "payload"; /** * Clock rate key */ private static final String CLOCKRATE = "clockRate"; /** * Codec param key */ private static final String CODECPARAMS = "codecParams"; /** * Frame rate key */ private static final String FRAMERATE = "framerate"; /** * Bit rate key */ private static final String BITRATE = "bitrate"; /** * Codec width key */ private static final String CODECWIDTH = "codecWidth"; /** * Codec height key */ private static final String CODECHEIGHT = "codecHeight"; /** * Constructor * * @param codecName Codec name * @param clockRate Clock rate * @param codecParams Codec parameters * @param framerate Frame rate * @param bitrate Bit rate * @param width Video width * @param height Video height */ public VideoCodec(String codecName, int payload, int clockRate, String codecParams, int framerate, int bitrate, int width, int height) { mediaCodec = new MediaCodec(codecName); mediaCodec.setParam(PAYLOAD, "" + payload); mediaCodec.setParam(CLOCKRATE, "" + clockRate); mediaCodec.setParam(CODECPARAMS, codecParams); mediaCodec.setParam(FRAMERATE, "" + framerate); mediaCodec.setParam(BITRATE, "" + bitrate); mediaCodec.setParam(CODECWIDTH, "" + width); mediaCodec.setParam(CODECHEIGHT, "" + height); } /** * Constructor * * @param mediaCodec Media codec */ public VideoCodec(MediaCodec mediaCodec) { this.mediaCodec = mediaCodec; } /** * Get media codec * * @return media codec */ public MediaCodec getMediaCodec() { return mediaCodec; } /** * Get codec name * * @return Codec name */ public String getCodecName() { return mediaCodec.getCodecName(); } /** * Get payload * * @return payload */ public int 
getPayload() { return mediaCodec.getIntParam(PAYLOAD, 96); } /** * Get video clock rate * * @return Video clock rate */ public int getClockRate() { return mediaCodec.getIntParam(CLOCKRATE, 90000); } /** * Get video codec parameters * * @return Video codec parameters */ public String getCodecParams() { return mediaCodec.getStringParam(CODECPARAMS); } /** * Get video frame rate * * @return Video frame rate */ public int getFramerate() { return mediaCodec.getIntParam(FRAMERATE, 15); } /** * Get video bitrate * * @return Video bitrate */ public int getBitrate() { return mediaCodec.getIntParam(BITRATE, 0); } /** * Get video width * * @return Video width */ public int getWidth() { return mediaCodec.getIntParam(CODECWIDTH, 176); } /** * Get video height * * @return Video height */ public int getHeight() { return mediaCodec.getIntParam(CODECHEIGHT, 144); } /** * Compare codec encodings and resolutions * * @param codec Codec to compare * @return True if codecs are equals */ public boolean compare(VideoCodec codec) { if (getCodecName().equalsIgnoreCase(codec.getCodecName()) && getWidth() == codec.getWidth() && getHeight() == codec.getHeight()) return true; return false; } /** * Check if a codec is in a list * * @param supportedCodecs list of supported codec * @param codec selected codec * @return True if the codec is in the list */ public static boolean checkVideoCodec(MediaCodec[] supportedCodecs, VideoCodec codec) { for (int i = 0; i < supportedCodecs.length; i++) { if (codec.compare(new VideoCodec(supportedCodecs[i]))) return true; } return false; } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/service/api/client/media/video/VideoSurfaceView.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.service.api.client.media.video; import android.content.Context; import android.graphics.Bitmap; import android.graphics.Canvas; import android.util.AttributeSet; import android.view.SurfaceHolder; import android.view.SurfaceView; /** * Video surface view * * @author jexa7410 */ public class VideoSurfaceView extends SurfaceView { /** * No aspect ratio */ public static float NO_RATIO = 0.0f; /** * Display area aspect ratio */ private float aspectRatio = NO_RATIO; /** * Surface has been created state */ private boolean surfaceCreated = false; /** * Surface holder */ private SurfaceHolder holder; /** * Constructor * * @param context Context */ public VideoSurfaceView(Context context) { super(context); init(); } /** * Constructor * * @param context Context * @param attrs Attributes */ public VideoSurfaceView(Context context, AttributeSet attrs) { super(context, attrs); init(); } /** * Constructor * * @param context Context * @param attrs Attributes * @param defStyle Style */ public VideoSurfaceView(Context context, AttributeSet attrs, int defStyle) { super(context, attrs, defStyle); init(); } /** * Set aspect ration according to desired width and height * * @param width Width * @param height Height */ public void setAspectRatio(int width, int height) { setAspectRatio((float)width / (float)height); } /** * Set aspect ratio * * @param 
ratio Ratio */ public void setAspectRatio(float ratio) { if (aspectRatio != ratio) { aspectRatio = ratio; requestLayout(); invalidate(); } } /** * Ensure aspect ratio * * @param widthMeasureSpec Width * @param heightMeasureSpec Heigh */ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) { if (aspectRatio != NO_RATIO) { int widthSpecSize = MeasureSpec.getSize(widthMeasureSpec); int heightSpecSize = MeasureSpec.getSize(heightMeasureSpec); int width = widthSpecSize; int height = heightSpecSize; if (width > 0 && height > 0) { float defaultRatio = ((float) width) / ((float) height); if (defaultRatio < aspectRatio) { // Need to reduce height height = (int) (width / aspectRatio); } else if (defaultRatio > aspectRatio) { width = (int) (height * aspectRatio); } width = Math.min(width, widthSpecSize); height = Math.min(height, heightSpecSize); setMeasuredDimension(width, height); return; } } super.onMeasure(widthMeasureSpec, heightMeasureSpec); } /** * Set image from a bitmap * * @param bmp Bitmap */ public void setImage(Bitmap bmp) { if (surfaceCreated) { Canvas canvas = null; try { synchronized(holder) { canvas = holder.lockCanvas(); } } finally { if (canvas != null) { // First clear screen canvas.drawARGB(255, 0, 0, 0); // Then draw bmp canvas.drawBitmap(bmp, null, canvas.getClipBounds(), null); holder.unlockCanvasAndPost(canvas); } } } } public void clearImage() { if (surfaceCreated) { Canvas canvas = null; try { synchronized(holder) { canvas = holder.lockCanvas(); } } finally { if (canvas != null) { // Clear screen canvas.drawARGB(255, 0, 0, 0); holder.unlockCanvasAndPost(canvas); } } } } /** * Init the view */ private void init() { // Get a surface holder holder = this.getHolder(); holder.addCallback(surfaceCallback); } /** * Surface holder callback */ private SurfaceHolder.Callback surfaceCallback = new SurfaceHolder.Callback() { public void surfaceChanged(SurfaceHolder _holder, int format, int w,int h) { } public void surfaceCreated(SurfaceHolder 
_holder) { surfaceCreated = true; } public void surfaceDestroyed(SurfaceHolder _holder) { surfaceCreated = false; } }; } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/utils/FifoBuffer.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.orangelabs.rcs.utils; import java.util.Vector; /** * FIFO buffer * * @author JM. Auffret */ public class FifoBuffer { /** * Number of objects in the buffer */ private int nbObjects = 0; /** * Buffer of objects */ private Vector fifo = new Vector(); /** * Add an object in the buffer * * @param obj Message */ public synchronized void addObject(Object obj) { fifo.addElement(obj); nbObjects++; notifyAll(); } /** * Read an object in the buffer. This is a blocking method until an object is read. * * @return Object */ public synchronized Object getObject() { Object obj = null; if (nbObjects == 0) { try { wait(); } catch (InterruptedException e) { // Nothing to do } } if (nbObjects != 0) { obj = fifo.elementAt(0); fifo.removeElementAt(0); nbObjects--; notifyAll(); } return obj; } /** * Read an object in the buffer. This is a blocking method until a timeout * occurs or an object is read. 
* * @param timeout Timeout * @return Message */ public synchronized Object getObject(int timeout) { Object obj = null; if (nbObjects == 0) { try { wait(timeout); } catch (InterruptedException e) { // Nothing to do } } if (nbObjects != 0) { obj = fifo.elementAt(0); fifo.removeElementAt(0); nbObjects--; notifyAll(); } return obj; } /** * Close the buffer */ public synchronized void close() { // Free the semaphore this.notifyAll(); } } ================================================ FILE: RtspCamera/src/com/orangelabs/rcs/utils/NetworkRessourceManager.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package com.orangelabs.rcs.utils; import com.orangelabs.rcs.platform.network.DatagramConnection; import com.orangelabs.rcs.platform.network.NetworkFactory; import com.orangelabs.rcs.platform.network.SocketServerConnection; import com.orangelabs.rcs.provider.settings.RcsSettings; import java.io.IOException; /** * Network ressource manager * * @author jexa7410 */ public class NetworkRessourceManager { /** * Default SIP port base */ public static int DEFAULT_LOCAL_SIP_PORT_BASE = RcsSettings.getInstance().getSipListeningPort(); /** * Default RTP port base */ public static int DEFAULT_LOCAL_RTP_PORT_BASE = RcsSettings.getInstance().getDefaultRtpPort(); /** * Default MSRP port base */ public static int DEFAULT_LOCAL_MSRP_PORT_BASE = RcsSettings.getInstance().getDefaultMsrpPort(); /** * Generate a default free SIP port number * * @return Local SIP port */ public static synchronized int generateLocalSipPort() { return generateLocalUdpPort(DEFAULT_LOCAL_SIP_PORT_BASE); } /** * Generate a default free RTP port number * * @return Local RTP port */ public static synchronized int generateLocalRtpPort() { return generateLocalUdpPort(DEFAULT_LOCAL_RTP_PORT_BASE); } /** * Generate a default free MSRP port number * * @return Local MSRP port */ public static synchronized int generateLocalMsrpPort() { return generateLocalTcpPort(DEFAULT_LOCAL_MSRP_PORT_BASE); } /** * Generate a free UDP port number from a specific port base * * @param portBase UDP port base * @return Local UDP port */ private static int generateLocalUdpPort(int portBase) { int resp = -1; int port = portBase; while((resp == -1) && (port < Integer.MAX_VALUE)) { if (isLocalUdpPortFree(port)) { // Free UDP port found resp = port; } else { // +2 needed for RTCP port port += 2; } } return resp; } /** * Test if the given local UDP port is really free (not used by * other applications) * * @param port Port to check * @return Boolean */ private 
static boolean isLocalUdpPortFree(int port) { boolean res = false; try { DatagramConnection conn = NetworkFactory.getFactory().createDatagramConnection(); conn.open(port); conn.close(); res = true; } catch(IOException e) { res = false; } return res; } /** * Generate a free TCP port number * * @param portBase TCP port base * @return Local TCP port */ private static int generateLocalTcpPort(int portBase) { int resp = -1; int port = portBase; while(resp == -1) { if (isLocalTcpPortFree(port)) { // Free TCP port found resp = port; } else { port++; } } return resp; } /** * Test if the given local TCP port is really free (not used by * other applications) * * @param port Port to check * @return Boolean */ private static boolean isLocalTcpPortFree(int port) { boolean res = false; try { SocketServerConnection conn = NetworkFactory.getFactory().createSocketServerConnection(); conn.open(port); conn.close(); res = true; } catch(IOException e) { res = false; } return res; } /** * Is a valid IP address * * @param ipAddress IP address * @return Boolean */ public static boolean isValidIpAddress(String ipAddress) { boolean result = false; if ((ipAddress != null) && (!ipAddress.equals("127.0.0.1")) && (!ipAddress.equals("localhost"))) { result = true; } return result; } /** * Convert an IP address to its integer representation * * @param addr IP address * @return Integer */ public static int ipToInt(String addr) { String[] addrArray = addr.split("\\."); int num = 0; for (int i=0; i= traceLevel)) { for(int i=0; i < appenders.length; i++) { appenders[i].printTrace(classname, level, trace); } } } /** * Set the list of appenders * * @param appenders List of appenders */ public static void setAppenders(Appender[] appenders) { Logger.appenders = appenders; } /** * Create a static instance * * @param classname Classname * @return Instance */ public static synchronized Logger getLogger(String classname) { return new Logger(classname); } /** * Get the current appenders * * @return Array of 
appender */ public static synchronized Appender[] getAppenders() { return appenders; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/RtpPacket.java ================================================ package de.kp.net.rtp; /* * Copyright (C) 2009 The Sipdroid Open Source Project * Copyright (C) 2005 Luca Veltri - University of Parma - Italy * * This file is part of Sipdroid (http://www.sipdroid.org) * * Sipdroid is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This source code is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this source code; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * RtpPacket implements a RTP packet. 
*/ public class RtpPacket { /* RTP packet buffer containing both the RTP header and payload */ byte[] packet; /* RTP packet length */ int packet_len; /* RTP header length */ // int header_len; /** Gets the RTP packet */ public byte[] getPacket() { return packet; } /** Gets the RTP packet length */ public int getLength() { return packet_len; } /** Gets the RTP header length */ public int getHeaderLength() { if (packet_len >= 12) return 12 + 4 * getCscrCount(); else return packet_len; // broken packet } /** Gets the RTP header length */ public int getPayloadLength() { if (packet_len >= 12) return packet_len - getHeaderLength(); else return 0; // broken packet } /** Sets the RTP payload length */ public void setPayloadLength(int len) { packet_len = getHeaderLength() + len; } // version (V): 2 bits // padding (P): 1 bit // extension (X): 1 bit // CSRC count (CC): 4 bits // marker (M): 1 bit // payload type (PT): 7 bits // sequence number: 16 bits // timestamp: 32 bits // SSRC: 32 bits // CSRC list: 0 to 15 items, 32 bits each /** Gets the version (V) */ public int getVersion() { if (packet_len >= 12) return (packet[0] >> 6 & 0x03); else return 0; // broken packet } /** Sets the version (V) */ public void setVersion(int v) { if (packet_len >= 12) packet[0] = (byte) ((packet[0] & 0x3F) | ((v & 0x03) << 6)); } /** Whether has padding (P) */ public boolean hasPadding() { if (packet_len >= 12) return getBit(packet[0], 5); else return false; // broken packet } /** Set padding (P) */ public void setPadding(boolean p) { if (packet_len >= 12) packet[0] = setBit(p, packet[0], 5); } /** Whether has extension (X) */ public boolean hasExtension() { if (packet_len >= 12) return getBit(packet[0], 4); else return false; // broken packet } /** Set extension (X) */ public void setExtension(boolean x) { if (packet_len >= 12) packet[0] = setBit(x, packet[0], 4); } /** Gets the CSCR count (CC) */ public int getCscrCount() { if (packet_len >= 12) return (packet[0] & 0x0F); else return 0; // 
broken packet } /** Whether has marker (M) */ public boolean hasMarker() { if (packet_len >= 12) return getBit(packet[1], 7); else return false; // broken packet } /** Set marker (M) */ public void setMarker(boolean m) { if (packet_len >= 12) packet[1] = setBit(m, packet[1], 7); } /** Gets the payload type (PT) */ public int getPayloadType() { if (packet_len >= 12) return (packet[1] & 0x7F); else return -1; // broken packet } /** Sets the payload type (PT) */ public void setPayloadType(int pt) { if (packet_len >= 12) packet[1] = (byte) ((packet[1] & 0x80) | (pt & 0x7F)); } /** Gets the sequence number */ public int getSequenceNumber() { if (packet_len >= 12) return getInt(packet, 2, 4); else return 0; // broken packet } /** Sets the sequence number */ public void setSequenceNumber(int sn) { if (packet_len >= 12) setInt(sn, packet, 2, 4); } /** Gets the timestamp */ public long getTimestamp() { if (packet_len >= 12) return getLong(packet, 4, 8); else return 0; // broken packet } /** Sets the timestamp */ public void setTimestamp(long timestamp) { if (packet_len >= 12) setLong(timestamp, packet, 4, 8); } /** Gets the SSCR */ public long getSscr() { if (packet_len >= 12) return getLong(packet, 8, 12); else return 0; // broken packet } /** Sets the SSCR */ public void setSscr(long ssrc) { if (packet_len >= 12) setLong(ssrc, packet, 8, 12); } /** Gets the CSCR list */ public long[] getCscrList() { int cc = getCscrCount(); long[] cscr = new long[cc]; for (int i = 0; i < cc; i++) cscr[i] = getLong(packet, 12 + 4 * i, 16 + 4 * i); return cscr; } /** Sets the CSCR list */ public void setCscrList(long[] cscr) { if (packet_len >= 12) { int cc = cscr.length; if (cc > 15) cc = 15; packet[0] = (byte) (((packet[0] >> 4) << 4) + cc); cscr = new long[cc]; for (int i = 0; i < cc; i++) setLong(cscr[i], packet, 12 + 4 * i, 16 + 4 * i); // header_len=12+4*cc; } } /** Sets the payload */ public void setPayload(byte[] payload, int len) { if (packet_len >= 12) { int header_len = 
getHeaderLength(); for (int i = 0; i < len; i++) packet[header_len + i] = payload[i]; packet_len = header_len + len; } } /** Gets the payload */ public byte[] getPayload() { int header_len = getHeaderLength(); int len = packet_len - header_len; byte[] payload = new byte[len]; for (int i = 0; i < len; i++) payload[i] = packet[header_len + i]; return payload; } /** Creates a new RTP packet */ public RtpPacket(byte[] buffer, int packet_length) { packet = buffer; packet_len = packet_length; if (packet_len < 12) packet_len = 12; init(0x0F); } /** init the RTP packet header (only PT) */ public void init(int ptype) { init(ptype, RtpRandom.nextLong()); } /** init the RTP packet header (PT and SSCR) */ public void init(int ptype, long sscr) { init(ptype, RtpRandom.nextInt(), RtpRandom.nextLong(), sscr); } /** init the RTP packet header (PT, SQN, TimeStamp, SSCR) */ public void init(int ptype, int seqn, long timestamp, long sscr) { setVersion(2); setPayloadType(ptype); setSequenceNumber(seqn); setTimestamp(timestamp); setSscr(sscr); } // *********************** Private and Static *********************** /** Gets int value */ //private static int getInt(byte b) { // return ((int) b + 256) % 256; //} /** Gets long value */ private static long getLong(byte[] data, int begin, int end) { long n = 0; for (; begin < end; begin++) { n <<= 8; n += data[begin] & 0xFF; } return n; } /** Sets long value */ private static void setLong(long n, byte[] data, int begin, int end) { for (end--; end >= begin; end--) { data[end] = (byte) (n % 256); n >>= 8; } } /** Gets Int value */ private static int getInt(byte[] data, int begin, int end) { return (int) getLong(data, begin, end); } /** Sets Int value */ private static void setInt(int n, byte[] data, int begin, int end) { setLong(n, data, begin, end); } /** Gets bit value */ private static boolean getBit(byte b, int bit) { return (b >> bit) == 1; } /** Sets bit value */ private static byte setBit(boolean value, byte b, int bit) { if (value) 
return (byte) (b | (1 << bit)); else return (byte) ((b | (1 << bit)) ^ (1 << bit)); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/RtpRandom.java ================================================ /* * Copyright (C) 2005 Luca Veltri - University of Parma - Italy * * This file is part of MjSip (http://www.mjsip.org) * * MjSip is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * MjSip is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with MjSip; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author(s): * Luca Veltri (luca.veltri@unipr.it) */ package de.kp.net.rtp; /** * Class Random collects some static methods for generating random numbers and * other stuff. 
*/ public class RtpRandom { /** The random seed */ static final long seed = System.currentTimeMillis(); // static final long seed=0; static java.util.Random rand = new java.util.Random(seed); // static java.util.Random rand=new java.util.Random(); /** Returns a random integer between 0 and n-1 */ /* * static public int nextInt(int n) { seed=(seed*37)%987654321; return * (int)(seed%n); } */ /** Returns true or false respectively with probability p/100 and (1-p/100) */ /* * static boolean percent(int p) { return integer(100) receivers; private RtpSender() { receivers = new Vector(); } public int getReceiverCount() { return receivers.size(); } public static RtpSender getInstance() { if (instance == null) instance = new RtpSender(); return instance; } /** * Register RTP packet consumer * * @param receiver */ public void addReceiver(RtpSocket receiver) { receivers.add(receiver); } /** * De-register RTP packet consumer * @param receiver */ public void removeReceiver(RtpSocket receiver) { receivers.remove(receiver); } /** * Send RTP packet to all registered RTP * packet consumers. * * @param rtpPacket * @throws IOException */ public synchronized void send(RtpPacket rtpPacket) throws IOException { for (RtpSocket receiver:receivers) { receiver.send(rtpPacket); } } /** * Send RTP packet to all registered RTP * packet consumers. 
     *
     * @param data raw datagram payload, forwarded verbatim to every consumer
     * @throws IOException if sending to a registered socket fails
     */
    public synchronized void send(byte[] data) throws IOException {
        // Fan the raw buffer out to every registered RTP consumer.
        for (RtpSocket receiver:receivers) {
            receiver.send(data);
        }
    }

    /**
     * De-register all registered RTP consumers
     */
    public void clear() {
        receivers.clear();
    }

    /**
     * Stop the sender (currently a no-op).
     */
    public void stop() {
        // TODO
    }
}


================================================
FILE: RtspCamera/src/de/kp/net/rtp/RtpSocket.java
================================================
package de.kp.net.rtp;

/*
 * Copyright (C) 2009 The Sipdroid Open Source Project
 * Copyright (C) 2005 Luca Veltri - University of Parma - Italy
 *
 * This file is part of Sipdroid (http://www.sipdroid.org)
 *
 * Sipdroid is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This source code is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this source code; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.DatagramPacket;
import java.net.SocketException;
import java.io.IOException;

/**
 * RtpSocket implements a RTP socket for receiving and sending RTP packets.
 *

* RtpSocket is associated to a DatagramSocket that is used to send and/or * receive RtpPackets. */ public class RtpSocket { /** UDP socket */ DatagramSocket socket; DatagramPacket datagram; /** Remote address */ InetAddress remoteAddress; /** Remote port */ int remotePort; /** * An RtpSocket may be suspended from sending or receiving * UDP data packets */ boolean suspended = false; /** Creates a new RTP socket (sender and receiver) * @throws SocketException */ public RtpSocket(InetAddress remoteAddress, int remotePort) throws SocketException { this.socket = new DatagramSocket(); this.socket.connect(remoteAddress, remotePort); this.remoteAddress = remoteAddress; this.remotePort = remotePort; datagram = new DatagramPacket(new byte[1],1); } /** Creates a new RTP socket (sender and receiver) **/ public RtpSocket(DatagramSocket socket, InetAddress remoteAddress, int remotePort) { this.socket = socket; // initialize receiver address & port this.remoteAddress = remoteAddress; this.remotePort = remotePort; datagram = new DatagramPacket(new byte[1],1); } /** Returns the RTP DatagramSocket */ public DatagramSocket getSocket() { return this.socket; } /** Receives a RTP packet from this socket */ public void receive(RtpPacket rtpPacket) throws IOException { datagram.setData(rtpPacket.getPacket()); datagram.setLength(rtpPacket.packet.length); socket.receive(datagram); if (!socket.isConnected()) socket.connect(datagram.getAddress(), datagram.getPort()); rtpPacket.packet_len = datagram.getLength(); } /** Sends a RTP packet from this socket */ public void send(RtpPacket rtpPacket) throws IOException { if (this.suspended == true) return; datagram.setData(rtpPacket.getPacket()); datagram.setLength(rtpPacket.getLength()); datagram.setAddress(remoteAddress); datagram.setPort(remotePort); socket.send(datagram); } /** Sends a RTP packet from this socket */ public void send(byte[] data) throws IOException { if (this.suspended == true) return; datagram.setData(data); 
datagram.setLength(data.length); datagram.setAddress(remoteAddress); datagram.setPort(remotePort); socket.send(datagram); } public void suspend(boolean suspended) { this.suspended = suspended; } /** Closes this socket */ public void close() { // socket.close(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/packetizer/AbstractPacketizer.java ================================================ package de.kp.net.rtp.packetizer; import java.io.IOException; import java.io.InputStream; import de.kp.net.rtp.RtpSender; abstract public class AbstractPacketizer extends Thread { protected InputStream fis; protected RtpSender rtpSender; protected boolean running = false; public AbstractPacketizer() { super(); } public AbstractPacketizer(Runnable runnable) { super(runnable); } public AbstractPacketizer(String threadName) { super(threadName); } public AbstractPacketizer(Runnable runnable, String threadName) { super(runnable, threadName); } public AbstractPacketizer(ThreadGroup group, Runnable runnable) { super(group, runnable); } public AbstractPacketizer(ThreadGroup group, String threadName) { super(group, threadName); } public AbstractPacketizer(ThreadGroup group, Runnable runnable, String threadName) { super(group, runnable, threadName); } public AbstractPacketizer(ThreadGroup group, Runnable runnable, String threadName, long stackSize) { super(group, runnable, threadName, stackSize); } public void startStreaming() { running = true; start(); } public void stopStreaming() { try { fis.close(); } catch (IOException e) { } running = false; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/packetizer/H263Packetizer.java ================================================ package de.kp.net.rtp.packetizer; import java.io.IOException; import java.io.InputStream; import java.net.SocketException; import android.os.SystemClock; import android.util.Log; import de.kp.net.rtp.RtpPacket; import 
de.kp.net.rtp.RtpSender;
import de.kp.net.rtsp.RtspConstants;

/**
 * Reads an H.263 elementary stream from an InputStream and pushes it out as
 * RTP packets via the shared RtpSender, splitting the stream at 0x00 0x00
 * start-code boundaries and adapting its pacing to the observed input rate.
 */
public class H263Packetizer extends AbstractPacketizer implements Runnable {

    private String TAG = "H263Sender";

    // Selects the initial bit-rate estimate used for pacing (45 vs 24 kB/s).
    private boolean videoQualityHigh = true;
    // private int fps;

    // When set, run() drains and discards input for up to 3 s (stream resync).
    private boolean change;

    public H263Packetizer(InputStream fis) throws SocketException {
        this.fis = fis;
        this.rtpSender = RtpSender.getInstance();
    }

    public void run() {
        // Packet layout: 12-byte RTP header, 2-byte payload header, then up
        // to frame_size bytes of H.263 data.
        int frame_size = 1400;
        byte[] buffer = new byte[frame_size + 14];
        // buffer[12] = 4: first payload-header byte; presumably the P bit of
        // the H.263 payload header (RFC 4629), marking a picture start —
        // TODO confirm against the spec.
        buffer[12] = 4;
        RtpPacket rtpPacket = new RtpPacket(buffer, 0);
        int seqn = 0;
        // num: bytes read / scan index; number: bytes buffered; head: total
        // bytes consumed; lasthead/lasthead2/stable: rate-estimation state;
        // cnt/len: packets and bytes since the last rate update.
        int num, number = 0, src, dest, len = 0, head = 0, lasthead = 0, lasthead2 = 0, cnt = 0, stable = 0;
        long now, lasttime = 0;
        // Initial throughput guess (bytes/s) and average packet length used
        // to derive the inter-packet sleep below.
        double avgrate = videoQualityHigh ? 45000 : 24000;
        double avglen = avgrate / 20;
        rtpPacket.setPayloadType(RtspConstants.RTP_H263_PAYLOADTYPE);
        // while (Receiver.listener_video != null && videoValid()) {
        while (running) {
            num = -1;
            try {
                // Top up the buffer after the 14-byte header area.
                num = fis.read(buffer, 14 + number, frame_size - number);
            } catch (IOException e) {
                Log.w(TAG, e.getMessage());
                break;
            }
            if (num < 0) {
                // No data yet: back off briefly and retry.
                try {
                    sleep(20);
                } catch (InterruptedException e) {
                    break;
                }
                continue;
            }
            number += num;
            head += num;
            try {
                // Periodically (>=5 reads and >700 ms apart) re-estimate the
                // camera's output rate and the average packet length from
                // the bytes that flowed since the last estimate.
                now = SystemClock.elapsedRealtime();
                if (lasthead != head + fis.available() && ++stable >= 5 && now - lasttime > 700) {
                    if (cnt != 0 && len != 0)
                        avglen = len / cnt;
                    if (lasttime != 0) {
                        // fps = (int) ((double) cnt * 1000 / (now - lasttime));
                        avgrate = (double) ((head + fis.available()) - lasthead2) * 1000 / (now - lasttime);
                    }
                    lasttime = now;
                    lasthead = head + fis.available();
                    lasthead2 = head;
                    len = cnt = stable = 0;
                }
            } catch (IOException e1) {
                Log.w(TAG, e1.getMessage());
                break;
            }
            // Scan the buffered data for the next 0x00 0x00 start code; if
            // found, this packet ends a frame (marker set) and 'num' becomes
            // the number of trailing bytes belonging to the next frame.
            for (num = 14; num <= 14 + number - 2; num++)
                if (buffer[num] == 0 && buffer[num + 1] == 0)
                    break;
            if (num > 14 + number - 2) {
                num = 0;
                rtpPacket.setMarker(false);
            } else {
                num = 14 + number - num;
                rtpPacket.setMarker(true);
            }
            rtpPacket.setSequenceNumber(seqn++);
            rtpPacket.setPayloadLength(number - num + 2);
            // The first 10 packets are skipped — presumably to let the
            // encoder/stream settle before transmitting; confirm intent.
            if (seqn > 10)
                try {
                    rtpSender.send(rtpPacket);
                    len += number - num;
                } catch (IOException e) {
                    Log.w(TAG, "RTP packet sent failed");
                    break;
                }
            if (num > 0) {
                // A start code was found: shift the next frame's leading
                // bytes (minus the 2 start-code zeros, which the payload
                // header replaces) to the front of the payload area.
                num -= 2;
                dest = 14;
                src = 14 + number - num;
                if (num > 0 && buffer[src] == 0) {
                    src++;
                    num--;
                }
                number = num;
                while (num-- > 0)
                    buffer[dest++] = buffer[src++];
                buffer[12] = 4;
                cnt++;
                // Pace output: sleep roughly one average-packet-duration,
                // derived from the estimated byte rate.
                try {
                    if (avgrate != 0)
                        Thread.sleep((int) (avglen / avgrate * 1000));
                } catch (Exception e) {
                    break;
                }
                // 90 kHz RTP clock for video.
                rtpPacket.setTimestamp(SystemClock.elapsedRealtime() * 90);
            } else {
                // Mid-frame continuation packet: payload header byte cleared.
                number = 0;
                buffer[12] = 0;
            }
            if (change) {
                // Resync requested: discard input for up to 3 seconds.
                change = false;
                long time = SystemClock.elapsedRealtime();
                try {
                    while (fis.read(buffer, 14, frame_size) > 0 && SystemClock.elapsedRealtime() - time < 3000)
                        ;
                } catch (Exception e) {
                }
                number = 0;
                buffer[12] = 0;
            }
        }
        rtpSender.stop();
        // Drain the stream so the producer side can finish cleanly.
        try {
            while (fis.read(buffer, 0, frame_size) > 0)
                ;
        } catch (IOException e) {
        }
    }
}


================================================
FILE: RtspCamera/src/de/kp/net/rtp/packetizer/H264Fifo.java
================================================
/*
 * Copyright (C) 2011-2012 GUIGUI Simon, fyhertz@gmail.com
 *
 * This file is part of Spydroid (http://code.google.com/p/spydroid-ipcamera/)
 *
 * Spydroid is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This source code is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this source code; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ package de.kp.net.rtp.packetizer; public class H264Fifo { private int length = 0, tail = 0, head = 0; private byte[] buffer; public H264Fifo(int length) { this.length = length; buffer = new byte[length]; } public void write(byte[] buffer, int offset, int length) { if (tail+lengthavailable() ? available() : length; if (head+length=head) ? tail-head : this.length-(head-tail) ; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/packetizer/H264Packetizer.java ================================================ package de.kp.net.rtp.packetizer; import java.io.IOException; import java.io.InputStream; import java.net.SocketException; import de.kp.net.rtp.RtpPacket; import de.kp.net.rtp.RtpSender; import de.kp.net.rtsp.RtspConstants; import android.os.SystemClock; import android.util.Log; public class H264Packetizer extends AbstractPacketizer implements Runnable { private final int packetSize = 1400; private long oldtime = SystemClock.elapsedRealtime(), delay = 20; private long latency, oldlat = oldtime; private int available = 0, oldavailable = 0, nalUnitLength = 0, numberNalUnit = 0, len = 0; private H264Fifo fifo = new H264Fifo(500000); protected InputStream fis = null; protected byte[] buffer = new byte[16384 * 2]; protected final int rtpHeaderLength = 12; // Rtp header length private String TAG = "H264Packetizer"; public H264Packetizer(InputStream fis) throws SocketException { this.fis = fis; this.rtpSender = RtpSender.getInstance(); } public void run() { int seqn = 0; byte[] buffer = new byte[16384*2]; RtpPacket rtpPacket = new RtpPacket(buffer, 0); rtpPacket.setPayloadType(RtspConstants.RTP_H264_PAYLOADTYPE); // skip the mpeg4 header try { // skip all atoms preceding mdat atom skipMDAT(); // some phones do not set length 
correctly when stream is not // seekable, still we need to skip the header if (len <= 0) { while (true) { while (fis.read() != 'm') ; fis.read(buffer, rtpHeaderLength, 3); if (buffer[rtpHeaderLength] == 'd' && buffer[rtpHeaderLength + 1] == 'a' && buffer[rtpHeaderLength + 2] == 't') break; } } len = 0; } catch (IOException e) { Log.w(TAG , e.getMessage()); return; } while (running) { /* If there are NAL units in the FIFO ready to be sent, we send one */ // send(); /* * Read a NAL unit in the FIFO and send it If it is too big, we * split it in FU-A units (RFC 3984) */ int sum = 1, len = 0, nalUnitLength; if (numberNalUnit != 0) { /* Read nal unit length (4 bytes) and nal unit header (1 byte) */ len = fifo.read(buffer, rtpHeaderLength, 5); nalUnitLength = (buffer[rtpHeaderLength + 3] & 0xFF) + (buffer[rtpHeaderLength + 2] & 0xFF) * 256 + (buffer[rtpHeaderLength + 1] & 0xFF) * 65536; // Log.d(TAG ,"send- NAL unit length: " + nalUnitLength); // rsock.updateTimestamp(SystemClock.elapsedRealtime() * 90); rtpPacket.setTimestamp(SystemClock.elapsedRealtime() * 90); /* Small nal unit => Single nal unit */ if (nalUnitLength <= packetSize - rtpHeaderLength - 2) { buffer[rtpHeaderLength] = buffer[rtpHeaderLength + 4]; len = fifo.read(buffer, rtpHeaderLength + 1, nalUnitLength - 1); rtpPacket.setMarker(true); try { rtpPacket.setSequenceNumber(seqn++); rtpPacket.setPayloadLength(nalUnitLength); rtpSender.send(rtpPacket); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } /* Large nal unit => Split nal unit */ else { /* Set FU-A indicator */ buffer[rtpHeaderLength] = 28; buffer[rtpHeaderLength] += (buffer[rtpHeaderLength + 4] & 0x60) & 0xFF; // FU indicator // NRI // buffer[rtphl] += 0x80; /* Set FU-A header */ buffer[rtpHeaderLength + 1] = (byte) (buffer[rtpHeaderLength + 4] & 0x1F); // FU header // type buffer[rtpHeaderLength + 1] += 0x80; // Start bit while (sum < nalUnitLength) { if (!running) break; len = fifo.read(buffer, rtpHeaderLength + 
2, nalUnitLength - sum > packetSize - rtpHeaderLength - 2 ? packetSize - rtpHeaderLength - 2 : nalUnitLength - sum); sum += len; if (len < 0) break; /* Last packet before next NAL */ if (sum >= nalUnitLength) { // End bit on buffer[rtpHeaderLength + 1] += 0x40; // rsock.markNextPacket(); rtpPacket.setMarker(true); } try { // rsock.send(len + rtpHeaderLength + 2); rtpPacket.setSequenceNumber(seqn++); rtpPacket.setPayloadLength(len + 2); rtpSender.send(rtpPacket); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } /* Switch start bit */ buffer[rtpHeaderLength + 1] = (byte) (buffer[rtpHeaderLength + 1] & 0x7F); // Log.d(TAG,"send--- FU-A unit, end:"+(boolean)(sum >= nalUnitLength)); } } numberNalUnit--; // Log.d(TAG,"NAL UNIT SENT> " + numberNalUnit); } /* * If the camera has delivered new NAL units we copy them in the * FIFO Then, the delay between two send call is latency/nbNalu * with: latency: how long it took to the camera to output new data * nbNalu: number of NAL units in the FIFO */ fillFifo(); try { Thread.sleep(delay); } catch (InterruptedException e) { return; } } } // skip all atoms preceeding mdat atom private void skipMDAT() throws IOException { while (true) { fis.read(buffer, rtpHeaderLength, 8); if (buffer[rtpHeaderLength + 4] == 'm' && buffer[rtpHeaderLength + 5] == 'd' && buffer[rtpHeaderLength + 6] == 'a' && buffer[rtpHeaderLength + 7] == 't') break; len = (buffer[rtpHeaderLength + 3] & 0xFF) + (buffer[rtpHeaderLength + 2] & 0xFF) * 256 + (buffer[rtpHeaderLength + 1] & 0xFF) * 65536; if (len <= 0) break; fis.read(buffer, rtpHeaderLength, len - 8); } } private void fillFifo() { try { available = fis.available(); if (available > oldavailable) { long now = SystemClock.elapsedRealtime(); latency = now - oldlat; oldlat = now; oldavailable = available; } if (numberNalUnit == 0 && available > 4) { numberNalUnit = nalUnitLength - len == 0 ? 
numberNalUnit : numberNalUnit + 1; } else return; while ((available = fis.available()) >= 4) { fis.read(buffer, rtpHeaderLength, nalUnitLength - len); fifo.write(buffer, rtpHeaderLength, nalUnitLength - len); /* Read NAL unit and copy it in the fifo */ len = fis.read(buffer, rtpHeaderLength, 4); nalUnitLength = (buffer[rtpHeaderLength + 3] & 0xFF) + (buffer[rtpHeaderLength + 2] & 0xFF) * 256 + (buffer[rtpHeaderLength + 1] & 0xFF) * 65536; len = fis.read(buffer, rtpHeaderLength + 4, nalUnitLength); fifo.write(buffer, rtpHeaderLength, len + 4); if (len == nalUnitLength) numberNalUnit++; // Log.i(TAG,"fifo- available: " + available + ", len: " + len + ", naluLength: " + nalUnitLength); if (fis.available() < 4) { delay = latency / numberNalUnit; oldavailable = fis.available(); // Log.i(TAG,"fifo- latency: "+latency+", nbNalu: "+numberNalUnit+", delay: "+delay+" avfifo: "+fifo.available()); } } } catch (IOException e) { return; } } // Useful for debug protected String printBuffer(int start,int end) { String str = ""; for (int i=start;i listeners = new Vector(); /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); private String TAG = "RtspVideoRecorder"; /** * Constructor */ public RtspVideoRecorder() { } /** * Constructor. Force a video codec. * * @param codec Video codec */ public RtspVideoRecorder(VideoCodec codec) { // Set the media codec setMediaCodec(codec.getMediaCodec()); } /** * Constructor. Force a video codec. 
* * @param codec Video codec name */ public RtspVideoRecorder(String codec) { // Set the media codec for (int i = 0; i < supportedMediaCodecs.length ; i++) { if (codec.toLowerCase().contains(supportedMediaCodecs[i].getCodecName().toLowerCase())) { setMediaCodec(supportedMediaCodecs[i]); break; } } } /** * Returns the local RTP port * * @return Port */ public int getLocalRtpPort() { return localRtpPort; } /** * Return the video start time * * @return Milliseconds */ public long getVideoStartTime() { return videoStartTime; } /** * Is player opened * * @return Boolean */ public boolean isOpened() { return opened; } /** * Is player started * * @return Boolean */ public boolean isStarted() { return started; } /** * Open the player * * @param remoteHost Remote host * @param remotePort Remote port */ public void open(String remoteHost, int remotePort) { // This is an interface method, that is no longer // used with the actual context } public void open() { if (opened) { // Already opened return; } // Check video codec if (selectedVideoCodec == null) { if (logger.isActivated()) { logger.debug("Player error: Video Codec not selected"); } return; } // Init video encoder try { if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) { // H264 NativeH264Encoder.InitEncoder(selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight(), selectedVideoCodec.getFramerate()); } else if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H263Config.CODEC_NAME)) { // Default H263 NativeH263EncoderParams params = new NativeH263EncoderParams(); params.setEncFrameRate(selectedVideoCodec.getFramerate()); params.setBitRate(selectedVideoCodec.getBitrate()); // set width/height parameters for native encoding, too params.setEncHeight(selectedVideoCodec.getHeight()); params.setEncWidth(selectedVideoCodec.getWidth()); params.setTickPerSrc(params.getTimeIncRes() / selectedVideoCodec.getFramerate()); params.setIntraPeriod(-1); params.setNoFrameSkipped(false); int result = 
NativeH263Encoder.InitEncoder(params); if (result != 1) { if (logger.isActivated()) { logger.debug("Player error: Encoder init failed with error code " + result); } return; } } } catch (UnsatisfiedLinkError e) { if (logger.isActivated()) { logger.debug("Player error: " + e.getMessage()); } return; } // Init the RTP layer try { rtpInput = new MediaRtpInput(); rtpInput.open(); rtpMediaSender = new MediaRtpSender(videoFormat); rtpMediaSender.prepareSession(rtpInput); } catch (Exception e) { if (logger.isActivated()) { logger.debug("Player error: " + e.getMessage()); } return; } // Player is opened opened = true; } /** * Close the player */ public void close() { if (!opened) { // Already closed return; } // Close the RTP layer rtpInput.close(); rtpMediaSender.stopSession(); try { // Close the video encoder if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) { NativeH264Encoder.DeinitEncoder(); } else if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H263Config.CODEC_NAME)) { NativeH263Encoder.DeinitEncoder(); } } catch (UnsatisfiedLinkError e) { if (logger.isActivated()) { logger.error("Can't close correctly the video encoder", e); } } // Player is closed opened = false; } /** * Start the player */ public synchronized void start() { Log.d(TAG , "start"); if ((opened == false) || (started == true)) { return; } started = true; // Start RTP layer rtpMediaSender.startSession(); // Start capture captureThread.start(); // Player is started videoStartTime = SystemClock.uptimeMillis(); } /** * Stop the player */ public void stop() { if ((opened == false) || (started == false)) { return; } // Stop capture try { captureThread.interrupt(); } catch (Exception e) { } // Player is stopped videoStartTime = 0L; started = false; } /** * Add a media event listener * * @param listener Media event listener */ public void addListener(IMediaEventListener listener) { listeners.addElement(listener); } /** * Remove all media event listeners */ public void 
removeAllListeners() { listeners.removeAllElements(); } /** * Get supported media codecs * * @return media Codecs list */ public MediaCodec[] getSupportedMediaCodecs() { return supportedMediaCodecs; } /** * Get media codec * * @return Media Codec */ public MediaCodec getMediaCodec() { if (selectedVideoCodec == null) return null; else return selectedVideoCodec.getMediaCodec(); } /** * Set media codec * * @param mediaCodec Media codec */ public void setMediaCodec(MediaCodec mediaCodec) { if (VideoCodec.checkVideoCodec(supportedMediaCodecs, new VideoCodec(mediaCodec))) { selectedVideoCodec = new VideoCodec(mediaCodec); videoFormat = (VideoFormat) MediaRegistry.generateFormat(mediaCodec.getCodecName()); // Initialize frame buffer if (frameBuffer == null) { frameBuffer = new CameraBuffer(); } } else { if (logger.isActivated()) { logger.debug("Player error: Codec not supported"); } } } /** * Preview frame from the camera * * @param data Frame * @param camera Camera */ public void onPreviewFrame(byte[] data, Camera camera) { if (frameBuffer != null) frameBuffer.setFrame(data); } /** * Camera buffer */ private class CameraBuffer { /** * YUV frame where frame size is always (videoWidth*videoHeight*3)/2 */ private byte frame[] = new byte[(selectedVideoCodec.getWidth() * selectedVideoCodec.getHeight() * 3) / 2]; /** * Set the last captured frame * * @param frame Frame */ public void setFrame(byte[] frame) { this.frame = frame; } /** * Return the last captured frame * * @return Frame */ public byte[] getFrame() { return frame; } } /** * Video capture thread */ private Thread captureThread = new Thread() { /** * Timestamp */ private long timeStamp = 0; /** * Processing */ public void run() { // if (rtpInput == null) { // return; // } int timeToSleep = 1000 / selectedVideoCodec.getFramerate(); int timestampInc = 90000 / selectedVideoCodec.getFramerate(); byte[] frameData; byte[] encodedFrame; long encoderTs = 0; long oldTs = System.currentTimeMillis(); while (started) { // Set 
timestamp long time = System.currentTimeMillis(); encoderTs = encoderTs + (time - oldTs); // Get data to encode frameData = frameBuffer.getFrame(); // Encode frame int encodeResult; if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) { encodedFrame = NativeH264Encoder.EncodeFrame(frameData, encoderTs); encodeResult = NativeH264Encoder.getLastEncodeStatus(); } else { encodedFrame = NativeH263Encoder.EncodeFrame(frameData, encoderTs); encodeResult = 0; } System.out.println("RtpVideoRecorder: captureThread: encodeResult == " + encodeResult); /* * accept additional status * EAVCEI_MORE_NAL -- there is more NAL to be retrieved */ if ((encodeResult == 0 || encodeResult == 6) && encodedFrame.length > 0) { if (encodeResult == 6) System.out.println("RtpVideoRecorder: captureThread: Status == EAVCEI_MORE_NAL"); // Send encoded frame rtpInput.addFrame(encodedFrame, timeStamp += timestampInc); } // Sleep between frames if necessary long delta = System.currentTimeMillis() - time; if (delta < timeToSleep) { try { Thread.sleep((timeToSleep - delta) - (((timeToSleep - delta) * 10) / 100)); } catch (InterruptedException e) { } } // Update old timestamp oldTs = time; } } }; /** * Media RTP input */ private static class MediaRtpInput implements MediaInput { /** * Received frames */ private FifoBuffer fifo = null; /** * Constructor */ public MediaRtpInput() { } /** * Add a new video frame * * @param data Data * @param timestamp Timestamp */ public void addFrame(byte[] data, long timestamp) { if (fifo != null) { fifo.addObject(new MediaSample(data, timestamp)); } } /** * Open the player */ public void open() { fifo = new FifoBuffer(); } /** * Close the player */ public void close() { if (fifo != null) { fifo.close(); fifo = null; } } /** * Read a media sample (blocking method) * * @return Media sample * @throws MediaException */ public MediaSample readSample() throws MediaException { try { if (fifo != null) { return (MediaSample)fifo.getObject(); } else { 
throw new MediaException("Media input not opened"); } } catch (Exception e) { throw new MediaException("Can't read media sample"); } } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/stream/RtpOutputStream.java ================================================ /******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package de.kp.net.rtp.stream; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSession; import com.orangelabs.rcs.core.ims.protocol.rtp.core.RtpPacket; import com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorOutputStream; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer; import com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet; import com.orangelabs.rcs.utils.logger.Logger; import de.kp.net.rtp.RtpSender; import java.io.IOException; /** * RTP output stream * * @author Peter Arwanitis (arwanitis@dr-kruscheundpartner.de) * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class RtpOutputStream implements ProcessorOutputStream { /** * Sequence number */ private int seqNumber = 0; /** * RTCP Session */ private RtcpSession rtcpSession = null; /** * The logger */ private final Logger logger = Logger.getLogger(this.getClass().getName()); public RtpOutputStream() { // 
Used to build SSCR rtcpSession = new RtcpSession(true, 16000); } public void open() throws Exception { } public void close() { } /** * Write to the stream without blocking * * @param buffer Input buffer * @throws IOException */ public void write(Buffer buffer) throws IOException { // Build a RTP packet RtpPacket packet = buildRtpPacket(buffer); if (packet == null) return; // Assemble RTP packet int size = packet.calcLength(); packet.assemble(size); // Send the RTP packet to the remote destination transmit(packet); } /** * Build a RTP packet * * @param buffer Input buffer * @return RTP packet */ private RtpPacket buildRtpPacket(Buffer buffer) { byte data[] = (byte[])buffer.getData(); if (data == null) return null; Packet packet = new Packet(); packet.data = data; packet.offset = 0; packet.length = buffer.getLength(); RtpPacket rtpPacket = new RtpPacket(packet); if ((buffer.getFlags() & 0x800) != 0) { rtpPacket.marker = 1; } else { rtpPacket.marker = 0; } rtpPacket.payloadType = buffer.getFormat().getPayload(); rtpPacket.seqnum = seqNumber++; rtpPacket.timestamp = buffer.getTimeStamp(); rtpPacket.ssrc = rtcpSession.SSRC; rtpPacket.payloadoffset = buffer.getOffset(); rtpPacket.payloadlength = buffer.getLength(); return rtpPacket; } /** * Transmit a RTCP compound packet to the remote destination * * @param packet RTP packet * @throws IOException */ private void transmit(Packet packet) { // Prepare data to be sent byte[] data = packet.data; if (packet.offset > 0) { System.arraycopy(data, packet.offset, data = new byte[packet.length], 0, packet.length); } // broadcast data try { RtpSender.getInstance().send(data); } catch (IOException e) { e.printStackTrace(); if (logger.isActivated()) { logger.error("Can't broadcast the RTP packet", e); } } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtp/viewer/RtpVideoRenderer.java ================================================ 
/******************************************************************************* * Software Name : RCS IMS Stack * * Copyright (C) 2010 France Telecom S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package de.kp.net.rtp.viewer; import com.orangelabs.rcs.core.ims.protocol.rtp.MediaRegistry; import com.orangelabs.rcs.core.ims.protocol.rtp.MediaRtpReceiver; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.H263Config; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder.NativeH263Decoder; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config; import com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.decoder.NativeH264Decoder; import com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.format.video.VideoFormat; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaOutput; import com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample; import com.orangelabs.rcs.platform.network.DatagramConnection; import com.orangelabs.rcs.platform.network.NetworkFactory; import com.orangelabs.rcs.service.api.client.media.IMediaEventListener; import com.orangelabs.rcs.service.api.client.media.IMediaRenderer; import com.orangelabs.rcs.service.api.client.media.MediaCodec; import 
com.orangelabs.rcs.service.api.client.media.video.VideoCodec; import com.orangelabs.rcs.service.api.client.media.video.VideoSurfaceView; import com.orangelabs.rcs.utils.logger.Logger; import de.kp.net.rtsp.RtspConstants; import de.kp.net.rtsp.client.RtspControl; import de.kp.net.rtsp.client.message.RtspDescriptor; import de.kp.net.rtsp.client.message.RtspMedia; import android.graphics.Bitmap; import android.os.RemoteException; import android.os.SystemClock; import java.io.IOException; import java.util.List; import java.util.Vector; /** * Video RTP renderer. Supports only H.263 and H264 QCIF formats. * * @author jexa7410 */ public class RtpVideoRenderer extends IMediaRenderer.Stub { /** * List of supported video codecs */ public static MediaCodec[] supportedMediaCodecs = { new VideoCodec(H264Config.CODEC_NAME, H264VideoFormat.PAYLOAD, H264Config.CLOCK_RATE, H264Config.CODEC_PARAMS, H264Config.FRAME_RATE, H264Config.BIT_RATE, H264Config.VIDEO_WIDTH, H264Config.VIDEO_HEIGHT).getMediaCodec(), new VideoCodec(H263Config.CODEC_NAME, H263VideoFormat.PAYLOAD, H263Config.CLOCK_RATE, H263Config.CODEC_PARAMS, H263Config.FRAME_RATE, H263Config.BIT_RATE, H263Config.VIDEO_WIDTH, H263Config.VIDEO_HEIGHT).getMediaCodec() }; /** * Selected video codec */ private VideoCodec selectedVideoCodec = null; /** * Video format */ private VideoFormat videoFormat; /** * Local RTP port */ private int localRtpPort; /** * RTP receiver session */ private MediaRtpReceiver rtpReceiver = null; /** * RTP media output */ private MediaRtpOutput rtpOutput = null; /** * Is player opened */ private boolean opened = false; /** * Is player started */ private boolean started = false; /** * Video start time */ private long videoStartTime = 0L; /** * Video surface */ private VideoSurfaceView surface = null; /** * Media event listeners */ private Vector listeners = new Vector(); /** * The logger */ private Logger logger = Logger.getLogger(this.getClass().getName()); /** * Temporary connection to reserve the port 
*/ private DatagramConnection temporaryConnection = null; /** * RTSP Control */ private RtspControl rtspControl; /** * Constructor Force a RTSP Server Uri * @throws Exception */ public RtpVideoRenderer(String uri) throws Exception { /* * The RtspControl opens a connection to an RtspServer, that * is determined by the URI provided. */ rtspControl = new RtspControl(uri); /* * wait unit the rtspControl has achieved status READY; in this * state, an SDP file is present and is ready to get evaluated */ while (rtspControl.getState() != RtspConstants.READY) { ; // blocking } /* * Set the local RTP port: this is the (socket) * port, the RtspVideoRenderer is listening to * (UDP) RTP packets. */ // localRtpPort = NetworkRessourceManager.generateLocalRtpPort(); localRtpPort = rtspControl.getClientPort(); reservePort(localRtpPort); /* * The media resources associated with the SDP descriptor are * evaluated and the respective video encoding determined */ RtspDescriptor rtspDescriptor = rtspControl.getDescriptor(); List mediaList = rtspDescriptor.getMediaList(); if (mediaList.size() == 0) throw new Exception("The session description contains no media resource."); RtspMedia videoResource = null; for (RtspMedia mediaItem:mediaList) { if (mediaItem.getMediaType().equals(RtspConstants.SDP_VIDEO_TYPE)) { videoResource = mediaItem; break; } } if (videoResource == null) throw new Exception("The session description contains no video resource."); String codec = videoResource.getEncoding(); if (codec == null) throw new Exception("No encoding provided for video resource."); // Set the media codec for (int i = 0; i < supportedMediaCodecs.length; i++) { if (codec.toLowerCase().contains(supportedMediaCodecs[i].getCodecName().toLowerCase())) { setMediaCodec(supportedMediaCodecs[i]); break; } } } /** * Set the surface to render video * * @param surface Video surface */ public void setVideoSurface(VideoSurfaceView surface) { this.surface = surface; } /** * Return the video start time * * @return 
Milliseconds */ public long getVideoStartTime() { return videoStartTime; } /** * Returns the local RTP port * * @return Port */ public int getLocalRtpPort() { return localRtpPort; } /** * Reserve a port. * * @param port the port to reserve */ private void reservePort(int port) { if (temporaryConnection != null) return; try { temporaryConnection = NetworkFactory.getFactory().createDatagramConnection(); temporaryConnection.open(port); } catch (IOException e) { temporaryConnection = null; } } /** * Release the reserved port; this method * is invoked while preparing the RTP layer */ private void releasePort() { if (temporaryConnection == null) return; try { temporaryConnection.close(); } catch (IOException e) { temporaryConnection = null; } } /** * Is player opened * * @return Boolean */ public boolean isOpened() { return opened; } /** * Is player started * * @return Boolean */ public boolean isStarted() { return started; } /** * Open the renderer */ public void open() { if (opened) { // Already opened return; } // Check video codec if (selectedVideoCodec == null) { if (logger.isActivated()) { logger.debug("Player error: Video Codec not selected"); } return; } try { // Init the video decoder int result; if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) { result = NativeH264Decoder.InitDecoder(); } else { // default H263 result = NativeH263Decoder.InitDecoder(selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight()); } if (result == 0) { if (logger.isActivated()) { logger.debug("Player error: Decoder init failed with error code " + result); } return; } } catch (UnsatisfiedLinkError e) { if (logger.isActivated()) { logger.debug("Player error: " + e.getMessage()); } return; } try { // initialize RTP layer releasePort(); rtpOutput = new MediaRtpOutput(); rtpOutput.open(); rtpReceiver = new MediaRtpReceiver(localRtpPort); rtpReceiver.prepareSession(rtpOutput, videoFormat); } catch (Exception e) { if (logger.isActivated()) { 
logger.debug("Player error: " + e.getMessage()); } return; } // Player is opened opened = true; } /** * Close the renderer */ public void close() { if (opened == false) return; // Send TEARDOWN request to RTSP Server rtspControl.stop(); // Close the RTP layer rtpReceiver.stopSession(); rtpOutput.close(); // Close the video decoder closeVideoDecoder(); // Player is closed opened = false; } public void closeVideoDecoder() { try { // Close the video decoder if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) { NativeH264Decoder.DeinitDecoder(); } else { // default H263 NativeH263Decoder.DeinitDecoder(); } } catch (UnsatisfiedLinkError e) { if (logger.isActivated()) { logger.error("Can't close correctly the video decoder", e); } } } /** * Start the RTP layer (i.e listen to the reserved local * port for RTP packets), and send a PLAY request to the * RTSP server */ public void start() { if ((opened == false) || (started == true)) { return; } // Start RTP layer rtpReceiver.startSession(); // Send PLAY request to RTSP Server rtspControl.play(); /* * wait unit the rtspControl has achieved status PLAYING */ while (rtspControl.getState() != RtspConstants.PLAYING) { ; // blocking } // Renderer is started videoStartTime = SystemClock.uptimeMillis(); started = true; } /** * Stop the renderer */ public void stop() { if (started == false) return; // Send TEARDOWN request to RTSP Server rtspControl.stop(); // Stop RTP layer if (rtpReceiver != null) rtpReceiver.stopSession(); if (rtpOutput != null) rtpOutput.close(); // Force black screen surface.clearImage(); // Close the video decoder closeVideoDecoder(); // Renderer is stopped started = false; videoStartTime = 0L; } /** * Add a media event listener * * @param listener Media event listener */ public void addListener(IMediaEventListener listener) { listeners.addElement(listener); } /** * Remove all media event listeners */ public void removeAllListeners() { listeners.removeAllElements(); } /** * Get 
supported media codecs * * @return media Codecs list */ public MediaCodec[] getSupportedMediaCodecs() { return supportedMediaCodecs; } /** * Get media codec * * @return Media codec */ public MediaCodec getMediaCodec() { if (selectedVideoCodec == null) return null; else return selectedVideoCodec.getMediaCodec(); } /** * Set media codec * * @param mediaCodec Media codec */ public void setMediaCodec(MediaCodec mediaCodec) { if (VideoCodec.checkVideoCodec(supportedMediaCodecs, new VideoCodec(mediaCodec))) { selectedVideoCodec = new VideoCodec(mediaCodec); videoFormat = (VideoFormat) MediaRegistry.generateFormat(mediaCodec.getCodecName()); } else { if (logger.isActivated()) { logger.debug("Player error: Codec not supported"); } } } /** * Media RTP output */ private class MediaRtpOutput implements MediaOutput { /** * Video frame */ private int decodedFrame[]; /** * Bitmap frame */ private Bitmap rgbFrame; /** * Constructor */ public MediaRtpOutput() { decodedFrame = new int[selectedVideoCodec.getWidth() * selectedVideoCodec.getHeight()]; rgbFrame = Bitmap.createBitmap(selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight(), Bitmap.Config.RGB_565); } /** * Open the renderer */ public void open() { } /** * Close the renderer */ public void close() { } /** * Write a media sample * * @param sample Sample */ public void writeSample(MediaSample sample) { if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) { if (NativeH264Decoder.DecodeAndConvert(sample.getData(), decodedFrame) == 1) { rgbFrame.setPixels(decodedFrame, 0, selectedVideoCodec.getWidth(), 0, 0, selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight()); if (surface != null) { surface.setImage(rgbFrame); } } else { System.out.println("MediaRtpOutput.writeSample: cannot decode sample >len:" + sample.getLength()); } } else { // default H263 if (NativeH263Decoder.DecodeAndConvert(sample.getData(), decodedFrame, sample.getTimeStamp()) == 1) { rgbFrame.setPixels(decodedFrame, 0, 
selectedVideoCodec.getWidth(), 0, 0, selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight()); if (surface != null) { surface.setImage(rgbFrame); } } } } } @Override public void open(String remoteHost, int remotePort) throws RemoteException { } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/RtspConstants.java ================================================ package de.kp.net.rtsp; import java.net.InetAddress; import java.net.NetworkInterface; import java.net.SocketException; import java.util.Enumeration; import android.util.Log; public class RtspConstants { // rtsp states public static int INIT = 0; public static int READY = 1; public static int PLAYING = 2; public static int UNDEFINED = 3; // rtsp message types public static int OPTIONS = 3; public static int DESCRIBE = 4; public static int SETUP = 5; public static int PLAY = 6; public static int PAUSE = 7; public static int TEARDOWN = 8; public static String SDP_AUDIO_TYPE = "audio"; public static String SDP_VIDEO_TYPE = "video"; // the payload type is part of the SDP description // sent back as an answer to a DESCRIBE request. 
// android actually supports video streaming from // the camera using H.263-1998 // TODO: sync with // com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat.PAYLOAD = 97 // com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat.PAYLOAD = 96 public static int RTP_H264_PAYLOADTYPE = 96; // dynamic range public static int RTP_H263_PAYLOADTYPE = 97; // dynamic range public static String H263_1998 = "H263-1998/90000"; public static String H263_2000 = "H263-2000/90000"; public static String H264 = "H264/90000"; public static enum VideoEncoder { H263_ENCODER, H264_ENCODER }; // TODO: synchronize settings // com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.H263Config // com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config // QCIF // public static String WIDTH = "176"; // public static String HEIGHT = "144"; // QCIF public static String WIDTH = "352"; public static String HEIGHT = "288"; public static final int FPS = 15; public static final int BITRATE = 128000; // h263-2000 //public static final int BITRATE = 64000; // for h264 public static final String SEP = " "; // default client ports for audio and video streaming; // the port is usually provided with an RTSP request public static final int CLIENT_AUDIO_PORT = 2000; public static final int CLIENT_VIDEO_PORT = 4000; // public static String SERVER_IP = "spexhd2:8080"; public static int SERVER_PORT = 8080; public static String SERVER_IP = getLocalIpAddress() + ":" + SERVER_PORT; public static String SERVER_NAME = "KuP RTSP Server"; public static String SERVER_VERSION = "0.1"; public static int PORT_BASE = 3000; public static int[] PORTS_RTSP_RTP = {PORT_BASE, (PORT_BASE + 1)}; public static final String DIR_MULTIMEDIA = "../"; // tags for logging public static String SERVER_TAG = "RtspServer"; public static String getLocalIpAddress() { // http://www.droidnova.com/get-the-ip-address-of-your-device,304.html try { for (Enumeration en = 
NetworkInterface.getNetworkInterfaces(); en.hasMoreElements();) { NetworkInterface intf = en.nextElement(); for (Enumeration enumIpAddr = intf.getInetAddresses(); enumIpAddr.hasMoreElements();) { InetAddress inetAddress = enumIpAddr.nextElement(); if (!inetAddress.isLoopbackAddress()) { return inetAddress.getHostAddress().toString(); } } } } catch (SocketException ex) { Log.e("RtspConstants", ex.toString()); } return null; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/RtspClient.java ================================================ package de.kp.net.rtsp.client; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.io.IOException; import java.net.SocketException; import java.net.URI; import java.net.URISyntaxException; import java.util.HashMap; import java.util.Map; import de.kp.net.rtsp.client.api.RequestListener; import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.api.MessageFactory; import de.kp.net.rtsp.client.api.Request; import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.api.Transport; import de.kp.net.rtsp.client.api.TransportListener; import de.kp.net.rtsp.client.header.RtspHeader; import de.kp.net.rtsp.client.header.SessionHeader; import de.kp.net.rtsp.client.header.TransportHeader; import de.kp.net.rtsp.client.header.TransportHeader.LowerTransport; import de.kp.net.rtsp.client.message.MessageBuffer; import de.kp.net.rtsp.client.message.RtspMessageFactory; import de.kp.net.rtsp.client.request.RtspOptionsRequest; import de.kp.net.rtsp.client.request.RtspRequest; public class RtspClient implements TransportListener { private Transport transport; private MessageFactory messageFactory; private MessageBuffer messageBuffer; private volatile int cseq; private SessionHeader session; /** * URI kept from last setup. 
*/ private URI uri; private Map outstanding; private RequestListener clientListener; public RtspClient() { cseq = 0; messageFactory = new RtspMessageFactory(); messageBuffer = new MessageBuffer(); outstanding = new HashMap(); } public Transport getTransport() { return transport; } public void setSession(SessionHeader session) { this.session = session; } public MessageFactory getMessageFactory() { return messageFactory; } public URI getURI() { return uri; } public void options(String uri, URI endpoint) { try { RtspOptionsRequest message = (RtspOptionsRequest) messageFactory.outgoingRequest(uri, RtspRequest.Method.OPTIONS, nextCSeq()); // if (getTransport().isConnected() == false) message.addHeader(new RtspHeader("Connection", "close")); send(message, endpoint); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void play() { try { send(messageFactory.outgoingRequest(uri.toString(), RtspRequest.Method.PLAY, nextCSeq(), session)); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void pause() { try { send(messageFactory.outgoingRequest(uri.toString(), RtspRequest.Method.PAUSE, nextCSeq(), session)); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void record() throws IOException { throw new UnsupportedOperationException("Recording is not supported in current version."); } public void setRequestListener(RequestListener listener) { clientListener = listener; } public RequestListener getRequestListener() { return clientListener; } public void setTransport(Transport transport) { this.transport = transport; transport.setTransportListener(this); } public void describe(URI uri, String resource) { this.uri = uri; String finalURI = uri.toString(); if ((resource != null) && (resource.equals("*") == false)) finalURI += '/' + resource; try { send(messageFactory.outgoingRequest(finalURI, RtspRequest.Method.DESCRIBE, nextCSeq(), new 
RtspHeader("Accept", "application/sdp"))); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void setup(URI uri, int localPort) { this.uri = uri; try { String portParam = "client_port=" + localPort + "-" + (1 + localPort); send(getSetup(uri.toString(), localPort, new TransportHeader(LowerTransport.DEFAULT, "unicast", portParam), session)); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void setup(URI uri, int localPort, String resource) { this.uri = uri; try { String portParam = "client_port=" + localPort + "-" + (1 + localPort); String finalURI = uri.toString(); if ((resource != null) && (resource.equals("*") == false)) finalURI += '/' + resource; send(getSetup(finalURI, localPort, new TransportHeader(LowerTransport.DEFAULT, "unicast", portParam), session)); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void teardown() { if(session == null) return; try { send(messageFactory.outgoingRequest(uri.toString(), RtspRequest.Method.TEARDOWN, nextCSeq(), session, new RtspHeader("Connection", "close"))); } catch(Exception e) { if(clientListener != null) clientListener.onError(this, e); } } public void dataReceived(Transport t, byte[] data, int size) throws Throwable { messageBuffer.addData(data, size); while(messageBuffer.getLength() > 0) try { messageFactory.incomingMessage(messageBuffer); messageBuffer.discardData(); Message message = messageBuffer.getMessage(); if(message instanceof RtspRequest) send(messageFactory.outgoingResponse(405, "Method Not Allowed", message.getCSeq().getValue())); else { RtspRequest request = null; synchronized(outstanding) { request = outstanding.remove(message.getCSeq().getValue()); } Response response = (Response) message; request.handleResponse(this, response); clientListener.onSuccess(this, request, response); } } catch(Exception e) { messageBuffer.discardData(); if(clientListener != null) 
clientListener.onError(this, e.getCause()); } } @Override public void dataSent(Transport t) throws Throwable { } @Override public void error(Transport t, Throwable error) { clientListener.onError(this, error); } @Override public void error(Transport t, Message message, Throwable error) { clientListener.onFailure(this, (RtspRequest) message, error); } @Override public void remoteDisconnection(Transport t) throws Throwable { synchronized(outstanding) { for(Map.Entry request : outstanding.entrySet()) clientListener.onFailure(this, request.getValue(), new SocketException("Socket has been closed")); } } public int nextCSeq() { return cseq++; } public void send(Message message) throws Exception { send(message, uri); } private void send(Message message, URI endpoint) throws Exception { if(!transport.isConnected()) transport.connect(endpoint); if(message instanceof RtspRequest) { RtspRequest request = (RtspRequest) message; synchronized(outstanding) { outstanding.put(message.getCSeq().getValue(), request); } try { transport.sendMessage(message); } catch(IOException e) { clientListener.onFailure(this, request, e); } } else transport.sendMessage(message); } private Request getSetup(String uri, int localPort, RtspHeader... 
headers) throws URISyntaxException { return getMessageFactory().outgoingRequest(uri, RtspRequest.Method.SETUP, nextCSeq(), headers); } @Override public void connected(Transport t) throws Throwable { // TODO Auto-generated method stub } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/RtspControl.java ================================================ package de.kp.net.rtsp.client; import java.net.URI; import de.kp.net.rtsp.RtspConstants; import de.kp.net.rtsp.client.api.RequestListener; import de.kp.net.rtsp.client.api.Request; import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.message.RtspDescriptor; import de.kp.net.rtsp.client.message.RtspMedia; import de.kp.net.rtsp.client.transport.TCPTransport; public class RtspControl implements RequestListener { // reference to the RTSP client private RtspClient client; // flag to indicate whether there is a connection // established to a remote RTSP server private boolean connected = false; // reference to the RTSP server URI private URI uri; private int port; private String resource; // reference to the SDP file returned as a response // to a DESCRIBE request private RtspDescriptor rtspDescriptor; private int state; /** * This constructor is invoked with an uri that * describes the server uri and also a certain * resource */ public RtspControl(String uri) { int pos = uri.lastIndexOf("/"); try { this.uri = new URI(uri.substring(0, pos)); this.resource = uri.substring(pos+1); // initialize the RTSP communication this.client = new RtspClient(); this.client.setTransport(new TCPTransport()); this.client.setRequestListener(this); this.state = RtspConstants.UNDEFINED; // the OPTIONS request is used to invoke and // test the connection to the RTSP server, // specified with the URI provided this.client.options("*", this.uri); } catch (Exception e) { if (this.client != null) { onError(this.client, e); } else { e.printStackTrace(); } } } public RtspControl(String uri, 
String resource) { try { this.uri = new URI(uri); this.resource = resource; // initialize the RTSP communication this.client = new RtspClient(); this.client.setTransport(new TCPTransport()); this.client.setRequestListener(this); this.state = RtspConstants.UNDEFINED; // the OPTIONS request is used to invoke and // test the connection to the RTSP server, // specified with the URI provided this.client.options("*", this.uri); } catch (Exception e) { if (this.client != null) { onError(this.client, e); } else { e.printStackTrace(); } } } public void play() { if ((this.client == null) || (this.connected == false)) return; if (this.state == RtspConstants.READY) { this.client.play(); } } public void pause() { if ((this.client == null) || (this.connected == false)) return; if (this.state == RtspConstants.PLAYING) { this.client.pause(); } } public void stop() { if ((this.client == null) || (this.connected == false)) return; // send TEARDOWN request this.client.teardown(); } public boolean isConnected() { return this.connected; } public int getState() { return this.state; } public int getClientPort() { return this.port; } public RtspDescriptor getDescriptor() { return this.rtspDescriptor; } @Override public void onError(RtspClient client, Throwable error) { if ((this.client != null) && (this.connected == true)) { this.client.teardown(); } this.state = RtspConstants.UNDEFINED; this.connected = false; this.client = null; } // register SDP file public void onDescriptor(RtspClient client, String descriptor) { this.rtspDescriptor = new RtspDescriptor(descriptor); } public void onFailure(RtspClient client, Request request, Throwable cause) { if ((this.client != null) && (this.connected == true)) { this.client.teardown(); } this.state = RtspConstants.UNDEFINED; this.connected = false; this.client = null; } public void onSuccess(RtspClient client, Request request, Response response) { try { if ((this.client != null) && (response.getStatusCode() == 200)) { Request.Method method = 
request.getMethod(); if (method == Request.Method.OPTIONS) { // the response to an OPTIONS request this.connected = true; // send DESCRIBE request this.client.describe(this.uri, this.resource); } else if (method == Request.Method.DESCRIBE) { // set state to INIT this.state = RtspConstants.INIT; /* * onSuccess is called AFTER onDescriptor method; * this implies, that a media resource is present * with a certain client port specified by the RTSP * server */ RtspMedia video = this.rtspDescriptor.getFirstVideo(); if (video != null) { this.port = Integer.valueOf(video.getTransportPort()); // send SETUP request this.client.setup(this.uri, this.port, this.resource); } } else if (method == Request.Method.SETUP) { // set state to READY this.state = RtspConstants.READY; } else if (method == Request.Method.PLAY) { // set state to PLAYING this.state = RtspConstants.PLAYING; } else if (method == Request.Method.PAUSE) { // set state to READY this.state = RtspConstants.READY; } else if (method == Request.Method.TEARDOWN) { this.connected = false; // set state to UNDEFINED this.state = RtspConstants.UNDEFINED; } } else { } } catch (Exception e) { onError(this.client, e); } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/EntityMessage.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.header.RtspContent; public interface EntityMessage { public RtspContent getContent(); public void setContent(RtspContent content); public Message getMessage(); public byte[] getBytes() throws Exception; public boolean isEntity(); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/Message.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.header.CSeqHeader; import de.kp.net.rtsp.client.header.RtspHeader; public interface Message { static String RTSP_TOKEN = "RTSP/"; static String RTSP_VERSION = "1.0"; static String RTSP_VERSION_TOKEN = RTSP_TOKEN + RTSP_VERSION; /** * * @return the Message line (the first line of the message) */ public String getLine(); /** * Returns a header, if exists * * @param name * Name of the header to be searched * @return value of that header * @throws Exception */ public RtspHeader getHeader(String name) throws Exception; /** * Convenience method to get CSeq. * * @return */ public CSeqHeader getCSeq(); /** * * @return all headers in the message, except CSeq */ public RtspHeader[] getHeaders(); /** * Adds a new header or replaces if one already exists. If header to be added * is a CSeq, implementation MUST keep reference of this header. * * @param header */ public void addHeader(RtspHeader header); /** * * @return message as a byte array, ready for transmission. */ public byte[] getBytes() throws Exception; /** * * @return Entity part of message, it exists. */ public EntityMessage getEntityMessage(); /** * * @param entity * adds an entity part to the message. * @return this, for easier construction. */ public Message setEntityMessage(EntityMessage entity); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/MessageFactory.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. 
* * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URISyntaxException; import de.kp.net.rtsp.client.header.RtspContent; import de.kp.net.rtsp.client.header.RtspHeader; import de.kp.net.rtsp.client.message.MessageBuffer; public interface MessageFactory { public void incomingMessage(MessageBuffer message) throws Exception; public Request outgoingRequest(String uri, Request.Method method, int cseq, RtspHeader... extras) throws URISyntaxException; public Request outgoingRequest(RtspContent body, String uri, Request.Method method, int cseq, RtspHeader... extras) throws URISyntaxException; public Response outgoingResponse(int code, String message, int cseq, RtspHeader... extras); public Response outgoingResponse(RtspContent body, int code, String text, int cseq, RtspHeader... extras); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/Request.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URISyntaxException; import de.kp.net.rtsp.client.RtspClient; public interface Request extends Message { enum Method { OPTIONS, DESCRIBE, SETUP, PLAY, PAUSE, RECORD, TEARDOWN }; public void setLine(Method method, String uri) throws URISyntaxException; public Method getMethod(); public String getURI(); public void handleResponse(RtspClient client, Response response); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/RequestListener.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.RtspClient; public interface RequestListener { public void onDescriptor(RtspClient client, String descriptor); public void onError(RtspClient client, Throwable error); public void onFailure(RtspClient client, Request request, Throwable cause); public void onSuccess(RtspClient client, Request request, Response response); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/Response.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public interface Response extends Message { public void setLine(int statusCode, String statusPhrase); public int getStatusCode(); public String getStatusText(); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/Transport.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. 
* * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.io.IOException; import java.net.URI; /** * This interface defines a transport protocol (TCP, UDP) or method (HTTP * tunneling). Transport also MUST enqueue a command if a connection is busy at * the moment it is issued. */ public interface Transport { public void connect(URI to) throws IOException; public void disconnect(); public void sendMessage(Message message) throws Exception; public void setTransportListener(TransportListener listener); public void setUserData(Object data); public boolean isConnected(); } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/api/TransportListener.java ================================================ package de.kp.net.rtsp.client.api; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ /** * Listener for transport events. Implementations of {@link Transport}, when * calling a listener method, must catch all errors and submit them to the * error() method. */ public interface TransportListener { public void connected(Transport t) throws Throwable; public void error(Transport t, Throwable error); public void error(Transport t, Message message, Throwable error); public void remoteDisconnection(Transport t) throws Throwable; public void dataReceived(Transport t, byte[] data, int size) throws Throwable; public void dataSent(Transport t) throws Throwable; } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/CSeqHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class CSeqHeader extends RtspBaseIntegerHeader { public static final String NAME = "CSeq"; public CSeqHeader() { super(NAME); } public CSeqHeader(int cseq) { super(NAME, cseq); } public CSeqHeader(String line) { super(line); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/ContentEncodingHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class ContentEncodingHeader extends RtspBaseStringHeader { public static final String NAME = "Content-Encoding"; public ContentEncodingHeader() { super(NAME); } public ContentEncodingHeader(String header) { super(NAME, header); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/ContentLengthHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. 
* * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class ContentLengthHeader extends RtspBaseIntegerHeader { public static final String NAME = "Content-Length"; public ContentLengthHeader() { super(NAME); } public ContentLengthHeader(int value) { super(NAME, value); } public ContentLengthHeader(String header) throws Exception { super(NAME, header); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/ContentTypeHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class ContentTypeHeader extends RtspBaseStringHeader { public static final String NAME = "Content-Type"; public ContentTypeHeader() { super(NAME); } public ContentTypeHeader(String header) { super(NAME, header); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/RtspBaseIntegerHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class RtspBaseIntegerHeader extends RtspHeader { private int value; public RtspBaseIntegerHeader(String name) { super(name); String text = getRawValue(); if(text != null) value = Integer.parseInt(text); } public RtspBaseIntegerHeader(String name, int value) { super(name); setValue(value); } public RtspBaseIntegerHeader(String name, String header) throws Exception { super(header); checkName(name); value = Integer.parseInt(getRawValue()); } public final void setValue(int newValue) { value = newValue; setRawValue(String.valueOf(value)); } public final int getValue() { return value; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/RtspBaseStringHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class RtspBaseStringHeader extends RtspHeader { public RtspBaseStringHeader(String name) { super(name); } public RtspBaseStringHeader(String name, String header) { super(header); try { checkName(name); } catch(Exception e) { setName(name); } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/RtspContent.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.api.Message; public class RtspContent { private String type; private String encoding; private byte[] content; public void setDescription(Message message) throws Exception { type = message.getHeader(ContentTypeHeader.NAME).getRawValue(); try { encoding = message.getHeader(ContentEncodingHeader.NAME).getRawValue(); } catch(Exception e) { } } public String getType() { return type; } public void setType(String type) { this.type = type; } public String getEncoding() { return encoding; } public void setEncoding(String encoding) { this.encoding = encoding; } public byte[] getBytes() { return content; } public void setBytes(byte[] content) { this.content = content; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/RtspHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class RtspHeader { private String name; private String value; /** * Constructs a new header. 
* * @param header * if the character ':' (colon) is not found, it will be the name of * the header. Otherwise, this constructor parses the header line. */ public RtspHeader(String header) { int colon = header.indexOf(':'); if(colon == -1) name = header; else { name = header.substring(0, colon); value = header.substring(++colon).trim(); } } public RtspHeader(String name, String value) { this.name = name; this.value = value; } public String getName() { return name; } public String getRawValue() { return value; } public void setRawValue(String value) { this.value = value; } public String toString() { return name + ": " + value; } public boolean equals(Object obj) { if(super.equals(obj)) return true; if(obj instanceof String) return getName().equals(obj); if(obj instanceof RtspHeader) return getName().equals(((RtspHeader) obj).getName()); return false; } protected final void checkName(String expected) throws Exception { if(expected.equalsIgnoreCase(getName()) == false) throw new Exception("[Header Mismatch] - Expected: " + expected + " Retrieved: " + getName()); } protected final void setName(String name) { value = this.name; this.name = name; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/SessionHeader.java ================================================ package de.kp.net.rtsp.client.header; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class SessionHeader extends RtspBaseStringHeader { public static final String NAME = "Session"; public SessionHeader() { super(NAME); } public SessionHeader(String header) { super(NAME, header); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/header/TransportHeader.java ================================================ package de.kp.net.rtsp.client.header; /* Copyright 2010 Voice Technology Ind. e Com. Ltda. This file is part of RTSPClientLib. RTSPClientLib is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. RTSPClientLib is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with RTSPClientLib. If not, see . */ import java.util.Arrays; import java.util.List; /** * Models a "Transport" header from RFC 2326. 
According to specification, there may be parameters, which will be inserted as a list of strings, which follow below: * parameter = ( "unicast" | "multicast" ) | ";" "destination" [ "=" address ] | ";" "interleaved" "=" channel [ "-" channel ] | ";" "append" | ";" "ttl" "=" ttl | ";" "layers" "=" 1*DIGIT | ";" "port" "=" port [ "-" port ] | ";" "client_port" "=" port [ "-" port ] | ";" "server_port" "=" port [ "-" port ] | ";" "ssrc" "=" ssrc | ";" "mode" = <"> 1\#mode <"> ttl = 1*3(DIGIT) port = 1*5(DIGIT) ssrc = 8*8(HEX) channel = 1*3(DIGIT) address = host mode = <"> *Method <"> | Method * @author paulo * */ public class TransportHeader extends RtspHeader { public static final String NAME = "Transport"; public static enum LowerTransport { TCP, UDP, DEFAULT }; private LowerTransport transport; private List parameters; public TransportHeader(String header) { super(header); String value = getRawValue(); if(!value.startsWith("RTP/AVP")) throw new IllegalArgumentException("Missing RTP/AVP"); int index = 7; if(value.charAt(index) == '/') { switch(value.charAt(++index)) { case 'T': transport = LowerTransport.TCP; break; case 'U': transport = LowerTransport.UDP; break; default: throw new IllegalArgumentException("Invalid Transport: " + value.substring(7)); } index += 3; } else transport = LowerTransport.DEFAULT; if(value.charAt(index) != ';' && index != value.length()) throw new IllegalArgumentException("Parameter block expected"); addParameters(value.substring(++index).split(";")); } public TransportHeader(LowerTransport transport, String... 
parameters) { super(NAME); this.transport = transport; addParameters(parameters); } public String getParameter(String part) { for(String parameter : parameters) if(parameter.startsWith(part)) return parameter; throw new IllegalArgumentException("No such parameter named " + part); } void addParameters(String[] parameterList) { if(parameters == null) parameters = Arrays.asList(parameterList); else parameters.addAll(Arrays.asList(parameterList)); } LowerTransport getTransport() { return transport; } @Override public String toString() { StringBuilder buffer = new StringBuilder(NAME).append(": ").append("RTP/AVP"); if(transport != LowerTransport.DEFAULT) buffer.append('/').append(transport); for(String parameter : parameters) buffer.append(';').append(parameter); return buffer.toString(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/message/MessageBuffer.java ================================================ package de.kp.net.rtsp.client.message; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.api.Message; public class MessageBuffer { /** * buffer for received data */ private byte[] data; /** * offset for starting useful area */ private int offset; /** * length of useful portion. */ private int length; /** * Used (read) buffer. */ private int used; /** * {@link Message} created during last parsing. */ private Message message; /** * Adds more data to buffer and ensures the sequence [data, newData] is * contiguous. * * @param newData data to be added to the buffer. */ public void addData(byte[] newData, int newLength) { if (data == null) { data = newData; length = newLength; offset = 0; } else { // buffer seems to be small. if((data.length - offset - length) < newLength) { // try to sequeeze data at the beginning of the buffer only if current // buffer does not overlap if(offset >= length && (data.length - length) >= newLength) { System.arraycopy(data, offset, data, 0, length); offset = 0; } else { // worst-case scenario, a new buffer will have to be created byte[] temp = new byte[data.length + newLength]; System.arraycopy(data, offset, temp, 0, length); offset = 0; data = temp; } } // there's room for everything - just copy System.arraycopy(newData, 0, data, offset + length, newLength); length += newLength; } } /** * Discards used portions of the buffer. 
*/ public void discardData() { offset += used; length -= used; } public byte[] getData() { return data; } public int getOffset() { return offset; } public int getLength() { return length; } public void setMessage(Message message) { this.message = message; } public Message getMessage() { return message; } public void setused(int used) { this.used = used; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/message/RtspDescriptor.java ================================================ package de.kp.net.rtsp.client.message; import java.util.ArrayList; import java.util.List; import java.util.StringTokenizer; import de.kp.net.rtsp.RtspConstants; public class RtspDescriptor { private static String SEP = "\r\n"; private ArrayList mediaList; public RtspDescriptor(String descriptor) { // initialize media list mediaList = new ArrayList(); RtspMedia mediaItem = null; try { StringTokenizer tokenizer = new StringTokenizer(descriptor, SEP); while (tokenizer.hasMoreTokens()) { String token = tokenizer.nextToken(); if (token.startsWith("m=")) { // a new media item is detected mediaItem = new RtspMedia(token); mediaList.add(mediaItem); } else if (token.startsWith("a=")) { mediaItem.setAttribute(token); } } } catch (Exception e) { e.printStackTrace(); } } public List getMediaList() { return mediaList; } public RtspMedia getFirstVideo() { RtspMedia video = null; for (RtspMedia mediaItem:this.mediaList) { if (mediaItem.getMediaType().equals(RtspConstants.SDP_VIDEO_TYPE)) { video = mediaItem; break; } } return video; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/message/RtspEntityMessage.java ================================================ package de.kp.net.rtsp.client.message; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. 
* * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.api.EntityMessage; import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.header.RtspContent; import de.kp.net.rtsp.client.header.ContentEncodingHeader; import de.kp.net.rtsp.client.header.ContentLengthHeader; import de.kp.net.rtsp.client.header.ContentTypeHeader; public class RtspEntityMessage implements EntityMessage { private RtspContent content; private final Message message; public RtspEntityMessage(Message message) { this.message = message; } public RtspEntityMessage(Message message, RtspContent body) { this(message); setContent(body); } @Override public Message getMessage() { return message; }; public byte[] getBytes() throws Exception { message.getHeader(ContentTypeHeader.NAME); message.getHeader(ContentLengthHeader.NAME); return content.getBytes(); } @Override public RtspContent getContent() { return content; } @Override public void setContent(RtspContent content) { if(content == null) throw new NullPointerException(); this.content = content; message.addHeader(new ContentTypeHeader(content.getType())); if(content.getEncoding() != null) message.addHeader(new ContentEncodingHeader(content.getEncoding())); message.addHeader(new 
ContentLengthHeader(content.getBytes().length)); } @Override public boolean isEntity() { return content != null; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/message/RtspMedia.java ================================================ package de.kp.net.rtsp.client.message; public class RtspMedia { private String mediaType; private String mediaFormat; private String transportPort; private String transportProtocol; private String encoding; private String clockrate; private String framerate; private static String SDP_CONTROL = "a=control:"; private static String SDP_RANGE = "a=range:"; private static String SDP_LENGTH = "a=length:"; private static String SDP_RTMAP = "a=rtpmap:"; private static String SDP_FRAMERATE = "a=framerate:"; public RtspMedia(String line) { String[] tokens = line.substring(2).split(" "); mediaType = tokens[0]; mediaFormat = tokens[3]; transportPort = tokens[1]; transportProtocol = tokens[2]; } public String getMediaType() { return mediaType; } public String getFrameRate() { return framerate; } public String getEncoding() { return encoding; } public String getClockrate() { return clockrate; } public String getTransportPort() { return transportPort; } public void setAttribute(String line) throws Exception { if (line.startsWith(SDP_CONTROL)) { } else if (line.startsWith(SDP_RANGE)) { } else if (line.startsWith(SDP_LENGTH)) { } else if (line.startsWith(SDP_FRAMERATE)) { framerate = line.substring(SDP_FRAMERATE.length()); } else if (line.startsWith(SDP_RTMAP)) { String[] tokens = line.substring(SDP_RTMAP.length()).split(" "); String payloadType = tokens[0]; if (payloadType.equals(mediaFormat) == false) throw new Exception("Corrupted Session Description - Payload Type"); if (tokens[1].contains("/")) { String[] subtokens = tokens[1].split("/"); encoding = subtokens[0]; clockrate = subtokens[1]; } else { encoding = tokens[1]; } } } public String toString() { return mediaType + " " + transportPort + " " + 
transportProtocol + " " + mediaFormat + " " + encoding + "/" + clockrate; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/message/RtspMessage.java ================================================ package de.kp.net.rtsp.client.message; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.util.ArrayList; import java.util.List; import de.kp.net.rtsp.client.api.EntityMessage; import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.header.CSeqHeader; import de.kp.net.rtsp.client.header.RtspHeader; public abstract class RtspMessage implements Message { private String line; private List headers; private CSeqHeader cseq; private EntityMessage entity; public RtspMessage() { headers = new ArrayList(); } @Override public byte[] getBytes() throws Exception { getHeader(CSeqHeader.NAME); addHeader(new RtspHeader("User-Agent", "RtspClient")); byte[] message = toString().getBytes(); if (getEntityMessage() != null) { byte[] body = entity.getBytes(); byte[] full = new byte[message.length + body.length]; System.arraycopy(message, 0, full, 0, message.length); System.arraycopy(body, 0, full, message.length, body.length); message = full; } return message; } 
@Override public RtspHeader getHeader(final String name) throws Exception { int index = headers.indexOf(new Object() { @Override public boolean equals(Object obj) { return name.equalsIgnoreCase(((RtspHeader) obj).getName()); } }); if(index == -1) throw new Exception("[Missing Header] " + name); return headers.get(index); } @Override public RtspHeader[] getHeaders() { return headers.toArray(new RtspHeader[headers.size()]); } @Override public CSeqHeader getCSeq() { return cseq; } @Override public String getLine() { return line; } public void setLine(String line) { this.line = line; } @Override public void addHeader(RtspHeader header) { if(header == null) return; if(header instanceof CSeqHeader) cseq = (CSeqHeader) header; int index = headers.indexOf(header); if(index > -1) headers.remove(index); else index = headers.size(); headers.add(index, header); } @Override public EntityMessage getEntityMessage() { return entity; } @Override public Message setEntityMessage(EntityMessage entity) { this.entity = entity; return this; } @Override public String toString() { StringBuilder buffer = new StringBuilder(); buffer.append(getLine()).append("\r\n"); for(RtspHeader header : headers) buffer.append(header).append("\r\n"); buffer.append("\r\n"); return buffer.toString(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/message/RtspMessageFactory.java ================================================ package de.kp.net.rtsp.client.message; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.Constructor; import java.net.URISyntaxException; import java.util.HashMap; import java.util.Map; import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.api.MessageFactory; import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.header.CSeqHeader; import de.kp.net.rtsp.client.header.RtspContent; import de.kp.net.rtsp.client.header.ContentEncodingHeader; import de.kp.net.rtsp.client.header.ContentLengthHeader; import de.kp.net.rtsp.client.header.ContentTypeHeader; import de.kp.net.rtsp.client.header.RtspHeader; import de.kp.net.rtsp.client.header.SessionHeader; import de.kp.net.rtsp.client.header.TransportHeader; import de.kp.net.rtsp.client.request.RtspDescribeRequest; import de.kp.net.rtsp.client.request.RtspOptionsRequest; import de.kp.net.rtsp.client.request.RtspPauseRequest; import de.kp.net.rtsp.client.request.RtspPlayRequest; import de.kp.net.rtsp.client.request.RtspRequest; import de.kp.net.rtsp.client.request.RtspSetupRequest; import de.kp.net.rtsp.client.request.RtspTeardownRequest; import de.kp.net.rtsp.client.response.RtspResponse; public class RtspMessageFactory implements MessageFactory { private static Map> headerMap; private static Map> requestMap; static { headerMap = new HashMap>(); requestMap = new HashMap>(); try { putHeader(ContentEncodingHeader.class); putHeader(ContentLengthHeader.class); putHeader(ContentTypeHeader.class); putHeader(CSeqHeader.class); putHeader(SessionHeader.class); putHeader(TransportHeader.class); requestMap.put(RtspRequest.Method.OPTIONS, RtspOptionsRequest.class); 
requestMap.put(RtspRequest.Method.SETUP, RtspSetupRequest.class); requestMap.put(RtspRequest.Method.TEARDOWN, RtspTeardownRequest.class); requestMap.put(RtspRequest.Method.DESCRIBE, RtspDescribeRequest.class); requestMap.put(RtspRequest.Method.PLAY, RtspPlayRequest.class); requestMap.put(RtspRequest.Method.PAUSE, RtspPauseRequest.class); } catch (Exception e) { e.printStackTrace(); } } private static void putHeader(Class cls) throws Exception { headerMap.put(cls.getDeclaredField("NAME").get(null).toString().toLowerCase(), cls.getConstructor(String.class)); } /** * This method handles RTSP server responses */ public void incomingMessage(MessageBuffer buffer) throws Exception { ByteArrayInputStream in = new ByteArrayInputStream(buffer.getData(), buffer.getOffset(), buffer.getLength()); int initial = in.available(); Message message = null; try { // message line. String line = readLine(in); if (line.startsWith(Message.RTSP_TOKEN)) { message = new RtspResponse(line); } else { RtspRequest.Method method = null; try { method = RtspRequest.Method.valueOf(line.substring(0, line.indexOf(' '))); } catch (IllegalArgumentException ilae) { } Class cls = requestMap.get(method); if (cls != null) message = cls.getConstructor(String.class).newInstance(line); else message = new RtspRequest(line); } while (true) { line = readLine(in); if (in == null) throw new Exception(); if (line.length() == 0) break; Constructor c = headerMap.get(line.substring(0, line.indexOf(':')).toLowerCase()); if (c != null) message.addHeader(c.newInstance(line)); else message.addHeader(new RtspHeader(line)); } buffer.setMessage(message); try { int length = ((ContentLengthHeader) message .getHeader(ContentLengthHeader.NAME)).getValue(); if (in.available() < length) throw new Exception(); RtspContent content = new RtspContent(); content.setDescription(message); byte[] data = new byte[length]; in.read(data); content.setBytes(data); message.setEntityMessage(new RtspEntityMessage(message, content)); } catch 
(Exception e) { } } catch (Exception e) { throw new Exception(e); } finally { buffer.setused(initial - in.available()); try { in.close(); } catch (IOException e) { } } } @Override public RtspRequest outgoingRequest(String uri, RtspRequest.Method method, int cseq, RtspHeader... extras) throws URISyntaxException { Class cls = requestMap.get(method); RtspRequest message; try { message = cls != null ? cls.newInstance() : new RtspRequest(); } catch (Exception e) { throw new RuntimeException(e); } message.setLine(method, uri); fillMessage(message, cseq, extras); return message; } @Override public RtspRequest outgoingRequest(RtspContent body, String uri, RtspRequest.Method method, int cseq, RtspHeader... extras) throws URISyntaxException { Message message = outgoingRequest(uri, method, cseq, extras); return (RtspRequest) message.setEntityMessage(new RtspEntityMessage(message, body)); } @Override public Response outgoingResponse(int code, String text, int cseq, RtspHeader... extras) { RtspResponse message = new RtspResponse(); message.setLine(code, text); fillMessage(message, cseq, extras); return message; } @Override public Response outgoingResponse(RtspContent body, int code, String text, int cseq, RtspHeader... 
extras) { Message message = outgoingResponse(code, text, cseq, extras); return (Response) message.setEntityMessage(new RtspEntityMessage(message, body)); } private void fillMessage(Message message, int cseq, RtspHeader[] extras) { message.addHeader(new CSeqHeader(cseq)); for (RtspHeader h : extras) message.addHeader(h); } private String readLine(InputStream in) throws IOException { int ch = 0; StringBuilder b = new StringBuilder(); for (ch = in.read(); ch != -1 && ch != 0x0d && ch != 0x0a; ch = in.read()) b.append((char) ch); if (ch == -1) return null; in.read(); return b.toString(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspDescribeRequest.java ================================================ package de.kp.net.rtsp.client.request; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URISyntaxException; import de.kp.net.rtsp.client.RtspClient; import de.kp.net.rtsp.client.api.Response; public class RtspDescribeRequest extends RtspRequest { public RtspDescribeRequest() { super(); } public RtspDescribeRequest(String messageLine) throws URISyntaxException { super(messageLine); } @Override public byte[] getBytes() throws Exception { getHeader("Accept"); return super.getBytes(); } @Override public void handleResponse(RtspClient client, Response response) { super.handleResponse(client, response); try { client.getRequestListener().onDescriptor(client, new String(response.getEntityMessage().getContent().getBytes())); } catch(Exception e) { client.getRequestListener().onError(client, e); } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspOptionsRequest.java ================================================ package de.kp.net.rtsp.client.request; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URI; import java.net.URISyntaxException; public class RtspOptionsRequest extends RtspRequest { public RtspOptionsRequest() { } public RtspOptionsRequest(String line) throws URISyntaxException { super(line); } @Override public void setLine(Method method, String uri) throws URISyntaxException { setMethod(method); setURI("*".equals(uri) ? uri : new URI(uri).toString()); super.setLine(method.toString() + ' ' + uri + ' ' + RTSP_VERSION_TOKEN); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspPauseRequest.java ================================================ package de.kp.net.rtsp.client.request; import java.net.URISyntaxException; import de.kp.net.rtsp.client.header.SessionHeader; public class RtspPauseRequest extends RtspRequest { public RtspPauseRequest() { } public RtspPauseRequest(String messageLine) throws URISyntaxException { super(messageLine); } @Override public byte[] getBytes() throws Exception { getHeader(SessionHeader.NAME); return super.getBytes(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspPlayRequest.java ================================================ package de.kp.net.rtsp.client.request; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URISyntaxException; import de.kp.net.rtsp.client.header.SessionHeader; public class RtspPlayRequest extends RtspRequest { public RtspPlayRequest() { } public RtspPlayRequest(String messageLine) throws URISyntaxException { super(messageLine); } @Override public byte[] getBytes() throws Exception { getHeader(SessionHeader.NAME); return super.getBytes(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspRequest.java ================================================ package de.kp.net.rtsp.client.request; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URI; import java.net.URISyntaxException; import de.kp.net.rtsp.client.RtspClient; import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.api.Request; import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.message.RtspMessage; public class RtspRequest extends RtspMessage implements Request { private Method method; private String uri; public RtspRequest() { } public RtspRequest(String messageLine) throws URISyntaxException { String[] parts = messageLine.split(" "); setLine(Method.valueOf(parts[0]), parts[1]); } @Override public void setLine(Method method, String uri) throws URISyntaxException { this.method = method; this.uri = new URI(uri).toString(); ; super.setLine(method.toString() + ' ' + uri + ' ' + RTSP_VERSION_TOKEN); } @Override public Method getMethod() { return method; } @Override public String getURI() { return uri; } @Override public void handleResponse(RtspClient client, Response response) { if (testForClose(client, this) || testForClose(client, response)) client.getTransport().disconnect(); } protected void setURI(String uri) { this.uri = uri; } protected void setMethod(Method method) { this.method = method; } private boolean testForClose(RtspClient client, Message message) { try { return message.getHeader("Connection").getRawValue().equalsIgnoreCase("close"); } catch(Exception e) { // this is an expected exception in case of no // connection close in the response message // client.getRequestListener().onError(client, e); } return false; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspSetupRequest.java ================================================ package de.kp.net.rtsp.client.request; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. 
* * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URISyntaxException; import de.kp.net.rtsp.client.RtspClient; import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.header.SessionHeader; public class RtspSetupRequest extends RtspRequest { public RtspSetupRequest() { } public RtspSetupRequest(String line) throws URISyntaxException { super(line); } @Override public byte[] getBytes() throws Exception { getHeader("Transport"); return super.getBytes(); } @Override public void handleResponse(RtspClient client, Response response) { super.handleResponse(client, response); try { if(response.getStatusCode() == 200) client.setSession((SessionHeader) response.getHeader(SessionHeader.NAME)); } catch(Exception e) { client.getRequestListener().onError(client, e); } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/request/RtspTeardownRequest.java ================================================ package de.kp.net.rtsp.client.request; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. 
* * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.net.URISyntaxException; import de.kp.net.rtsp.client.RtspClient; import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.header.SessionHeader; public class RtspTeardownRequest extends RtspRequest { public RtspTeardownRequest() { super(); } public RtspTeardownRequest(String messageLine) throws URISyntaxException { super(messageLine); } @Override public byte[] getBytes() throws Exception { getHeader(SessionHeader.NAME); return super.getBytes(); } @Override public void handleResponse(RtspClient client, Response response) { super.handleResponse(client, response); if(response.getStatusCode() == 200) client.setSession(null); client.getTransport().disconnect(); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/response/RtspResponse.java ================================================ package de.kp.net.rtsp.client.response; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. 
* * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.api.Response; import de.kp.net.rtsp.client.message.RtspMessage; public class RtspResponse extends RtspMessage implements Response { private int status; private String text; public RtspResponse() { } public RtspResponse(String line) { setLine(line); line = line.substring(line.indexOf(' ') + 1); status = Integer.parseInt(line.substring(0, line.indexOf(' '))); text = line.substring(line.indexOf(' ') + 1); } @Override public int getStatusCode() { return status; } @Override public String getStatusText() { return text; } @Override public void setLine(int statusCode, String statusText) { status = statusCode; text = statusText; super.setLine(RTSP_VERSION_TOKEN + ' ' + status + ' ' + text); } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/transport/TCPTransport.java ================================================ package de.kp.net.rtsp.client.transport; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . * * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import java.io.IOException; import java.net.Socket; import java.net.URI; import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.api.Transport; import de.kp.net.rtsp.client.api.TransportListener; class TCPTransportThread extends Thread { private final TCPTransport transport; private volatile TCPTransportListener listener; public TCPTransportThread(TCPTransport transport, TransportListener listener) { this.transport = transport; this.listener = new TCPTransportListener(listener); } public TCPTransportListener getListener() { return listener; } public void setListener(TransportListener listener) { listener = new TCPTransportListener(listener); } @Override public void run() { listener.connected(transport); byte[] buffer = new byte[2048]; int read = -1; while(transport.isConnected()) { try { read = transport.receive(buffer); if(read == -1) { transport.setConnected(false); listener.remoteDisconnection(transport); } else listener.dataReceived(transport, buffer, read); } catch(IOException e) { listener.error(transport, e); } } } } public class TCPTransport implements Transport { private Socket socket; private TCPTransportThread thread; private TransportListener transportListener; private volatile boolean connected; public TCPTransport() { } @Override public void connect(URI to) throws IOException { if(connected) throw new IllegalStateException("Socket is still open. 
Close it first"); int port = to.getPort(); if(port == -1) port = 554; socket = new Socket(to.getHost(), port); setConnected(true); thread = new TCPTransportThread(this, transportListener); thread.start(); } @Override public void disconnect() { setConnected(false); try { socket.close(); } catch(IOException e) { } } @Override public boolean isConnected() { return connected; } @Override public synchronized void sendMessage(Message message) throws Exception { socket.getOutputStream().write(message.getBytes()); thread.getListener().dataSent(this); } @Override public void setTransportListener(TransportListener listener) { transportListener = listener; if(thread != null) thread.setListener(listener); } @Override public void setUserData(Object data) { } int receive(byte[] data) throws IOException { return socket.getInputStream().read(data); } void setConnected(boolean connected) { this.connected = connected; } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/client/transport/TCPTransportListener.java ================================================ package de.kp.net.rtsp.client.transport; /** * Copyright 2010 Voice Technology Ind. e Com. Ltda. * * RTSPClientLib is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * RTSPClientLib is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see . 
* * * This class has been adapted to the needs of the RtspCamera project * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ import de.kp.net.rtsp.client.api.Message; import de.kp.net.rtsp.client.api.Transport; import de.kp.net.rtsp.client.api.TransportListener; class TCPTransportListener implements TransportListener { private final TransportListener behaviour; public TCPTransportListener(TransportListener theBehaviour) { behaviour = theBehaviour; } @Override public void connected(Transport t) { if (behaviour != null) try { behaviour.connected(t); } catch(Throwable error) { behaviour.error(t, error); } } @Override public void dataReceived(Transport t, byte[] data, int size) { if (behaviour != null) try { behaviour.dataReceived(t, data, size); } catch(Throwable error) { behaviour.error(t, error); } } @Override public void dataSent(Transport t) { // TODO Auto-generated method stub if (behaviour != null) try { behaviour.dataSent(t); } catch(Throwable error) { behaviour.error(t, error); } } @Override public void error(Transport t, Throwable error) { if (behaviour != null) behaviour.error(t, error); } @Override public void error(Transport t, Message message, Throwable error) { if(behaviour != null) behaviour.error(t, message, error); } @Override public void remoteDisconnection(Transport t) { if (behaviour != null) try { behaviour.remoteDisconnection(t); } catch(Throwable error) { behaviour.error(t, error); } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/RtspServer.java ================================================ package de.kp.net.rtsp.server; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; import java.util.Vector; import android.util.Log; import de.kp.net.rtp.RtpSender; import de.kp.net.rtp.RtpSocket; import 
de.kp.net.rtsp.RtspConstants; import de.kp.net.rtsp.RtspConstants.VideoEncoder; import de.kp.net.rtsp.server.response.Parser; import de.kp.net.rtsp.server.response.RtspDescribeResponse; import de.kp.net.rtsp.server.response.RtspError; import de.kp.net.rtsp.server.response.RtspOptionsResponse; import de.kp.net.rtsp.server.response.RtspPauseResponse; import de.kp.net.rtsp.server.response.RtspPlayResponse; import de.kp.net.rtsp.server.response.RtspResponse; import de.kp.net.rtsp.server.response.RtspResponseTeardown; import de.kp.net.rtsp.server.response.RtspSetupResponse; /** * This class describes a RTSP streaming * server for Android platforms. RTSP is * used to control video streaming from * a remote user agent. * * @author Stefan Krusche (krusche@dr-kruscheundpartner.de) * */ public class RtspServer implements Runnable { // reference to the server socket private ServerSocket serverSocket; // indicator to determine whether the server has stopped or not private boolean stopped = false; // inidicator to describe whether the server all of its threads // are terminated private boolean terminated = false; // reference to the video encoder (H263, H264) used over RTP private VideoEncoder encoder; // a temporary cache to manage all threads initiated by the RTSP server private Vector serverThreads; public RtspServer(int port, VideoEncoder encoder) throws IOException { this.serverThreads = new Vector(); this.encoder = encoder; this.serverSocket = new ServerSocket(port); } public void run() { /* * In order to communicate with different clients, * we construct a thread for each client that is * connected. 
*/ while (this.stopped == false) { try { Socket clientSocket = this.serverSocket.accept(); serverThreads.add(new ServerThread(clientSocket, this.encoder)); } catch (IOException e) { e.printStackTrace(); } } } public boolean isTerminated() { return this.terminated; } /** * This method is used to stop the RTSP server */ public void stop() { this.stopped = true; terminate(); try { this.serverSocket.close(); } catch (IOException e) { // nothing todo } } /** * This method is used to tear down all threads that have * been invoked by the RTSP server during life time */ private void terminate() { for (Thread serverThread:serverThreads) { if (serverThread.isAlive()) serverThread.interrupt(); } this.terminated = true; } private class ServerThread extends Thread { private String TAG = "RtspServer"; // response to RTSP client private RtspResponse rtspResponse; private String contentBase = ""; /* * input and output stream buffer for TCP connection; * UDP response are sent through DatagramSocket */ private BufferedReader rtspBufferedReader; private BufferedWriter rtspBufferedWriter; private int rtspState; // Sequence number of RTSP messages within the session private int cseq = 0; private int clientPort; // remote (client) address private InetAddress clientAddress; /* * This datagram socket is used to send UDP * packets to the clientIPAddress */ private RtpSocket rtpSocket; private final Socket clientSocket; private VideoEncoder encoder; public ServerThread(Socket socket, VideoEncoder encoder) { this.clientSocket = socket; this.encoder = encoder; // register IP address of requesting client this.clientAddress = this.clientSocket.getInetAddress(); start(); } public void run() { // prepare server response String response = ""; try { // Set input and output stream filters rtspBufferedReader = new BufferedReader(new InputStreamReader(this.clientSocket.getInputStream()) ); rtspBufferedWriter = new BufferedWriter(new OutputStreamWriter(this.clientSocket.getOutputStream()) ); boolean 
setup = false; while (setup == false) { // determine request type and also provide // server response int requestType = getRequestType(); // send response response = rtspResponse.toString(); rtspBufferedWriter.write(response); rtspBufferedWriter.flush(); if (requestType == RtspConstants.SETUP) { setup = true; // update RTSP state rtspState = RtspConstants.READY; // in case of a setup request, we create a new RtpSocket // instance used to send RtpPacket this.rtpSocket = new RtpSocket(this.clientAddress, this.clientPort); // this RTP socket is registered as RTP receiver to also // receive the streaming video of this device RtpSender.getInstance().addReceiver(this.rtpSocket); } } // this is an endless loop, that is terminated an // with interrupt sent to the respective thread while (true) { // pares incoming request to decide how to proceed int requestType = getRequestType(); // send response response = rtspResponse.toString(); rtspBufferedWriter.write(response); rtspBufferedWriter.flush(); if ((requestType == RtspConstants.PLAY) && (rtspState == RtspConstants.READY)) { Log.i(TAG, "request: PLAY"); // make sure that the respective client socket is // ready to send RTP packets this.rtpSocket.suspend(false); this.rtspState = RtspConstants.PLAYING; } else if ((requestType == RtspConstants.PAUSE) && (rtspState == RtspConstants.PLAYING)) { Log.i(TAG, "request: PAUSE"); // suspend RTP socket from sending video packets this.rtpSocket.suspend(true); } else if (requestType == RtspConstants.TEARDOWN) { Log.i(TAG, "request: TEARDOWN"); // this RTP socket is removed from the RTP Sender RtpSender.getInstance().removeReceiver(this.rtpSocket); // close the clienr socket for receiving incoming RTSP request this.clientSocket.close(); // close the associated RTP socket for sending RTP packets this.rtpSocket.close(); } // the pattern below enables an interrupt // which allows to close this thread try { sleep(20); } catch (InterruptedException e) { break; } } } catch(Throwable t) { 
t.printStackTrace(); System.out.println("Caught " + t + " - closing thread"); } } private int getRequestType() throws Exception { int requestType = -1; // retrieve the request in a string representation // for later evaluation String requestLine = ""; try { requestLine = Parser.readRequest(rtspBufferedReader); } catch (IOException e) { e.printStackTrace(); } Log.i(TAG, "requestLine: " + requestLine); // determine request type from incoming RTSP request requestType = Parser.getRequestType(requestLine); if (contentBase.isEmpty()) { contentBase = Parser.getContentBase(requestLine); } if (!requestLine.isEmpty()) { cseq = Parser.getCseq(requestLine); } if (requestType == RtspConstants.OPTIONS) { rtspResponse = new RtspOptionsResponse(cseq); } else if (requestType == RtspConstants.DESCRIBE) { buildDescribeResponse(requestLine); } else if (requestType == RtspConstants.SETUP) { buildSetupResponse(requestLine); } else if (requestType == RtspConstants.PAUSE) { rtspResponse = new RtspPauseResponse(cseq); } else if (requestType == RtspConstants.TEARDOWN) { rtspResponse = new RtspResponseTeardown(cseq); } else if (requestType == RtspConstants.PLAY) { rtspResponse = new RtspPlayResponse(cseq); String range = Parser.getRangePlay(requestLine); if (range != null) ((RtspPlayResponse) rtspResponse).setRange(range); } else { if( requestLine.isEmpty()){ rtspResponse = new RtspError(cseq); } else { rtspResponse = new RtspError(cseq); } } return requestType; } /** * Create an RTSP response for an incoming SETUP request. 
* * @param requestLine * @throws Exception */ private void buildSetupResponse(String requestLine) throws Exception { rtspResponse = new RtspSetupResponse(cseq); // client port clientPort = Parser.getClientPort(requestLine); ((RtspSetupResponse) rtspResponse).setClientPort(clientPort); // transport protocol ((RtspSetupResponse) rtspResponse).setTransportProtocol(Parser.getTransportProtocol(requestLine)); // session type ((RtspSetupResponse) rtspResponse).setSessionType(Parser.getSessionType(requestLine)); ((RtspSetupResponse) rtspResponse).setClientIP(this.clientAddress.getHostAddress()); int[] interleaved = Parser.getInterleavedSetup(requestLine); if(interleaved != null){ ((RtspSetupResponse) rtspResponse).setInterleaved(interleaved); } } /** * Create an RTSP response for an incoming DESCRIBE request. * * @param requestLine * @throws Exception */ private void buildDescribeResponse(String requestLine) throws Exception{ rtspResponse = new RtspDescribeResponse(cseq); // set file name String fileName = Parser.getFileName(requestLine); ((RtspDescribeResponse) rtspResponse).setFileName(fileName); // set video encoding ((RtspDescribeResponse) rtspResponse).setVideoEncoder(encoder); // finally set content base ((RtspDescribeResponse)rtspResponse).setContentBase(contentBase); } } } ================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/Parser.java ================================================ package de.kp.net.rtsp.server.response; import java.io.BufferedReader; import java.io.IOException; import java.net.URI; import java.util.StringTokenizer; import de.kp.net.rtsp.RtspConstants; /** * This class provides a parser for incoming RTSP * messages and splits them into appropriate parts. 
*
 * @author Stefan Krusche (krusche@dr-kruscheundpartner.de)
 */
public class Parser {

	/**
	 * Reads one complete RTSP request (everything up to and including the
	 * empty line that terminates the header block) from the reader.
	 *
	 * @param rtspBufferedReader reader wrapping the client connection
	 * @return the raw request text, possibly empty on EOF
	 * @throws IOException
	 */
	public static String readRequest(BufferedReader rtspBufferedReader) throws IOException {
		// use a StringBuilder instead of repeated String concatenation
		StringBuilder request = new StringBuilder();
		boolean endFound = false;
		int c;
		while ((c = rtspBufferedReader.read()) != -1) {
			request.append((char) c);
			if (c == '\n') {
				if (endFound) {
					// second '\n' in a row (ignoring '\r'): end of headers
					break;
				}
				endFound = true;
			} else if (c != '\r') {
				endFound = false;
			}
		}
		return request.toString();
	}

	/**
	 * This method determines the request type of an incoming RTSP request.
	 *
	 * @param request
	 * @return one of the RtspConstants request codes, or -1 if unknown
	 */
	public static int getRequestType(String request) {
		StringTokenizer tokens = new StringTokenizer(request);
		String requestType = tokens.hasMoreTokens() ? tokens.nextToken() : "";

		if (requestType.equals("OPTIONS"))
			return RtspConstants.OPTIONS;
		else if (requestType.equals("DESCRIBE"))
			return RtspConstants.DESCRIBE;
		else if (requestType.equals("SETUP"))
			return RtspConstants.SETUP;
		else if (requestType.equals("PLAY"))
			return RtspConstants.PLAY;
		else if (requestType.equals("PAUSE"))
			return RtspConstants.PAUSE;
		else if (requestType.equals("TEARDOWN"))
			return RtspConstants.TEARDOWN;

		return -1;
	}

	/**
	 * Returns the content base (the request URI, i.e. the second token of
	 * the request line).
	 *
	 * BUG FIX: the original called nextToken() twice under a single
	 * hasMoreTokens() check and threw a NoSuchElementException for a
	 * one-token request line.
	 *
	 * @param request
	 * @return the request URI, or "" if the request line is malformed
	 */
	public static String getContentBase(String request) {
		StringTokenizer tokens = new StringTokenizer(request);
		String contentBase = "";
		if (tokens.hasMoreTokens()) {
			tokens.nextToken(); // skip the method token
			if (tokens.hasMoreTokens())
				contentBase = tokens.nextToken();
		}
		return contentBase;
	}

	/**
	 * Returns the CSeq number of the request.
	 *
	 * Accepts both "CSeq: 1" and "CSeq:1"; the original relied on a fixed
	 * substring(6) offset and threw a NullPointerException when the header
	 * was missing.
	 *
	 * @param request
	 * @return
	 * @throws Exception if the CSeq header is missing or not numeric
	 */
	public static int getCseq(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "CSeq");
		if (lineInput == null)
			throw new Exception("missing CSeq header");
		return Integer.parseInt(lineInput.substring(lineInput.indexOf(':') + 1).trim());
	}

	/**
	 * Returns the interleaved channel pair of a SETUP request, or null if
	 * the Transport header carries no "interleaved=" parameter.
	 *
	 * @param request
	 * @return int[2] {rtpChannel, rtcpChannel} or null
	 * @throws Exception
	 */
	public static int[] getInterleavedSetup(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "Transport:");
		if (lineInput == null)
			return null; // no Transport header at all (original threw NPE)
		String[] parts = lineInput.split("interleaved=");
		if (parts.length < 2)
			return null;
		// cut off any trailing ';'-separated parameters before parsing
		String[] range = parts[1].split(";")[0].trim().split("-");
		int[] interleaved = new int[2];
		interleaved[0] = Integer.parseInt(range[0]);
		interleaved[1] = Integer.parseInt(range[1]);
		return interleaved;
	}

	/**
	 * Extracts the path component of the request URI.
	 *
	 * @param request
	 * @return the URI path (with leading slash)
	 * @throws Exception
	 */
	public static String getFileName(String request) throws Exception {
		String lineInput = getLineInput(request, " ", "rtsp");
		URI uri = new URI(lineInput);
		return uri.getPath();
	}

	/**
	 * This method retrieves a certain input from an incoming RTSP request,
	 * described by a separator and a specific prefix.
	 *
	 * @param request
	 * @param separator token separator, e.g. "\r\n"
	 * @param prefix prefix identifying the wanted token, e.g. "Transport:"
	 * @return the first matching token, or null if none matches
	 * @throws Exception
	 */
	public static String getLineInput(String request, String separator, String prefix) throws Exception {
		StringTokenizer str = new StringTokenizer(request, separator);
		while (str.hasMoreTokens()) {
			String token = str.nextToken();
			if (token.startsWith(prefix))
				return token;
		}
		return null;
	}

	/**
	 * This method retrieves the client port from an incoming RTSP request.
	 *
	 * BUG FIX: the original assumed "client_port=" is always the third
	 * ';'-separated part of the Transport header; we now locate it by name.
	 *
	 * @param request
	 * @return the first (RTP) port of the client_port range
	 * @throws Exception if the Transport header or the parameter is missing
	 */
	public static int getClientPort(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "Transport:");
		if (lineInput == null)
			throw new Exception();
		for (String part : lineInput.split(";")) {
			part = part.trim();
			if (part.startsWith("client_port=")) {
				String[] ports = part.substring("client_port=".length()).split("-");
				return Integer.parseInt(ports[0]);
			}
		}
		throw new Exception();
	}

	/**
	 * This method retrieves the transport protocol from an incoming RTSP
	 * request.
	 *
	 * @param request
	 * @return
	 * @throws Exception
	 */
	public static String getTransportProtocol(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "Transport:");
		if (lineInput == null)
			throw new Exception();
		String[] parts = lineInput.split(";");
		// drop the leading "Transport: " (11 characters)
		return parts[0].substring(11);
	}

	/**
	 * This method retrieves the range from an incoming RTSP request.
	 *
	 * @param request
	 * @return the range value, or null if absent
	 * @throws Exception
	 */
	public static String getRangePlay(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "Range:");
		if (lineInput == null) {
			/*
			 * Android's video view does not provide range information with a
			 * PLAY request
			 */
			return null;
		}
		String[] parts = lineInput.split("=");
		return parts[1];
	}

	/**
	 * This method determines the session type from an incoming RTSP request.
	 *
	 * @param request
	 * @return
	 * @throws Exception
	 */
	public static String getSessionType(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "Transport:");
		if (lineInput == null)
			throw new Exception();
		String[] parts = lineInput.split(";");
		return parts[1].trim();
	}

	/**
	 * This method retrieves the user agent from an incoming RTSP request.
	 *
	 * BUG FIX: the original used split(":")[1], which truncated any value
	 * containing a further ':' (e.g. version strings); we keep everything
	 * after the first ':'.
	 *
	 * @param request
	 * @return
	 * @throws Exception
	 */
	public String getUserAgent(String request) throws Exception {
		String lineInput = getLineInput(request, "\r\n", "User-Agent:");
		if (lineInput == null)
			throw new Exception();
		return lineInput.substring(lineInput.indexOf(':') + 1);
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspAnnounceResponse.java ================================================
package de.kp.net.rtsp.server.response;

/**
 * Response to an ANNOUNCE request; carries no body.
 */
public class RtspAnnounceResponse extends RtspResponse {

	public RtspAnnounceResponse(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspDescribeResponse.java ================================================
package de.kp.net.rtsp.server.response;

import java.net.UnknownHostException;

import de.kp.net.rtsp.RtspConstants.VideoEncoder;

/**
 * Response to a DESCRIBE request: delivers the SDP session description of
 * the streamed video as the message body.
 */
public class RtspDescribeResponse extends RtspResponse {

	protected String rtpSession = "";
	protected String contentBase = "";

	private String fileName;
	private VideoEncoder encoder;

	public RtspDescribeResponse(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
		SDP sdp = new SDP(fileName, encoder);
		String sdpContent = "";
		int sdpLength = 0;
		try {
			String sdpText = sdp.getSdp();
			// BUG FIX: Content-Length must count the SDP body only; the
			// original also counted the CRLF pair that terminates the
			// header block (off by 4 bytes)
			sdpLength = sdpText.length();
			sdpContent = CRLF2 + sdpText;
		} catch (UnknownHostException e) {
			e.printStackTrace();
		}
		body += "Content-base: " + contentBase + CRLF
			+ "Content-Type: application/sdp" + CRLF
			+ "Content-Length: " + sdpLength + sdpContent;
	}

	public String getContentBase() {
		return contentBase;
	}

	public void setContentBase(String contentBase) {
		this.contentBase = contentBase;
	}

	public void setFileName(String fileName) {
		this.fileName = fileName;
	}

	public void setVideoEncoder(VideoEncoder encoder) {
		this.encoder = encoder;
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspError.java ================================================
package
de.kp.net.rtsp.server.response;

/**
 * Fallback response for unknown or malformed requests; carries no body.
 * NOTE(review): the status line is still "200 OK" (see RtspResponse
 * .getHeader()) -- a real error status would be more appropriate; confirm
 * against the clients in use before changing it.
 */
public class RtspError extends RtspResponse {

	public RtspError(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspOptionsResponse.java ================================================
package de.kp.net.rtsp.server.response;

/**
 * Response to an OPTIONS request: advertises the RTSP methods this server
 * implements.
 */
public class RtspOptionsResponse extends RtspResponse {

	public RtspOptionsResponse(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
		this.body = "Public:DESCRIBE,SETUP,TEARDOWN,PLAY,PAUSE";
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspPauseResponse.java ================================================
package de.kp.net.rtsp.server.response;

/**
 * Response to a PAUSE request; carries no body.
 */
public class RtspPauseResponse extends RtspResponse {

	public RtspPauseResponse(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspPlayResponse.java ================================================
package de.kp.net.rtsp.server.response;

/**
 * Response to a PLAY request: echoes the session id and the playback range
 * (npt) back to the client.
 */
public class RtspPlayResponse extends RtspResponse {

	protected String range = "";

	public RtspPlayResponse(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
		this.body += "Session: " + session_id + CRLF + "Range: npt=" + range;
	}

	public String getRange() {
		return range;
	}

	public void setRange(String range) {
		this.range = range;
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspResponse.java ================================================
package de.kp.net.rtsp.server.response;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;

import de.kp.net.rtsp.RtspConstants;

/**
 * Base class of all RTSP server responses: renders the common status line
 * and headers (CSeq, Date, Server) and delegates the message body to the
 * concrete subclass via generateBody().
 *
 * NOTE(review): response and body are accumulated, so calling toString()
 * twice on the same instance appends the message again; each response
 * object is expected to be rendered exactly once.
 */
public abstract class RtspResponse {

	protected String response = "";
	protected int cseq = 0;

	// session id shared across all responses of the (single) session;
	// static because SETUP creates it and PLAY/PAUSE echo it
	protected static int session_id = -1;

	protected boolean newSessionId = true;
	protected String body = "";

	/**
	 * CR = carriage return, LF = line feed, CRLF = CR LF
	 */
	public static final String CRLF = "\r\n";
	public static final String CRLF2 = "\r\n\r\n";
	public static final String SEP = " ";

	public RtspResponse(int cseq) {
		this.cseq = cseq;
	}

	protected String getHeader() {
		StringBuilder sb = new StringBuilder();
		sb.append("RTSP/1.0" + SEP + "200" + SEP + "OK" + CRLF);
		sb.append(cseq() + CRLF);
		// RFC 1123 date in GMT, as required by RTSP (RFC 2326); replaces
		// the deprecated, locale-dependent Date.toGMTString()
		SimpleDateFormat dateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss 'GMT'", Locale.US);
		dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
		sb.append("Date: " + dateFormat.format(new Date()) + CRLF);
		sb.append("Server: " + getServer() + CRLF);
		return sb.toString();
	}

	protected String cseq() {
		return "CSeq:" + SEP + getCseq();
	}

	protected String getResponse() {
		return response;
	}

	protected void setResponse(String response) {
		this.response = response;
	}

	protected String getServer() {
		return RtspConstants.SERVER_NAME + "/" + RtspConstants.SERVER_VERSION;
	}

	protected int getCseq() {
		return cseq;
	}

	protected void setCseq(int cseq) {
		this.cseq = cseq;
	}

	protected String getBody() {
		return body;
	}

	protected void setBody(String body) {
		this.body = body;
	}

	protected void generate() {
		// note that it is important to close the response
		// message with 2 CRLFs
		response += getHeader();
		response += getBody() + CRLF2;
	}

	protected abstract void generateBody();

	public String toString() {
		generateBody();
		generate();
		return response;
	}

	public void createSessionId(boolean bool) {
		newSessionId = bool;
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspResponseTeardown.java ================================================
package de.kp.net.rtsp.server.response;

/**
 * Response to a TEARDOWN request; carries no body.
 */
public class RtspResponseTeardown extends RtspResponse {

	public RtspResponseTeardown(int cseq) {
		super(cseq);
	}

	// legacy method (Spanish "generar" = generate); not referenced by the
	// toString()/generate() chain of RtspResponse -- kept for compatibility
	protected void generar() {
		response += getHeader();
		response += getBody() + CRLF;
	}

	protected void generateBody() {
		body += "";
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/RtspSetupResponse.java ================================================
package de.kp.net.rtsp.server.response;

import java.util.Random;
import
de.kp.net.rtsp.RtspConstants;

/**
 * Response to a SETUP request: reports the negotiated session id and the
 * transport parameters (either UDP client/server ports or TCP interleaved
 * channels) back to the client.
 */
public class RtspSetupResponse extends RtspResponse {

	private int clientRTP, clientRTCP;
	private String clientIP = "";
	private int[] interleaved;

	private String transportProtocol = "";
	private String sessionType = "";

	public RtspSetupResponse(int cseq) {
		super(cseq);
	}

	protected void generateBody() {
		createSessionId();
		body += "Session: " + session_id + CRLF
			+ "Transport: " + transportProtocol + ";" + sessionType + ";";
		if (interleaved == null) {
			// UDP: announce source address plus client/server port pairs
			body += "source=" + RtspConstants.SERVER_IP + ";" + getPortPart();
		} else {
			// TCP interleaved mode
			body += getInterleavedPart();
		}
	}

	private String getPortPart() {
		return "client_port=" + clientRTP + "-" + clientRTCP + ";"
			+ "server_port=" + RtspConstants.PORTS_RTSP_RTP[0] + "-" + RtspConstants.PORTS_RTSP_RTP[1];
	}

	private String getInterleavedPart() {
		return "client_ip=" + clientIP + ";interleaved=" + interleaved[0] + "-" + interleaved[1];
	}

	/**
	 * Creates a new non-negative session id (only if newSessionId is set).
	 *
	 * BUG FIX: the original drew any int and flipped the sign with
	 * id *= -1, which leaves Integer.MIN_VALUE negative; nextInt(bound)
	 * always yields a value in [0, Integer.MAX_VALUE).
	 */
	private final void createSessionId() {
		if (newSessionId) {
			session_id = new Random().nextInt(Integer.MAX_VALUE);
		}
	}

	public void setClientPort(int port) {
		clientRTP = port;
		clientRTCP = port + 1; // RTCP conventionally uses the next port
	}

	public String getTransportProtocol() {
		return transportProtocol;
	}

	public void setTransportProtocol(String transportProtocol) {
		this.transportProtocol = transportProtocol;
	}

	public String getSessionType() {
		return sessionType;
	}

	public void setSessionType(String sessionType) {
		this.sessionType = sessionType;
	}

	public int[] getInterleaved() {
		return interleaved;
	}

	public void setInterleaved(int[] interleaved) {
		this.interleaved = interleaved;
	}

	public String getClientIP() {
		return clientIP;
	}

	public void setClientIP(String clientIP) {
		this.clientIP = clientIP;
	}
}

================================================ FILE: RtspCamera/src/de/kp/net/rtsp/server/response/SDP.java ================================================
package de.kp.net.rtsp.server.response;

import java.net.UnknownHostException;
import
com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config; import de.kp.net.rtsp.RtspConstants; import de.kp.net.rtsp.RtspConstants.VideoEncoder; public class SDP { // the default file name private String fileName = "kupdroid"; private int audioClientPort = RtspConstants.CLIENT_AUDIO_PORT; private int clientVideoPort = RtspConstants.CLIENT_VIDEO_PORT; private VideoEncoder encoder; public SDP(String fileName, VideoEncoder encoder) { this.fileName = fileName; this.encoder = encoder; } /** * This method is used to build a minimal * SDP file description. * * @return * @throws UnknownHostException */ public String getSdp() throws UnknownHostException { StringBuffer buf = new StringBuffer(); buf.append("v=0" + RtspResponse.CRLF); // filename contains leading slash buf.append("s=" + fileName.substring(1) + RtspResponse.CRLF); int track = 1; buf.append(getSDPVideo(track)); return buf.toString(); } /* private StringBuffer getSDPAudio(){ StringBuffer buf = new StringBuffer(); //m= buf.append("m=audio " + audioClientPort + " RTP/AVP 14" + RtspResponse.CRLF); //a=rtpmap: / [/] buf.append("a=rtpmap:14 MPA/90000" + RtspResponse.CRLF); buf.append("a=control:rtsp://" + RtspConstants.SERVER_IP + "/audio" + RtspResponse.CRLF); buf.append("a=mimetype: audio/MPA" + RtspResponse.CRLF); buf.append("a=range:npt=0-"); return buf; } */ private StringBuffer getSDPVideo(int track){ StringBuffer sb = new StringBuffer(); // H263 encoding if (encoder.equals(VideoEncoder.H263_ENCODER)) { // cross encoder properties sb.append("m=video " + clientVideoPort + RtspConstants.SEP + "RTP/AVP " + RtspConstants.RTP_H263_PAYLOADTYPE + RtspResponse.CRLF); // set to H263-2000 sb.append("a=rtpmap:" + RtspConstants.RTP_H263_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.H263_2000 + RtspResponse.CRLF); // additional information for android video view, due to extended checking mechanism sb.append("a=framesize:" + RtspConstants.RTP_H263_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.WIDTH + "-" + 
RtspConstants.HEIGHT + RtspResponse.CRLF); } else if (encoder.equals(VideoEncoder.H264_ENCODER)) { // cross encoder properties sb.append("m=video " + clientVideoPort + RtspConstants.SEP + "RTP/AVP " + RtspConstants.RTP_H264_PAYLOADTYPE + RtspResponse.CRLF); sb.append("a=rtpmap:" + RtspConstants.RTP_H264_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.H264 + RtspResponse.CRLF); /* * with change to in-band SPS/PPS parameters following SDP statements should be unnecessary */ // 176x144 15fps //sb.append("a=fmtp:" + RtspConstants.RTP_H264_PAYLOADTYPE + " packetization-mode=0;" + H264Config.CODEC_PARAMS +";sprop-parameter-sets=J0IAINoLExA,KM48gA==;" + RtspResponse.CRLF); // 352 288 15fps // sb.append("a=fmtp:" + RtspConstants.RTP_H264_PAYLOADTYPE + " packetization-mode=0;" + H264Config.CODEC_PARAMS +";sprop-parameter-sets=J0IAINoFglE=,KM48gA==;" + RtspResponse.CRLF); //buf.append("a=fmtp:98 packetization-mode=1;profile-level-id=420020;sprop-parameter-sets=J0IAIKaAoD0Q,KM48gA==;" + RtspResponse.CRLF); // 640x480 20fps // buf.append("a=fmtp:98 packetization-mode=1;profile-level-id=420020;sprop-parameter-sets=J0IAINoLExA,KM48gA==;" + RtspResponse.CRLF); // 176x144 15fps // sb.append("a=fmtp:" + RtspConstants.RTP_H264_PAYLOADTYPE + " packetization-mode=1;" + H264Config.CODEC_PARAMS +";sprop-parameter-sets=J0IAIKaCxMQ=,KM48gA==;" + RtspResponse.CRLF); // 176x144 20fps // buf.append("a=fmtp:98 packetization-mode=1;profile-level-id=420020;sprop-parameter-sets=J0IAINoFB8Q=,KM48gA==;" + RtspResponse.CRLF); // 320x240 10fps // additional information for android video view, due to extended checking mechanism sb.append("a=framesize:" + RtspConstants.RTP_H264_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.WIDTH + "-" + RtspConstants.HEIGHT + RtspResponse.CRLF); } sb.append("a=control:trackID=" + String.valueOf(track)); return sb; } /* private StringBuffer getSDPWebcam(){ StringBuffer buf = new StringBuffer(); buf.append("m=video " + clientVideoPort + " RTP/AVP 26" + 
RtspResponse.CRLF); buf.append("a=rtpmap:26 JPEG/90000"+RtspResponse.CRLF); buf.append("a=control:rtsp://" + RtspConstants.SERVER_IP + "/video" + RtspResponse.CRLF); buf.append("a=mimetype: video/JPEG" + RtspResponse.CRLF); buf.append("a=range:npt=0-100"); return buf; } */ public String getFileName() { return fileName; } /** * @param fileName */ public void setFileName(String fileName) { this.fileName = fileName; } public int getClientAudioPort() { return audioClientPort; } public void setClientAudioPort(int clientAudioPort) { this.audioClientPort = clientAudioPort; } public int getClientVideoPort() { return clientVideoPort ; } public void setClientVideoPort(int clientVideoPort) { this.clientVideoPort = clientVideoPort; } } ================================================ FILE: RtspCamera/src/de/kp/rtspcamera/MediaConstants.java ================================================ package de.kp.rtspcamera; public class MediaConstants { public static boolean H264_CODEC = true; } ================================================ FILE: RtspCamera/src/de/kp/rtspcamera/RtspApiCodecsCamera.java ================================================ package de.kp.rtspcamera; import java.io.IOException; import java.io.InputStream; import java.net.SocketException; import android.app.Activity; import android.media.MediaRecorder; import android.net.LocalServerSocket; import android.net.LocalSocket; import android.net.LocalSocketAddress; import android.os.Bundle; import android.util.Log; import android.view.SurfaceHolder; import android.view.SurfaceView; import android.view.Window; import android.view.WindowManager; import de.kp.net.rtp.RtpSender; import de.kp.net.rtp.packetizer.AbstractPacketizer; import de.kp.net.rtp.packetizer.H263Packetizer; import de.kp.net.rtp.packetizer.H264Packetizer; import de.kp.net.rtsp.RtspConstants; import de.kp.net.rtsp.server.RtspServer; public class RtspApiCodecsCamera extends Activity { private String TAG = "RTSPCamera"; // default RTSP command port is 
554 private int SERVER_PORT = 8080; private SurfaceView mVideoPreview; private SurfaceHolder mSurfaceHolder; // these parameters are used to separate between incoming // and outgoing streams private LocalServerSocket localSocketServer; private LocalSocket receiver; private LocalSocket sender; private MediaRecorder mediaRecorder; private boolean mediaRecorderRecording = false; protected boolean videoQualityHigh = false; private RtpSender rtpSender; private RtspServer streamer = null; private AbstractPacketizer videoPacketizer; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); Log.d(TAG, "onCreate"); requestWindowFeature(Window.FEATURE_NO_TITLE); Window win = getWindow(); win.addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON); win.setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN); setContentView(R.layout.cameraapicodecs); // hold the reference rtpSender = RtpSender.getInstance(); /* * Video preview initialization */ mVideoPreview = (SurfaceView) findViewById(R.id.smallcameraview); mSurfaceHolder = mVideoPreview.getHolder(); mSurfaceHolder.addCallback(surfaceCallback); mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS); } public void onResume() { Log.d(TAG, "onResume"); // starts the RTSP Server try { // initialize video encoder to be used // for SDP file generation RtspConstants.VideoEncoder rtspVideoEncoder = (MediaConstants.H264_CODEC == true) ? 
RtspConstants.VideoEncoder.H264_ENCODER : RtspConstants.VideoEncoder.H263_ENCODER; if (streamer == null) { streamer = new RtspServer(SERVER_PORT, rtspVideoEncoder); new Thread(streamer).start(); } Log.d(TAG, "RtspServer started"); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } /* * Camera initialization */ receiver = new LocalSocket(); try { localSocketServer = new LocalServerSocket("camera2rtsp"); // InputStream the RTPPackets can be built from receiver.connect(new LocalSocketAddress("camera2rtsp")); receiver.setReceiveBufferSize(500000); receiver.setSendBufferSize(500000); // FileDescriptor the Camera can send to sender = localSocketServer.accept(); sender.setReceiveBufferSize(500000); sender.setSendBufferSize(500000); } catch (IOException e1) { e1.printStackTrace(); super.onResume(); finish(); return; } super.onResume(); } @Override public void onPause() { // stop RTSP server if (streamer != null) streamer.stop(); streamer = null; super.onPause(); } /* * MediaRecorder listener */ private MediaRecorder.OnErrorListener mErrorListener = new MediaRecorder.OnErrorListener() { public void onError(MediaRecorder mr, int what, int extra) { // MediaRecorder or MediaPlayer error rtpSender.stop(); } }; /* * SurfaceHolder callback triple */ SurfaceHolder.Callback surfaceCallback = new SurfaceHolder.Callback() { /* * Created state: - Open camera - initial call to startPreview() - hook * PreviewCallback() on it, which notifies waiting thread with new * preview data - start thread * * @see android.view.SurfaceHolder.Callback#surfaceCreated(android.view. * SurfaceHolder ) */ public void surfaceCreated(SurfaceHolder holder) { Log.d(TAG, "surfaceCreated"); } /* * Changed state: - initiate camera preview size, set * camera.setPreviewDisplay(holder) - subsequent call to startPreview() * * @see android.view.SurfaceHolder.Callback#surfaceChanged(android.view. 
* SurfaceHolder , int, int, int) */ public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) { Log.d(TAG, "surfaceChanged"); initializeVideo(); startVideoRecording(); } /* * Destroy State: Take care on release of camera * * @see * android.view.SurfaceHolder.Callback#surfaceDestroyed(android.view. * SurfaceHolder) */ public void surfaceDestroyed(SurfaceHolder holder) { Log.d(TAG, "surfaceDestroyed"); stopVideoRecording(); } }; // initializeVideo() starts preview and prepare media recorder. // Returns false if initializeVideo fails private void initializeVideo() { Log.d(TAG, "initializeVideo: " + mediaRecorderRecording); mediaRecorderRecording = true; Log.v(TAG, "initializeVideo set to true: " + mediaRecorderRecording); if (mediaRecorder == null) mediaRecorder = new MediaRecorder(); else mediaRecorder.reset(); mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA); mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP); // route video to LocalSocket mediaRecorder.setOutputFile(sender.getFileDescriptor()); // Use the same frame rate for both, since internally // if the frame rate is too large, it can cause camera to become // unstable. We need to fix the MediaRecorder to disable the support // of setting frame rate for now. 
mediaRecorder.setVideoFrameRate(RtspConstants.FPS); // mMediaRecorder.setVideoEncodingBitRate(RtspConstants.BITRATE); mediaRecorder.setVideoSize(Integer.valueOf(RtspConstants.WIDTH), Integer.valueOf(RtspConstants.HEIGHT)); mediaRecorder.setVideoEncoder(getMediaEncoder()); mediaRecorder.setPreviewDisplay(mSurfaceHolder.getSurface()); try { mediaRecorder.prepare(); mediaRecorder.setOnErrorListener(mErrorListener); mediaRecorder.start(); } catch (IOException exception) { exception.printStackTrace(); releaseMediaRecorder(); } } private int getMediaEncoder() { if (MediaConstants.H264_CODEC == true) return MediaRecorder.VideoEncoder.H264; return MediaRecorder.VideoEncoder.H263; } private void startVideoRecording() { Log.v(TAG, "startVideoRecording"); InputStream fis = null; try { fis = receiver.getInputStream(); } catch (IOException e1) { Log.w(TAG, "No receiver input stream"); return; } try { // actually H263 over RTP and H264 over RTP is supported if (MediaConstants.H264_CODEC == true) { videoPacketizer = new H264Packetizer(fis); } else { videoPacketizer = new H263Packetizer(fis); } videoPacketizer.startStreaming(); } catch (SocketException e) { // TODO Auto-generated catch block e.printStackTrace(); } } private void stopVideoRecording() { Log.d(TAG, "stopVideoRecording"); if (mediaRecorderRecording || mediaRecorder != null) { try { // stop thread videoPacketizer.stopStreaming(); if (mediaRecorderRecording && mediaRecorder != null) { try { mediaRecorder.setOnErrorListener(null); mediaRecorder.setOnInfoListener(null); mediaRecorder.stop(); } catch (RuntimeException e) { Log.e(TAG, "stop fail: " + e.getMessage()); } mediaRecorderRecording = false; } } catch (Exception e) { Log.e(TAG, "stopVideoRecording failed"); e.printStackTrace(); } finally { releaseMediaRecorder(); } } } private void releaseMediaRecorder() { Log.d(TAG, "Releasing media recorder."); if (mediaRecorder != null) { mediaRecorder.reset(); mediaRecorder.release(); mediaRecorder = null; } } } 
================================================
FILE: RtspCamera/src/de/kp/rtspcamera/RtspNativeCodecsCamera.java
================================================
package de.kp.rtspcamera;

import java.io.IOException;

import android.app.Activity;
import android.hardware.Camera;
import android.os.Bundle;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.Window;
import android.view.WindowManager;

import de.kp.net.rtp.recorder.RtspVideoRecorder;
import de.kp.net.rtsp.RtspConstants;
import de.kp.net.rtsp.server.RtspServer;

/**
 * Camera activity that feeds raw preview frames into an
 * {@link RtspVideoRecorder} (native encoder path) while an
 * {@link RtspServer} answers RTSP client requests.
 */
public class RtspNativeCodecsCamera extends Activity {

	private String TAG = "RTSPNativeCamera";

	// default RTSP command port is 554
	// private int SERVER_PORT = 8080;

	private RtspVideoRecorder outgoingPlayer;

	private SurfaceView mCameraPreview;
	private SurfaceHolder previewHolder;

	private Camera camera;
	private boolean inPreview = false;
	private boolean cameraConfigured = false;

	private int mPreviewWidth = Integer.valueOf(RtspConstants.WIDTH);
	private int mPreviewHeight = Integer.valueOf(RtspConstants.HEIGHT);

	private RtspServer streamer;

	@Override
	public void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		Log.d(TAG, "onCreate");

		requestWindowFeature(Window.FEATURE_NO_TITLE);

		Window win = getWindow();
		win.addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
		win.setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
				WindowManager.LayoutParams.FLAG_FULLSCREEN);

		setContentView(R.layout.cameranativecodecs);

		/*
		 * Camera preview initialization
		 */
		mCameraPreview = (SurfaceView) findViewById(R.id.smallcameraview);
		previewHolder = mCameraPreview.getHolder();
		previewHolder.addCallback(surfaceCallback);
		previewHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);

		// outgoingPlayer = new RtspVideoRecorder("h263-2000");
		outgoingPlayer = new RtspVideoRecorder("h264");
		outgoingPlayer.open();
	}

	@Override
	public void onResume() {
		Log.d(TAG, "onResume");

		// starts the RTSP Server
		try {
			// initialize video encoder to be used for SDP file generation
			RtspConstants.VideoEncoder rtspVideoEncoder = MediaConstants.H264_CODEC
					? RtspConstants.VideoEncoder.H264_ENCODER
					: RtspConstants.VideoEncoder.H263_ENCODER;

			if (streamer == null) {
				streamer = new RtspServer(RtspConstants.SERVER_PORT, rtspVideoEncoder);
				new Thread(streamer).start();
			}
			Log.d(TAG, "RtspServer started");

		} catch (IOException e) {
			e.printStackTrace();
		}

		/*
		 * Camera initialization. FIX: open the camera only when we do not
		 * already hold it; the camera is released in surfaceDestroyed(), so
		 * on a resume where the surface survived, a second Camera.open()
		 * would fail and leak the first instance.
		 */
		if (camera == null)
			camera = Camera.open();

		super.onResume();
	}

	@Override
	public void onPause() {
		// stop RTSP server
		if (streamer != null)
			streamer.stop();
		streamer = null;

		super.onPause();
	}

	/*
	 * SurfaceHolder callback triple
	 */
	SurfaceHolder.Callback surfaceCallback = new SurfaceHolder.Callback() {

		public void surfaceCreated(SurfaceHolder holder) {
			Log.d(TAG, "surfaceCreated");
		}

		// preview setup is deferred until the surface has its final size
		public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
			Log.d(TAG, "surfaceChanged");
			initializePreview(w, h);
			startPreview();
		}

		public void surfaceDestroyed(SurfaceHolder holder) {
			Log.d(TAG, "surfaceDestroyed");

			// FIX: guard against a camera that was never (re)opened; the
			// unguarded dereference threw a NullPointerException
			if (camera != null) {
				if (inPreview) {
					camera.stopPreview();
				}
				camera.setPreviewCallback(null);
				camera.release();
				camera = null;
			}

			// stop captureThread
			outgoingPlayer.stop();

			inPreview = false;
			cameraConfigured = false;
		}
	};

	/**
	 * Binds the preview surface to the camera and applies the configured
	 * preview size exactly once per camera session.
	 *
	 * @param width  new surface width (currently unused; preview size comes
	 *               from RtspConstants)
	 * @param height new surface height (currently unused)
	 */
	private void initializePreview(int width, int height) {
		Log.d(TAG, "initializePreview");

		if (camera != null && previewHolder.getSurface() != null) {
			try {
				// provide SurfaceView for camera preview
				camera.setPreviewDisplay(previewHolder);
			} catch (Throwable t) {
				Log.e(TAG, "Exception in setPreviewDisplay()", t);
			}

			if (!cameraConfigured) {
				Camera.Parameters parameters = camera.getParameters();
				parameters.setPreviewSize(mPreviewWidth, mPreviewHeight);
				camera.setParameters(parameters);
				cameraConfigured = true;
			}
		}
	}

	/**
	 * Starts the camera preview and routes preview frames into the recorder.
	 */
	private void startPreview() {
		Log.d(TAG, "startPreview");

		if (cameraConfigured && camera != null) {
			// activate onPreviewFrame(): the recorder consumes raw frames
			// camera.setPreviewCallback(cameraPreviewCallback);
			camera.setPreviewCallback(outgoingPlayer);

			// start captureThread
			outgoingPlayer.start();

			camera.startPreview();
			inPreview = true;
		}
	}

	/** @return true while the camera preview is running */
	public boolean isReady() {
		return this.inPreview;
	}
}

================================================
FILE: RtspViewer/.classpath
================================================

================================================
FILE: RtspViewer/.gitignore
================================================
/bin
/gen

================================================
FILE: RtspViewer/.project
================================================
RtspViewer com.android.ide.eclipse.adt.ResourceManagerBuilder com.android.ide.eclipse.adt.PreCompilerBuilder org.eclipse.jdt.core.javabuilder com.android.ide.eclipse.adt.ApkBuilder com.android.ide.eclipse.adt.AndroidNature org.eclipse.jdt.core.javanature

================================================
FILE:
RtspViewer/AndroidManifest.xml ================================================ ================================================ FILE: RtspViewer/gpl.txt ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. 
And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. 
"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. 
A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. 
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/licenses/why-not-lgpl.html>. ================================================ FILE: RtspViewer/proguard-project.txt ================================================ # To enable ProGuard in your project, edit project.properties # to define the proguard.config property as described in that file. # # Add project specific ProGuard rules here. # By default, the flags in this file are appended to flags specified # in ${sdk.dir}/tools/proguard/proguard-android.txt # You can edit the include path and order by changing the ProGuard # include property in project.properties. # # For more details, see # http://developer.android.com/guide/developing/tools/proguard.html # Add any project specific keep options here: # If your project uses WebView with JS, uncomment the following # and specify the fully qualified class name to the JavaScript interface # class: #-keepclassmembers class fqcn.of.javascript.interface.for.webview { # public *; #} ================================================ FILE: RtspViewer/project.properties ================================================ # This file is automatically generated by Android Tools. # Do not modify this file -- YOUR CHANGES WILL BE ERASED! 
# # This file must be checked in Version Control Systems. # # To customize properties used by the Ant build system edit # "ant.properties", and override values to adapt the script to your # project structure. # # To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home): #proguard.config=${sdk.dir}\tools\proguard\proguard-android.txt:proguard-project.txt # Project target. target=android-10 android.library.reference.1=../RtspCamera ================================================ FILE: RtspViewer/res/layout/videoview.xml ================================================ ================================================ FILE: RtspViewer/res/values/strings.xml ================================================ Hello World, RtspViewerActivity! RtspViewer ================================================ FILE: RtspViewer/src/de/kp/rtspviewer/RtspViewerActivity.java ================================================ package de.kp.rtspviewer; /** * This is the most minimal viewer for RtspCamera app * * @author Peter Arwanitis (arwanitis@dr-kruscheundpartner.de) * */ import android.app.Activity; import android.os.Bundle; import android.util.Log; import com.orangelabs.rcs.platform.AndroidFactory; import com.orangelabs.rcs.provider.settings.RcsSettings; import com.orangelabs.rcs.service.api.client.media.video.VideoSurfaceView; import de.kp.net.rtp.viewer.RtpVideoRenderer; public class RtspViewerActivity extends Activity { /** * Video renderer */ private RtpVideoRenderer incomingRenderer = null; /** * Video preview */ private VideoSurfaceView incomingVideoView = null; /** * hardcoded rtsp server path */ private String rtspConnect = "rtsp://192.168.178.47:8080/video"; // private String rtsp = // "rtsp://184.72.239.149/vod/mp4:BigBuckBunny_175k.mov"; private int videoHeight; private int videoWidth; private String TAG = "RtspViewer"; @Override public void onCreate(Bundle icicle) { Log.i(TAG, "onCreate"); super.onCreate(icicle); // 
Set application context ... skipping FileFactory AndroidFactory.setApplicationContext(getApplicationContext()); // Instantiate the settings manager RcsSettings.createInstance(getApplicationContext()); setContentView(R.layout.videoview); // h263-2000 // // h263-2000 // h264 // // // Low (H.263) // High (H.264) // // // QCIF // // QCIF // // // // Low (176x144) // // // Set incoming video preview if (incomingVideoView == null) { incomingVideoView = (VideoSurfaceView) findViewById(R.id.incoming_video_view); incomingVideoView.setAspectRatio(videoWidth, videoHeight); try { incomingRenderer = new RtpVideoRenderer(rtspConnect); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); } incomingRenderer.setVideoSurface(incomingVideoView); } } @Override protected void onPause() { Log.i(TAG, "onPause"); super.onPause(); } @Override protected void onResume() { Log.i(TAG, "onResume"); super.onResume(); incomingRenderer.open(); incomingRenderer.start(); Log.i(TAG, "onResume renderer started"); } @Override public void onDestroy() { Log.i(TAG, "onDestroy"); super.onDestroy(); incomingRenderer.stop(); incomingRenderer.close(); } }