Repository: yangyichao-mango/flink-study Branch: main Commit: 9e7daac7923b Files: 393 Total size: 1.5 MB Directory structure: gitextract_amrzktuf/ ├── .gitignore ├── README.md ├── flink-examples-1.10/ │ ├── pom.xml │ └── src/ │ └── main/ │ └── java/ │ └── flink/ │ └── examples/ │ └── sql/ │ └── _07/ │ └── query/ │ └── _06_joins/ │ └── _02_interval_joins/ │ └── _01_outer_join/ │ ├── WindowJoinFunction$46.java │ └── _06_Interval_Outer_Joins_EventTime_Test.java ├── flink-examples-1.12/ │ ├── .gitignore │ ├── pom.xml │ └── src/ │ └── main/ │ └── java/ │ └── flink/ │ └── examples/ │ ├── datastream/ │ │ └── _07/ │ │ └── query/ │ │ └── _04_window/ │ │ └── _04_TumbleWindowTest.java │ └── sql/ │ └── _07/ │ └── query/ │ └── _04_window_agg/ │ ├── _04_TumbleWindowTest.java │ ├── _04_TumbleWindowTest_GroupingWindowAggsHandler$59.java │ ├── _04_TumbleWindowTest_KeyProjection$69.java │ └── _04_TumbleWindowTest_WatermarkGenerator$6.java ├── flink-examples-1.13/ │ ├── .gitignore │ ├── pom.xml │ └── src/ │ ├── main/ │ │ ├── java/ │ │ │ └── flink/ │ │ │ ├── core/ │ │ │ │ └── source/ │ │ │ │ ├── JaninoUtils.java │ │ │ │ └── SourceFactory.java │ │ │ └── examples/ │ │ │ ├── FlinkEnvUtils.java │ │ │ ├── JacksonUtils.java │ │ │ ├── datastream/ │ │ │ │ ├── _01/ │ │ │ │ │ └── bytedance/ │ │ │ │ │ └── split/ │ │ │ │ │ ├── codegen/ │ │ │ │ │ │ ├── JaninoUtils.java │ │ │ │ │ │ └── benchmark/ │ │ │ │ │ │ └── Benchmark.java │ │ │ │ │ ├── job/ │ │ │ │ │ │ ├── SplitExampleJob.java │ │ │ │ │ │ └── start.sh │ │ │ │ │ ├── kafka/ │ │ │ │ │ │ ├── KafkaProducerCenter.java │ │ │ │ │ │ └── demo/ │ │ │ │ │ │ ├── Application.java │ │ │ │ │ │ ├── ConsumerThread.java │ │ │ │ │ │ └── ProducerThread.java │ │ │ │ │ ├── model/ │ │ │ │ │ │ ├── ClientLogSink.java │ │ │ │ │ │ ├── ClientLogSource.java │ │ │ │ │ │ ├── DynamicProducerRule.java │ │ │ │ │ │ └── Evaluable.java │ │ │ │ │ └── zkconfigcenter/ │ │ │ │ │ ├── ZkBasedConfigCenter.java │ │ │ │ │ ├── new.json │ │ │ │ │ └── old.json │ │ │ │ ├── _02/ │ │ │ │ │ ├── 
DataStreamTest.java │ │ │ │ │ └── DataStreamTest1.java │ │ │ │ ├── _03/ │ │ │ │ │ ├── enums_state/ │ │ │ │ │ │ ├── EnumsStateTest.java │ │ │ │ │ │ └── SenerioTest.java │ │ │ │ │ └── state/ │ │ │ │ │ ├── StateExamplesTest.java │ │ │ │ │ ├── _01_broadcast_state/ │ │ │ │ │ │ └── BroadcastStateTest.java │ │ │ │ │ ├── _03_rocksdb/ │ │ │ │ │ │ ├── CreateStateBackendTest.java │ │ │ │ │ │ ├── GettingStartDemo.java │ │ │ │ │ │ ├── Rocksdb_OperatorAndKeyedState_StateStorageDIr_Test.java │ │ │ │ │ │ ├── keyed_state/ │ │ │ │ │ │ │ ├── RocksBackendKeyedMapStateTest.java │ │ │ │ │ │ │ └── RocksBackendKeyedValueStateTest.java │ │ │ │ │ │ └── operator_state/ │ │ │ │ │ │ ├── KeyedStreamOperatorListStateTest.java │ │ │ │ │ │ └── RocksBackendOperatorListStateTest.java │ │ │ │ │ ├── _04_filesystem/ │ │ │ │ │ │ ├── keyed_state/ │ │ │ │ │ │ │ └── FsStateBackendKeyedMapStateTest.java │ │ │ │ │ │ └── operator_state/ │ │ │ │ │ │ └── FsStateBackendOperatorListStateTest.java │ │ │ │ │ └── _05_memory/ │ │ │ │ │ └── keyed_state/ │ │ │ │ │ └── MemoryStateBackendKeyedMapStateTest.java │ │ │ │ ├── _04/ │ │ │ │ │ └── keyed_co_process/ │ │ │ │ │ ├── HashMapTest.java │ │ │ │ │ └── _04_KeyedCoProcessFunctionTest.java │ │ │ │ ├── _05_ken/ │ │ │ │ │ └── _01_watermark/ │ │ │ │ │ └── WatermarkTest.java │ │ │ │ ├── _06_test/ │ │ │ │ │ └── _01_event_proctime/ │ │ │ │ │ ├── OneJobWIthProcAndEventTimeWIndowTest.java │ │ │ │ │ └── OneJobWIthTimerTest.java │ │ │ │ ├── _07_lambda_error/ │ │ │ │ │ └── LambdaErrorTest.java │ │ │ │ ├── _08_late_record/ │ │ │ │ │ └── LatenessTest.java │ │ │ │ ├── _09_join/ │ │ │ │ │ ├── _01_window_join/ │ │ │ │ │ │ └── _01_Window_Join_Test.java │ │ │ │ │ └── _02_connect/ │ │ │ │ │ └── _01_Connect_Test.java │ │ │ │ └── _10_agg/ │ │ │ │ └── AggTest.java │ │ │ ├── practice/ │ │ │ │ └── _01/ │ │ │ │ └── dau/ │ │ │ │ └── _01_DataStream_Session_Window.java │ │ │ ├── question/ │ │ │ │ ├── datastream/ │ │ │ │ │ └── _01/ │ │ │ │ │ └── kryo_protobuf_no_more_bytes_left/ │ │ │ │ │ └── 
KryoProtobufNoMoreBytesLeftTest.java │ │ │ │ └── sql/ │ │ │ │ └── _01/ │ │ │ │ └── lots_source_fields_poor_performance/ │ │ │ │ ├── EmbeddedKafka.java │ │ │ │ ├── _01_DataGenSourceTest.java │ │ │ │ └── _01_JsonSourceTest.java │ │ │ ├── runtime/ │ │ │ │ ├── _01/ │ │ │ │ │ └── future/ │ │ │ │ │ ├── CompletableFutureTest.java │ │ │ │ │ ├── CompletableFutureTest4.java │ │ │ │ │ ├── CompletableFuture_AnyOf_Test3.java │ │ │ │ │ ├── CompletableFuture_ThenApplyAsync_Test2.java │ │ │ │ │ ├── CompletableFuture_ThenComposeAsync_Test2.java │ │ │ │ │ └── FutureTest.java │ │ │ │ └── _04/ │ │ │ │ └── statebackend/ │ │ │ │ └── CancelAndRestoreWithCheckpointTest.java │ │ │ └── sql/ │ │ │ ├── _01/ │ │ │ │ └── countdistincterror/ │ │ │ │ ├── CountDistinctErrorTest.java │ │ │ │ ├── CountDistinctErrorTest2.java │ │ │ │ ├── CountDistinctErrorTest3.java │ │ │ │ └── udf/ │ │ │ │ ├── Mod_UDF.java │ │ │ │ ├── StatusMapper1_UDF.java │ │ │ │ └── StatusMapper_UDF.java │ │ │ ├── _02/ │ │ │ │ └── timezone/ │ │ │ │ ├── TimeZoneTest.java │ │ │ │ ├── TimeZoneTest2.java │ │ │ │ └── TimeZoneTest3.java │ │ │ ├── _03/ │ │ │ │ └── source_sink/ │ │ │ │ ├── CreateViewTest.java │ │ │ │ ├── DataStreamSourceEventTimeTest.java │ │ │ │ ├── DataStreamSourceProcessingTimeTest.java │ │ │ │ ├── KafkaSourceTest.java │ │ │ │ ├── RedisLookupTest.java │ │ │ │ ├── RedisSinkTest.java │ │ │ │ ├── SocketSourceTest.java │ │ │ │ ├── TableApiKafkaSourceTest.java │ │ │ │ ├── UpsertKafkaSinkProtobufFormatSupportTest.java │ │ │ │ ├── UpsertKafkaSinkTest.java │ │ │ │ ├── UserDefinedSourceTest.java │ │ │ │ ├── abilities/ │ │ │ │ │ ├── sink/ │ │ │ │ │ │ ├── Abilities_SinkFunction.java │ │ │ │ │ │ ├── Abilities_TableSink.java │ │ │ │ │ │ ├── Abilities_TableSinkFactory.java │ │ │ │ │ │ └── _01_SupportsWritingMetadata_Test.java │ │ │ │ │ └── source/ │ │ │ │ │ ├── Abilities_SourceFunction.java │ │ │ │ │ ├── Abilities_TableSource.java │ │ │ │ │ ├── Abilities_TableSourceFactory.java │ │ │ │ │ ├── _01_SupportsFilterPushDown_Test.java │ │ 
│ │ │ ├── _02_SupportsLimitPushDown_Test.java │ │ │ │ │ ├── _03_SupportsPartitionPushDown_Test.java │ │ │ │ │ ├── _04_SupportsProjectionPushDown_JDBC_Test.java │ │ │ │ │ ├── _04_SupportsProjectionPushDown_Test.java │ │ │ │ │ ├── _05_SupportsReadingMetadata_Test.java │ │ │ │ │ ├── _06_SupportsWatermarkPushDown_Test.java │ │ │ │ │ ├── _07_SupportsSourceWatermark_Test.java │ │ │ │ │ └── before/ │ │ │ │ │ ├── Before_Abilities_SourceFunction.java │ │ │ │ │ ├── Before_Abilities_TableSource.java │ │ │ │ │ ├── Before_Abilities_TableSourceFactory.java │ │ │ │ │ ├── _01_Before_SupportsFilterPushDown_Test.java │ │ │ │ │ ├── _02_Before_SupportsLimitPushDown_Test.java │ │ │ │ │ ├── _03_Before_SupportsPartitionPushDown_Test.java │ │ │ │ │ ├── _04_Before_SupportsProjectionPushDown_Test.java │ │ │ │ │ ├── _05_Before_SupportsReadingMetadata_Test.java │ │ │ │ │ ├── _06_Before_SupportsWatermarkPushDown_Test.java │ │ │ │ │ └── _07_Before_SupportsSourceWatermark_Test.java │ │ │ │ ├── ddl/ │ │ │ │ │ └── TableApiDDLTest.java │ │ │ │ └── table/ │ │ │ │ ├── redis/ │ │ │ │ │ ├── container/ │ │ │ │ │ │ ├── RedisCommandsContainer.java │ │ │ │ │ │ ├── RedisCommandsContainerBuilder.java │ │ │ │ │ │ └── RedisContainer.java │ │ │ │ │ ├── demo/ │ │ │ │ │ │ └── RedisDemo.java │ │ │ │ │ ├── mapper/ │ │ │ │ │ │ ├── LookupRedisMapper.java │ │ │ │ │ │ ├── RedisCommand.java │ │ │ │ │ │ ├── RedisCommandDescription.java │ │ │ │ │ │ └── SetRedisMapper.java │ │ │ │ │ ├── options/ │ │ │ │ │ │ ├── RedisLookupOptions.java │ │ │ │ │ │ ├── RedisOptions.java │ │ │ │ │ │ └── RedisWriteOptions.java │ │ │ │ │ ├── v1/ │ │ │ │ │ │ ├── RedisDynamicTableFactory.java │ │ │ │ │ │ ├── sink/ │ │ │ │ │ │ │ └── RedisDynamicTableSink.java │ │ │ │ │ │ └── source/ │ │ │ │ │ │ ├── RedisDynamicTableSource.java │ │ │ │ │ │ └── RedisRowDataLookupFunction.java │ │ │ │ │ └── v2/ │ │ │ │ │ ├── RedisDynamicTableFactory.java │ │ │ │ │ ├── sink/ │ │ │ │ │ │ └── RedisDynamicTableSink.java │ │ │ │ │ └── source/ │ │ │ │ │ ├── 
RedisDynamicTableSource.java │ │ │ │ │ ├── RedisRowDataBatchLookupFunction.java │ │ │ │ │ └── RedisRowDataLookupFunction.java │ │ │ │ ├── socket/ │ │ │ │ │ ├── SocketDynamicTableFactory.java │ │ │ │ │ ├── SocketDynamicTableSource.java │ │ │ │ │ └── SocketSourceFunction.java │ │ │ │ └── user_defined/ │ │ │ │ ├── UserDefinedDynamicTableFactory.java │ │ │ │ ├── UserDefinedDynamicTableSource.java │ │ │ │ └── UserDefinedSource.java │ │ │ ├── _04/ │ │ │ │ └── type/ │ │ │ │ ├── BlinkPlannerTest.java │ │ │ │ ├── JavaEnvTest.java │ │ │ │ └── OldPlannerTest.java │ │ │ ├── _05/ │ │ │ │ └── format/ │ │ │ │ └── formats/ │ │ │ │ ├── ProtobufFormatTest.java │ │ │ │ ├── SocketWriteTest.java │ │ │ │ ├── csv/ │ │ │ │ │ ├── ChangelogCsvDeserializer.java │ │ │ │ │ ├── ChangelogCsvFormat.java │ │ │ │ │ └── ChangelogCsvFormatFactory.java │ │ │ │ ├── protobuf/ │ │ │ │ │ ├── descriptors/ │ │ │ │ │ │ ├── Protobuf.java │ │ │ │ │ │ └── ProtobufValidator.java │ │ │ │ │ ├── row/ │ │ │ │ │ │ ├── ProtobufDeserializationSchema.java │ │ │ │ │ │ ├── ProtobufRowDeserializationSchema.java │ │ │ │ │ │ ├── ProtobufRowFormatFactory.java │ │ │ │ │ │ ├── ProtobufRowSerializationSchema.java │ │ │ │ │ │ ├── ProtobufSerializationSchema.java │ │ │ │ │ │ ├── ProtobufUtils.java │ │ │ │ │ │ └── typeutils/ │ │ │ │ │ │ └── ProtobufSchemaConverter.java │ │ │ │ │ └── rowdata/ │ │ │ │ │ ├── ProtobufFormatFactory.java │ │ │ │ │ ├── ProtobufOptions.java │ │ │ │ │ ├── ProtobufRowDataDeserializationSchema.java │ │ │ │ │ ├── ProtobufRowDataSerializationSchema.java │ │ │ │ │ ├── ProtobufToRowDataConverters.java │ │ │ │ │ └── RowDataToProtobufConverters.java │ │ │ │ └── utils/ │ │ │ │ ├── MoreRunnables.java │ │ │ │ ├── MoreSuppliers.java │ │ │ │ ├── ThrowableRunable.java │ │ │ │ └── ThrowableSupplier.java │ │ │ ├── _06/ │ │ │ │ └── calcite/ │ │ │ │ ├── CalciteTest.java │ │ │ │ ├── ParserTest.java │ │ │ │ └── javacc/ │ │ │ │ ├── JavaccCodeGenTest.java │ │ │ │ ├── Simple1Test.java │ │ │ │ └── generatedcode/ │ │ │ │ ├── 
ParseException.java │ │ │ │ ├── Simple1.java │ │ │ │ ├── Simple1Constants.java │ │ │ │ ├── Simple1TokenManager.java │ │ │ │ ├── SimpleCharStream.java │ │ │ │ ├── Token.java │ │ │ │ └── TokenMgrError.java │ │ │ ├── _07/ │ │ │ │ └── query/ │ │ │ │ ├── _01_select_where/ │ │ │ │ │ ├── SelectWhereHiveDialect.java │ │ │ │ │ ├── SelectWhereTest.java │ │ │ │ │ ├── SelectWhereTest2.java │ │ │ │ │ ├── SelectWhereTest3.java │ │ │ │ │ ├── SelectWhereTest4.java │ │ │ │ │ ├── SelectWhereTest5.java │ │ │ │ │ └── StreamExecCalc$10.java │ │ │ │ ├── _02_select_distinct/ │ │ │ │ │ ├── GroupAggsHandler$5.java │ │ │ │ │ ├── KeyProjection$0.java │ │ │ │ │ ├── SelectDistinctTest.java │ │ │ │ │ └── SelectDistinctTest2.java │ │ │ │ ├── _03_group_agg/ │ │ │ │ │ ├── _01_group_agg/ │ │ │ │ │ │ ├── GroupAggMiniBatchTest.java │ │ │ │ │ │ ├── GroupAggTest.java │ │ │ │ │ │ └── GroupAggsHandler$39.java │ │ │ │ │ ├── _02_count_distinct/ │ │ │ │ │ │ ├── CountDistinctGroupAggTest.java │ │ │ │ │ │ └── GroupAggsHandler$17.java │ │ │ │ │ ├── _03_grouping_sets/ │ │ │ │ │ │ ├── GroupingSetsEqualsGroupAggUnionAllGroupAggTest2.java │ │ │ │ │ │ ├── GroupingSetsGroupAggTest.java │ │ │ │ │ │ ├── GroupingSetsGroupAggTest2.java │ │ │ │ │ │ └── StreamExecExpand$20.java │ │ │ │ │ ├── _04_cube/ │ │ │ │ │ │ ├── CubeGroupAggTest.java │ │ │ │ │ │ └── CubeGroupAggTest2.java │ │ │ │ │ └── _05_rollup/ │ │ │ │ │ ├── RollUpGroupAggTest.java │ │ │ │ │ └── RollUpGroupAggTest2.java │ │ │ │ ├── _04_window_agg/ │ │ │ │ │ ├── _01_tumble_window/ │ │ │ │ │ │ ├── TumbleWindow2GroupAggTest.java │ │ │ │ │ │ ├── TumbleWindowTest.java │ │ │ │ │ │ ├── TumbleWindowTest2.java │ │ │ │ │ │ ├── TumbleWindowTest3.java │ │ │ │ │ │ ├── TumbleWindowTest4.java │ │ │ │ │ │ ├── TumbleWindowTest5.java │ │ │ │ │ │ ├── global_agg/ │ │ │ │ │ │ │ ├── GlobalWindowAggsHandler$232.java │ │ │ │ │ │ │ ├── LocalWindowAggsHandler$162.java │ │ │ │ │ │ │ └── StateWindowAggsHandler$300.java │ │ │ │ │ │ └── local_agg/ │ │ │ │ │ │ ├── KeyProjection$89.java │ │ │ │ 
│ │ └── LocalWindowAggsHandler$88.java │ │ │ │ │ ├── _02_cumulate_window/ │ │ │ │ │ │ ├── CumulateWindowGroupingSetsBigintTest.java │ │ │ │ │ │ ├── CumulateWindowGroupingSetsTest.java │ │ │ │ │ │ ├── CumulateWindowTest.java │ │ │ │ │ │ ├── TumbleWindowEarlyFireTest.java │ │ │ │ │ │ ├── cumulate/ │ │ │ │ │ │ │ ├── global_agg/ │ │ │ │ │ │ │ │ ├── GlobalWindowAggsHandler$232.java │ │ │ │ │ │ │ │ ├── KeyProjection$301.java │ │ │ │ │ │ │ │ ├── LocalWindowAggsHandler$162.java │ │ │ │ │ │ │ │ └── StateWindowAggsHandler$300.java │ │ │ │ │ │ │ └── local_agg/ │ │ │ │ │ │ │ ├── KeyProjection$89.java │ │ │ │ │ │ │ └── LocalWindowAggsHandler$88.java │ │ │ │ │ │ └── earlyfire/ │ │ │ │ │ │ ├── GroupAggsHandler$210.java │ │ │ │ │ │ └── GroupingWindowAggsHandler$57.java │ │ │ │ │ └── _03_hop_window/ │ │ │ │ │ └── HopWindowGroupWindowAggTest.java │ │ │ │ ├── _05_over/ │ │ │ │ │ ├── _01_row_number/ │ │ │ │ │ │ ├── RowNumberOrderByBigintTest.java │ │ │ │ │ │ ├── RowNumberOrderByStringTest.java │ │ │ │ │ │ ├── RowNumberOrderByUnixTimestampTest.java │ │ │ │ │ │ ├── RowNumberWithoutPartitionKeyTest.java │ │ │ │ │ │ ├── RowNumberWithoutRowNumberEqual1Test.java │ │ │ │ │ │ └── Scalar_UDF.java │ │ │ │ │ └── _02_agg/ │ │ │ │ │ ├── RangeIntervalProctimeTest.java │ │ │ │ │ ├── RangeIntervalRowtimeAscendingTest.java │ │ │ │ │ ├── RangeIntervalRowtimeBoundedOutOfOrdernessTest.java │ │ │ │ │ ├── RangeIntervalRowtimeStrictlyAscendingTest.java │ │ │ │ │ └── RowIntervalTest.java │ │ │ │ ├── _06_joins/ │ │ │ │ │ ├── _01_regular_joins/ │ │ │ │ │ │ ├── _01_inner_join/ │ │ │ │ │ │ │ ├── ConditionFunction$4.java │ │ │ │ │ │ │ ├── _01_InnerJoinsTest.java │ │ │ │ │ │ │ └── _02_InnerJoinsOnNotEqualTest.java │ │ │ │ │ │ └── _02_outer_join/ │ │ │ │ │ │ ├── _01_LeftJoinsTest.java │ │ │ │ │ │ ├── _02_RightJoinsTest.java │ │ │ │ │ │ └── _03_FullJoinsTest.java │ │ │ │ │ ├── _02_interval_joins/ │ │ │ │ │ │ ├── _01_proctime/ │ │ │ │ │ │ │ ├── Interval_Full_Joins_ProcesingTime_Test.java │ │ │ │ │ │ │ ├── 
Interval_Inner_Joins_ProcesingTime_Test.java │ │ │ │ │ │ │ ├── Interval_Left_Joins_ProcesingTime_Test.java │ │ │ │ │ │ │ └── Interval_Right_Joins_ProcesingTime_Test.java │ │ │ │ │ │ └── _02_row_time/ │ │ │ │ │ │ ├── Interval_Full_JoinsOnNotEqual_EventTime_Test.java │ │ │ │ │ │ ├── Interval_Full_Joins_EventTime_Test.java │ │ │ │ │ │ ├── Interval_Inner_Joins_EventTime_Test.java │ │ │ │ │ │ ├── Interval_Left_Joins_EventTime_Test.java │ │ │ │ │ │ └── Interval_Right_Joins_EventTime_Test.java │ │ │ │ │ ├── _03_temporal_join/ │ │ │ │ │ │ ├── _01_proctime/ │ │ │ │ │ │ │ └── Temporal_Join_ProcesingTime_Test.java │ │ │ │ │ │ └── _02_row_time/ │ │ │ │ │ │ └── Temporal_Join_EventTime_Test.java │ │ │ │ │ ├── _04_lookup_join/ │ │ │ │ │ │ └── _01_redis/ │ │ │ │ │ │ ├── RedisBatchLookupTest2.java │ │ │ │ │ │ ├── RedisDemo.java │ │ │ │ │ │ ├── RedisLookupTest.java │ │ │ │ │ │ ├── RedisLookupTest2.java │ │ │ │ │ │ └── pipeline/ │ │ │ │ │ │ ├── BatchJoinTableFuncCollector$8.java │ │ │ │ │ │ ├── BatchLookupFunction$4.java │ │ │ │ │ │ ├── JoinTableFuncCollector$8.java │ │ │ │ │ │ ├── JoinTableFuncCollector$9.java │ │ │ │ │ │ ├── LookupFunction$4.java │ │ │ │ │ │ ├── LookupFunction$5.java │ │ │ │ │ │ └── T1.java │ │ │ │ │ ├── _05_array_expansion/ │ │ │ │ │ │ └── _01_ArrayExpansionTest.java │ │ │ │ │ └── _06_table_function/ │ │ │ │ │ └── _01_inner_join/ │ │ │ │ │ ├── TableFunctionInnerJoin_Test.java │ │ │ │ │ └── TableFunctionInnerJoin_WithEmptyTableFunction_Test.java │ │ │ │ ├── _07_deduplication/ │ │ │ │ │ ├── DeduplicationProcessingTimeTest.java │ │ │ │ │ ├── DeduplicationProcessingTimeTest1.java │ │ │ │ │ └── DeduplicationRowTimeTest.java │ │ │ │ ├── _08_datastream_trans/ │ │ │ │ │ ├── AlertExample.java │ │ │ │ │ ├── AlertExampleRetract.java │ │ │ │ │ ├── AlertExampleRetractError.java │ │ │ │ │ ├── RetractExample.java │ │ │ │ │ └── Test.java │ │ │ │ ├── _09_set_operations/ │ │ │ │ │ ├── Except_Test.java │ │ │ │ │ ├── Exist_Test.java │ │ │ │ │ ├── In_Test.java │ │ │ │ │ ├── 
Intersect_Test.java │ │ │ │ │ ├── UnionAll_Test.java │ │ │ │ │ └── Union_Test.java │ │ │ │ ├── _10_order_by/ │ │ │ │ │ ├── OrderBy_with_time_attr_Test.java │ │ │ │ │ └── OrderBy_without_time_attr_Test.java │ │ │ │ ├── _11_limit/ │ │ │ │ │ └── Limit_Test.java │ │ │ │ ├── _12_topn/ │ │ │ │ │ └── TopN_Test.java │ │ │ │ ├── _13_window_topn/ │ │ │ │ │ └── WindowTopN_Test.java │ │ │ │ ├── _14_retract/ │ │ │ │ │ └── Retract_Test.java │ │ │ │ ├── _15_exec_options/ │ │ │ │ │ ├── Default_Parallelism_Test.java │ │ │ │ │ ├── Idle_Timeout_Test.java │ │ │ │ │ └── State_Ttl_Test.java │ │ │ │ ├── _16_optimizer_options/ │ │ │ │ │ ├── Agg_OnePhase_Strategy_window_Test.java │ │ │ │ │ ├── Agg_TwoPhase_Strategy_unbounded_Test.java │ │ │ │ │ ├── Agg_TwoPhase_Strategy_window_Test.java │ │ │ │ │ ├── DistinctAgg_Split_One_Distinct_Key_Test.java │ │ │ │ │ └── DistinctAgg_Split_Two_Distinct_Key_Test.java │ │ │ │ ├── _17_table_options/ │ │ │ │ │ ├── Dml_Syc_False_Test.java │ │ │ │ │ ├── Dml_Syc_True_Test.java │ │ │ │ │ └── TimeZone_window_Test.java │ │ │ │ └── _18_performance_tuning/ │ │ │ │ └── Count_Distinct_Filter_Test.java │ │ │ ├── _08/ │ │ │ │ └── batch/ │ │ │ │ ├── Utils.java │ │ │ │ ├── _01_ddl/ │ │ │ │ │ └── HiveDDLTest.java │ │ │ │ ├── _02_dml/ │ │ │ │ │ ├── HiveDMLBetweenAndTest.java │ │ │ │ │ ├── HiveDMLTest.java │ │ │ │ │ ├── HiveTest2.java │ │ │ │ │ ├── _01_hive_dialect/ │ │ │ │ │ │ └── HiveDMLTest.java │ │ │ │ │ ├── _02_with_as/ │ │ │ │ │ │ └── HIveWIthAsTest.java │ │ │ │ │ ├── _03_substr/ │ │ │ │ │ │ └── HiveSubstrTest.java │ │ │ │ │ ├── _04_tumble_window/ │ │ │ │ │ │ ├── Test.java │ │ │ │ │ │ ├── Test1.java │ │ │ │ │ │ ├── Test2_BIGINT_SOURCE.java │ │ │ │ │ │ ├── Test3.java │ │ │ │ │ │ └── Test5.java │ │ │ │ │ ├── _05_batch_to_datastream/ │ │ │ │ │ │ └── Test.java │ │ │ │ │ └── _06_select_where/ │ │ │ │ │ └── Test.java │ │ │ │ ├── _03_hive_udf/ │ │ │ │ │ ├── HiveModuleV2.java │ │ │ │ │ ├── HiveUDFRegistryTest.java │ │ │ │ │ ├── HiveUDFRegistryUnloadTest.java │ │ │ │ │ ├── 
_01_GenericUDAFResolver2/ │ │ │ │ │ │ ├── HiveUDAF_hive_module_registry_Test.java │ │ │ │ │ │ ├── HiveUDAF_sql_registry_create_function_Test.java │ │ │ │ │ │ ├── HiveUDAF_sql_registry_create_temporary_function_Test.java │ │ │ │ │ │ └── TestHiveUDAF.java │ │ │ │ │ ├── _02_GenericUDTF/ │ │ │ │ │ │ ├── HiveUDTF_hive_module_registry_Test.java │ │ │ │ │ │ ├── HiveUDTF_sql_registry_create_function_Test.java │ │ │ │ │ │ ├── HiveUDTF_sql_registry_create_temporary_function_Test.java │ │ │ │ │ │ └── TestHiveUDTF.java │ │ │ │ │ ├── _03_built_in_udf/ │ │ │ │ │ │ ├── _01_get_json_object/ │ │ │ │ │ │ │ └── HiveUDF_get_json_object_Test.java │ │ │ │ │ │ └── _02_rlike/ │ │ │ │ │ │ └── HiveUDF_rlike_Test.java │ │ │ │ │ └── _04_GenericUDF/ │ │ │ │ │ ├── HiveUDF_hive_module_registry_Test.java │ │ │ │ │ ├── HiveUDF_sql_registry_create_function_Test.java │ │ │ │ │ ├── HiveUDF_sql_registry_create_temporary_function_Test.java │ │ │ │ │ └── TestGenericUDF.java │ │ │ │ ├── _04_flink_udf/ │ │ │ │ │ ├── FlinkUDAF_Test.java │ │ │ │ │ ├── FlinkUDF_Test.java │ │ │ │ │ └── FlinkUDTF_Test.java │ │ │ │ └── _05_test/ │ │ │ │ └── _01_batch_to_datastream/ │ │ │ │ └── Test.java │ │ │ ├── _09/ │ │ │ │ └── udf/ │ │ │ │ ├── _01_hive_udf/ │ │ │ │ │ └── _01_GenericUDF/ │ │ │ │ │ ├── HiveUDF_sql_registry_create_function_Test.java │ │ │ │ │ ├── HiveUDF_sql_registry_create_function_with_hive_catalog_Test.java │ │ │ │ │ ├── HiveUDF_sql_registry_create_temporary_function_Test.java │ │ │ │ │ ├── HiveUDF_sql_registry_create_temporary_function_with_hive_catalog_Test.java │ │ │ │ │ └── TestGenericUDF.java │ │ │ │ ├── _02_stream_hive_udf/ │ │ │ │ │ ├── HiveUDF_Error_Test.java │ │ │ │ │ ├── HiveUDF_create_temporary_error_Test.java │ │ │ │ │ ├── HiveUDF_hive_module_registry_Test.java │ │ │ │ │ ├── HiveUDF_load_first_Test.java │ │ │ │ │ ├── HiveUDF_load_second_Test.java │ │ │ │ │ ├── TestGenericUDF.java │ │ │ │ │ └── UserDefinedSource.java │ │ │ │ ├── _03_advanced_type_inference/ │ │ │ │ │ ├── 
AdvancedFunctionsExample.java │ │ │ │ │ ├── InternalRowMergerFunction.java │ │ │ │ │ └── LastDatedValueFunction.java │ │ │ │ ├── _04_udf/ │ │ │ │ │ └── UDAF_Test.java │ │ │ │ └── _05_scalar_function/ │ │ │ │ ├── ExplodeUDTF.java │ │ │ │ ├── ExplodeUDTFV2.java │ │ │ │ ├── GetMapValue.java │ │ │ │ ├── GetSetValue.java │ │ │ │ ├── ScalarFunctionTest.java │ │ │ │ ├── ScalarFunctionTest2.java │ │ │ │ ├── SetStringUDF.java │ │ │ │ └── TableFunctionTest2.java │ │ │ ├── _10_share/ │ │ │ │ └── A.java │ │ │ ├── _11_explain/ │ │ │ │ └── Explain_Test.java │ │ │ └── _12_data_type/ │ │ │ ├── _01_interval/ │ │ │ │ ├── Timestamp3_Interval_To_Test.java │ │ │ │ └── Timestamp_ltz3_Interval_To_Test.java │ │ │ ├── _02_user_defined/ │ │ │ │ ├── User.java │ │ │ │ ├── UserDefinedDataTypes_Test.java │ │ │ │ ├── UserDefinedDataTypes_Test2.java │ │ │ │ └── UserScalarFunction.java │ │ │ └── _03_raw/ │ │ │ ├── RawScalarFunction.java │ │ │ └── Raw_DataTypes_Test2.java │ │ ├── javacc/ │ │ │ └── Simple1.jj │ │ ├── proto/ │ │ │ ├── source.proto │ │ │ └── test.proto │ │ ├── resources/ │ │ │ └── META-INF/ │ │ │ └── services/ │ │ │ └── org.apache.flink.table.factories.Factory │ │ └── scala/ │ │ └── flink/ │ │ └── examples/ │ │ └── sql/ │ │ └── _04/ │ │ └── type/ │ │ └── TableFunc0.scala │ └── test/ │ ├── java/ │ │ └── flink/ │ │ └── examples/ │ │ └── sql/ │ │ ├── _05/ │ │ │ └── format/ │ │ │ └── formats/ │ │ │ └── protobuf/ │ │ │ ├── row/ │ │ │ │ ├── ProtobufRowDeserializationSchemaTest.java │ │ │ │ └── ProtobufRowSerializationSchemaTest.java │ │ │ └── rowdata/ │ │ │ ├── ProtobufRowDataDeserializationSchemaTest.java │ │ │ └── ProtobufRowDataSerializationSchemaTest.java │ │ ├── _06/ │ │ │ └── calcite/ │ │ │ └── CalciteTest.java │ │ └── _07/ │ │ └── query/ │ │ └── _06_joins/ │ │ └── JaninoCompileTest.java │ ├── proto/ │ │ └── person.proto │ └── scala/ │ ├── ScalaEnv.scala │ └── TableFunc0.scala ├── flink-examples-1.14/ │ ├── pom.xml │ └── src/ │ └── main/ │ └── java/ │ └── flink/ │ └── examples/ │ └── 
sql/ │ └── _08/ │ └── batch/ │ ├── HiveModuleV2.java │ └── Test.java ├── flink-examples-1.8/ │ ├── .gitignore │ └── pom.xml └── pom.xml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ HELP.md target/ !.mvn/wrapper/maven-wrapper.jar !**/src/main/** #**/src/test/** .idea/ *.iml *.DS_Store ### IntelliJ IDEA ### .idea *.iws *.ipr ================================================ FILE: README.md ================================================ # 1.友情提示 > 1. 联系我:如要有问题咨询,请联系我(公众号:[`大数据羊说`](#32公众号),备注来自`GitHub`) > 2. 该仓库会持续更新 flink 教程福利干货,麻烦路过的各位亲给这个项目点个 `star`,太不易了,写了这么多,算是对我坚持下来的一种鼓励吧! ![在这里插入图片描述](https://raw.githubusercontent.com/yangyichao-mango/yangyichao-mango.github.io/master/1631459281928.png) ![Stargazers over time](https://starchart.cc/yangyichao-mango/flink-study.svg)

公众号 知乎 掘金 CSDN 51CTO博客 投稿

# 2.文章目录 > 以下列出的是作者对原创的一些文章和一些学习资源做了一个汇总,会持续更新!如果帮到了您,请点个star支持一下,谢谢! ## 2.1.flink sql 1. [公众号文章:踩坑记 | flink sql count 还有这种坑!](https://mp.weixin.qq.com/s/5XDkmuEIfHB_WsMHPeinkw),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror) 2. [公众号文章:实战 | flink sql 与微博热搜的碰撞!!!](https://mp.weixin.qq.com/s/GHLoWMBZxajA2nXPHhH8WA) 3. [公众号文章:flink sql 知其所以然(一)| source\sink 原理](https://mp.weixin.qq.com/s/xIXh8B_suAlKSp56aO5aEg),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink) 4. [公众号文章:flink sql 知其所以然(二)| 自定义 redis 数据维表(附源码)](https://mp.weixin.qq.com/s/b_zV_tGp5QJQjgnSaxNT_Q),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink) 5. [公众号文章:flink sql 知其所以然(三)| 自定义 redis 数据汇表(附源码)](https://mp.weixin.qq.com/s/7Fwey_AXNJ0jQZWfXvtNmw),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink) 6. [公众号文章:flink sql 知其所以然(四)| sql api 类型系统](https://mp.weixin.qq.com/s/aqDRWgr3Kim7lblx10JvtA),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_04/type) 7. [公众号文章:flink sql 知其所以然(五)| 自定义 protobuf format](https://mp.weixin.qq.com/s/STUC4trW-HA3cnrsqT-N6g),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats) 8. [公众号文章:flink sql 知其所以然(六)| flink sql 约会 calcite(看这篇就够了)](https://mp.weixin.qq.com/s/SxRKp368mYSKVmuduPoXFg),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite) 9. 
[公众号文章:flink sql 知其所以然(七):不会连最适合 flink sql 的 ETL 和 group agg 场景都没见过吧?](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query) 10. [公众号文章:flink sql 知其所以然(八):flink sql tumble window 的奇妙解析之路](https://mp.weixin.qq.com/s/IRmt8dWmxAmbBh696akHdw),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window) 11. [公众号文章:flink sql 知其所以然(九):window tvf tumble window 的奇思妙解](https://mp.weixin.qq.com/s/QVuu5_N4lHo5gXlt1tdncw),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window) 12. [公众号文章:flink sql 知其所以然(十):大家都用 cumulate window 啦](https://mp.weixin.qq.com/s/IqAzjrQmcGmnxvHm1FAV5g),[源码](https://github.com/yangyichao-mango/flink-study/blob/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/CumulateWindowTest.java) 13. [公众号文章:flink sql 知其所以然(十一):去重不仅仅有 count distinct 还有强大的 deduplication](https://mp.weixin.qq.com/s/VL6egD76B4J7IcpHShTq7Q),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number) 14. [公众号文章:flink sql 知其所以然(十二):流 join 很难嘛???(上)](https://mp.weixin.qq.com/s/Z8QfKfhrX5KEnR-s7gRtsA),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins) 15. [公众号文章:flink sql 知其所以然(十三):流 join 很难嘛???(下)](),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins) 16. [公众号文章:flink sql 知其所以然(十四):维表 join 的性能优化之路(上)附源码](),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis) 17. 
[公众号文章:flink sql 知其所以然(十五):改了改源码,实现了个 batch lookup join(附源码)](),[源码](https://github.com/yangyichao-mango/flink-study/blob/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/RedisBatchLookupTest2.java) 18. [公众号文章:flink sql 知其所以然(十八):在 flink 中怎么使用 hive udf?附源码](),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf) 19. [公众号文章:flink sql 知其所以然(十九):Table 与 DataStream 的转转转(附源码)](),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_08_datastream_trans) 20. [公众号文章:(上)史上最全干货!Flink SQL 成神之路(全文 18 万字、138 个案例、42 张图)](),[源码](https://github.com/yangyichao-mango/flink-study/blob/main/flink-examples-1.13/src/main/java/flink/examples/sql) 21. [公众号文章:(中)史上最全干货!Flink SQL 成神之路(全文 18 万字、138 个案例、42 张图)](),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql) 22. [公众号文章:(下)史上最全干货!Flink SQL 成神之路(全文 18 万字、138 个案例、42 张图)](),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/sql) ## 2.2.flink 实战 1. [公众号文章:揭秘字节跳动埋点数据实时动态处理引擎(附源码)](https://mp.weixin.qq.com/s/PoK0XOA9OHIDJezb1fLOMw),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split) 2. [公众号文章:踩坑记| flink state 序列化 java enum 竟然岔劈了](https://mp.weixin.qq.com/s/YElwTL-wzo2UVVIsIH_9YA),[源码](https://github.com/yangyichao-mango/flink-study/tree/main/flink-examples-1.13/src/main/java/flink/examples/datastream/_03/enums_state) 3. 
[公众号文章:flink idea 本地调试状态恢复](https://mp.weixin.qq.com/s/rLeKY_49q8rR9C_RmlTmhg),[源码](https://github.com/yangyichao-mango/flink-study/blob/main/flink-examples-1.13/src/main/java/flink/examples/runtime/_04/statebackend/CancelAndRestoreWithCheckpointTest.java) # 3.联系我 ## 3.1.微信 有任何学习上的疑惑都欢迎添加作者的微信,一起学习,一起交流! ![在这里插入图片描述](https://raw.githubusercontent.com/yangyichao-mango/yangyichao-mango.github.io/master/1.png) ## 3.2.公众号 如果大家想要实时关注我更新的文章以及分享的干货的话,可以关注我的公众号:**大数据羊说** ![在这里插入图片描述](https://raw.githubusercontent.com/yangyichao-mango/yangyichao-mango.github.io/master/2.png) ================================================ FILE: flink-examples-1.10/pom.xml ================================================ flink-study com.github.antigeneral 1.0-SNAPSHOT 4.0.0 com.github.antigeneral flink-examples-1.10 kr.motd.maven os-maven-plugin ${os-maven-plugin.version} org.apache.maven.plugins maven-compiler-plugin 8 8 org.xolstice.maven.plugins protobuf-maven-plugin ${protobuf-maven-plugin.version} src/test/proto com.google.protobuf:protoc:3.1.0:exe:${os.detected.classifier} grpc-java io.grpc:protoc-gen-grpc-java:${grpc-plugin.version}:exe:${os.detected.classifier} compile compile-custom 1.10.1 1.18.20 2.11 2.4.12.Final 2.12.0 2.1.1 2.5.7 2.2.4 30.1.1-jre 2.0.0 1.2.3 1.8.0-beta2 1.23.1 0.6.1 3.11.0 2.5 1.6.2 ================================================ FILE: flink-examples-1.10/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_01_outer_join/WindowJoinFunction$46.java ================================================ package flink.examples.sql._07.query._06_joins._02_interval_joins._01_outer_join; public class WindowJoinFunction$46 extends org.apache.flink.api.common.functions.RichFlatJoinFunction { final org.apache.flink.table.dataformat.JoinedRow joinedRow = new org.apache.flink.table.dataformat.JoinedRow(); public WindowJoinFunction$46(Object[] references) throws Exception { } @Override public void open(org.apache.flink.configuration.Configuration parameters) 
throws Exception { } @Override public void join(Object _in1, Object _in2, org.apache.flink.util.Collector c) throws Exception { org.apache.flink.table.dataformat.BaseRow in1 = (org.apache.flink.table.dataformat.BaseRow) _in1; org.apache.flink.table.dataformat.BaseRow in2 = (org.apache.flink.table.dataformat.BaseRow) _in2; int result$40; boolean isNull$40; int field$41; boolean isNull$41; int result$42; boolean isNull$42; int field$43; boolean isNull$43; boolean isNull$44; boolean result$45; result$40 = -1; isNull$40 = true; if (in1 != null) { isNull$41 = in1.isNullAt(0); field$41 = -1; if (!isNull$41) { field$41 = in1.getInt(0); } result$40 = field$41; isNull$40 = isNull$41; } result$42 = -1; isNull$42 = true; if (in2 != null) { isNull$43 = in2.isNullAt(0); field$43 = -1; if (!isNull$43) { field$43 = in2.getInt(0); } result$42 = field$43; isNull$42 = isNull$43; } isNull$44 = isNull$40 || isNull$42; result$45 = false; if (!isNull$44) { result$45 = result$40 == result$42; } if (result$45) { joinedRow.replace(in1, in2); c.collect(joinedRow); } } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.10/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_01_outer_join/_06_Interval_Outer_Joins_EventTime_Test.java ================================================ package flink.examples.sql._07.query._06_joins._02_interval_joins._01_outer_join; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import 
org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class _06_Interval_Outer_Joins_EventTime_Test { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(10); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.10.1 Interval Join 事件时间案例"); DataStream sourceTable = env.addSource(new UserDefinedSource1()) 
.assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.minutes(0L)) { @Override public long extractTimestamp(Row row) { return (long) row.getField(2); } }); tEnv.createTemporaryView("source_table", sourceTable, "user_id, name, timestamp, rowtime.rowtime"); DataStream dimTable = env.addSource(new UserDefinedSource2()) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.minutes(0L)) { @Override public long extractTimestamp(Row row) { return (long) row.getField(2); } }); tEnv.createTemporaryView("dim_table", dimTable, "user_id, platform, timestamp, rowtime.rowtime"); String sql = "SELECT\n" + " s.user_id as user_id,\n" + " s.name as name,\n" + " d.platform as platform\n" + "FROM source_table as s\n" + "FULL JOIN dim_table as d ON s.user_id = d.user_id\n" + "AND s.rowtime BETWEEN d.rowtime AND d.rowtime + INTERVAL '30' SECOND"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay} * -> {@link org.apache.flink.table.runtime.operators.join.RowTimeBoundedStreamJoin} */ Table result = tEnv.sqlQuery(sql); tEnv.toAppendStream(result, Row.class) .print(); env.execute("1.10.1 Interval Full Join 事件时间案例"); } private static class UserDefinedSource1 implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { Row row = new Row(3); row.setField(0, i); row.setField(1, "name"); long timestamp = System.currentTimeMillis(); row.setField(2, timestamp); sourceContext.collect(row); Thread.sleep(1000L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO); } } private static class UserDefinedSource2 implements SourceFunction, ResultTypeQueryable { private volatile boolean 
isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 10; while (!this.isCancel) { Row row = new Row(3); row.setField(0, i); row.setField(1, "platform"); long timestamp = System.currentTimeMillis(); row.setField(2, timestamp); sourceContext.collect(row); Thread.sleep(1000L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO); } } } ================================================ FILE: flink-examples-1.12/.gitignore ================================================ HELP.md target/ !.mvn/wrapper/maven-wrapper.jar !**/src/main/** #**/src/test/** .idea/ *.iml *.DS_Store ### IntelliJ IDEA ### .idea *.iws *.ipr ================================================ FILE: flink-examples-1.12/pom.xml ================================================ flink-study com.github.antigeneral 1.0-SNAPSHOT 4.0.0 com.github.antigeneral flink-examples-1.12 ================================================ FILE: flink-examples-1.12/src/main/java/flink/examples/datastream/_07/query/_04_window/_04_TumbleWindowTest.java ================================================ package flink.examples.datastream._07.query._04_window; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.tuple.Tuple4; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; import org.apache.flink.streaming.api.windowing.time.Time; public class _04_TumbleWindowTest { public static void 
main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); env.setParallelism(1); env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); env.addSource(new UserDefinedSource()) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor>(Time.seconds(0)) { @Override public long extractTimestamp(Tuple4 element) { return element.f3; } }) .keyBy(new KeySelector, String>() { @Override public String getKey(Tuple4 row) throws Exception { return row.f0; } }) .window(TumblingEventTimeWindows.of(Time.seconds(10))) .sum(2) .print(); env.execute("1.12.1 DataStream TUMBLE WINDOW 案例"); } private static class UserDefinedSource implements SourceFunction> { private volatile boolean isCancel; @Override public void run(SourceContext> sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect(Tuple4.of("a", "b", 1, System.currentTimeMillis())); Thread.sleep(10L); } } @Override public void cancel() { this.isCancel = true; } } } ================================================ FILE: flink-examples-1.12/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_04_TumbleWindowTest.java ================================================ package flink.examples.sql._07.query._04_window_agg; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class _04_TumbleWindowTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = 
StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sourceSql = "CREATE TABLE source_table (\n" + " dim STRING,\n" + " user_id BIGINT,\n" + " price BIGINT,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.dim.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '100000'\n" + ")"; String sinkSql = "CREATE TABLE sink_table (\n" + " dim STRING,\n" + " pv BIGINT,\n" + " sum_price BIGINT,\n" + " max_price BIGINT,\n" + " min_price BIGINT,\n" + " uv BIGINT,\n" + " window_start bigint\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; String selectWhereSql = "insert into sink_table\n" + "select dim,\n" + " sum(bucket_pv) as pv,\n" + " sum(bucket_sum_price) as sum_price,\n" + " max(bucket_max_price) as max_price,\n" + " min(bucket_min_price) as min_price,\n" + " sum(bucket_uv) as uv,\n" + " max(window_start) as window_start\n" + "from 
(\n" + " select dim,\n" + " count(*) as bucket_pv,\n" + " sum(price) as bucket_sum_price,\n" + " max(price) as bucket_max_price,\n" + " min(price) as bucket_min_price,\n" + " count(distinct user_id) as bucket_uv,\n" + " cast(tumble_start(row_time, interval '1' minute) as bigint) * 1000 as window_start\n" + " from source_table\n" + " group by\n" + " mod(user_id, 1024),\n" + " dim,\n" + " tumble(row_time, interval '1' minute)\n" + ")\n" + "group by dim,\n" + " window_start"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.12.1 TUMBLE WINDOW 案例"); tEnv.executeSql(sourceSql); tEnv.executeSql(sinkSql); tEnv.executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.12/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_04_TumbleWindowTest_GroupingWindowAggsHandler$59.java ================================================ package flink.examples.sql._07.query._04_window_agg; public final class _04_TumbleWindowTest_GroupingWindowAggsHandler$59 implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { long agg0_count1; boolean agg0_count1IsNull; long agg1_sum; boolean agg1_sumIsNull; long agg2_max; boolean agg2_maxIsNull; long agg3_min; boolean agg3_minIsNull; long agg4_count; boolean agg4_countIsNull; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$22; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$23; private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview; private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value; private org.apache.flink.table.api.dataview.MapView distinct_view_0; org.apache.flink.table.data.GenericRowData acc$25 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData acc$27 = new org.apache.flink.table.data.GenericRowData(6); 
org.apache.flink.table.data.GenericRowData aggValue$58 = new org.apache.flink.table.data.GenericRowData(9); private org.apache.flink.table.runtime.dataview.StateDataViewStore store; private org.apache.flink.table.runtime.operators.window.TimeWindow namespace; public _04_TumbleWindowTest_GroupingWindowAggsHandler$59(Object[] references) throws Exception { externalSerializer$22 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[0])); externalSerializer$23 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1])); } private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { return store.getRuntimeContext(); } @Override public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { this.store = store; distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("distinctAcc_0", true, externalSerializer$22, externalSerializer$23); distinctAcc_0_dataview_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview); distinct_view_0 = distinctAcc_0_dataview; } @Override public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { boolean isNull$34; long result$35; long field$36; boolean isNull$36; boolean isNull$37; long result$38; boolean isNull$41; boolean result$42; boolean isNull$46; boolean result$47; long field$51; boolean isNull$51; boolean isNull$53; long result$54; isNull$51 = accInput.isNullAt(4); field$51 = -1L; if (!isNull$51) { field$51 = accInput.getLong(4); } isNull$36 = accInput.isNullAt(3); field$36 = -1L; if (!isNull$36) { field$36 = accInput.getLong(3); } isNull$34 = agg0_count1IsNull || false; result$35 = -1L; if (!isNull$34) { result$35 = (long) (agg0_count1 + ((long) 1L)); } agg0_count1 = result$35; ; agg0_count1IsNull = isNull$34; long result$40 = -1L; boolean isNull$40; if (isNull$36) { isNull$40 = agg1_sumIsNull; if (!isNull$40) { 
result$40 = agg1_sum; } } else { long result$39 = -1L; boolean isNull$39; if (agg1_sumIsNull) { isNull$39 = isNull$36; if (!isNull$39) { result$39 = field$36; } } else { isNull$37 = agg1_sumIsNull || isNull$36; result$38 = -1L; if (!isNull$37) { result$38 = (long) (agg1_sum + field$36); } isNull$39 = isNull$37; if (!isNull$39) { result$39 = result$38; } } isNull$40 = isNull$39; if (!isNull$40) { result$40 = result$39; } } agg1_sum = result$40; ; agg1_sumIsNull = isNull$40; long result$45 = -1L; boolean isNull$45; if (isNull$36) { isNull$45 = agg2_maxIsNull; if (!isNull$45) { result$45 = agg2_max; } } else { long result$44 = -1L; boolean isNull$44; if (agg2_maxIsNull) { isNull$44 = isNull$36; if (!isNull$44) { result$44 = field$36; } } else { isNull$41 = isNull$36 || agg2_maxIsNull; result$42 = false; if (!isNull$41) { result$42 = field$36 > agg2_max; } long result$43 = -1L; boolean isNull$43; if (result$42) { isNull$43 = isNull$36; if (!isNull$43) { result$43 = field$36; } } else { isNull$43 = agg2_maxIsNull; if (!isNull$43) { result$43 = agg2_max; } } isNull$44 = isNull$43; if (!isNull$44) { result$44 = result$43; } } isNull$45 = isNull$44; if (!isNull$45) { result$45 = result$44; } } agg2_max = result$45; ; agg2_maxIsNull = isNull$45; long result$50 = -1L; boolean isNull$50; if (isNull$36) { isNull$50 = agg3_minIsNull; if (!isNull$50) { result$50 = agg3_min; } } else { long result$49 = -1L; boolean isNull$49; if (agg3_minIsNull) { isNull$49 = isNull$36; if (!isNull$49) { result$49 = field$36; } } else { isNull$46 = isNull$36 || agg3_minIsNull; result$47 = false; if (!isNull$46) { result$47 = field$36 < agg3_min; } long result$48 = -1L; boolean isNull$48; if (result$47) { isNull$48 = isNull$36; if (!isNull$48) { result$48 = field$36; } } else { isNull$48 = agg3_minIsNull; if (!isNull$48) { result$48 = agg3_min; } } isNull$49 = isNull$48; if (!isNull$49) { result$49 = result$48; } } isNull$50 = isNull$49; if (!isNull$50) { result$50 = result$49; } } agg3_min = 
result$50; ; agg3_minIsNull = isNull$50; Long distinctKey$52 = (Long) field$51; if (isNull$51) { distinctKey$52 = null; } Long value$56 = (Long) distinct_view_0.get(distinctKey$52); if (value$56 == null) { value$56 = 0L; } boolean is_distinct_value_changed_0 = false; long existed$57 = ((long) value$56) & (1L << 0); if (existed$57 == 0) { // not existed value$56 = ((long) value$56) | (1L << 0); is_distinct_value_changed_0 = true; long result$55 = -1L; boolean isNull$55; if (isNull$51) { isNull$55 = agg4_countIsNull; if (!isNull$55) { result$55 = agg4_count; } } else { isNull$53 = agg4_countIsNull || false; result$54 = -1L; if (!isNull$53) { result$54 = (long) (agg4_count + ((long) 1L)); } isNull$55 = isNull$53; if (!isNull$55) { result$55 = result$54; } } agg4_count = result$55; ; agg4_countIsNull = isNull$55; } if (is_distinct_value_changed_0) { distinct_view_0.put(distinctKey$52, value$56); } } @Override public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { throw new RuntimeException( "This function not require retract method, but the retract method is called."); } @Override public void merge(org.apache.flink.table.runtime.operators.window.TimeWindow ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; throw new RuntimeException("This function not require merge method, but the merge method is called."); } @Override public void setAccumulators(org.apache.flink.table.runtime.operators.window.TimeWindow ns, org.apache.flink.table.data.RowData acc) throws Exception { namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; long field$28; boolean isNull$28; long field$29; boolean isNull$29; long field$30; boolean isNull$30; long field$31; boolean isNull$31; long field$32; boolean isNull$32; org.apache.flink.table.data.binary.BinaryRawValueData field$33; boolean isNull$33; isNull$32 = acc.isNullAt(4); field$32 = -1L; if 
(!isNull$32) { field$32 = acc.getLong(4); } isNull$28 = acc.isNullAt(0); field$28 = -1L; if (!isNull$28) { field$28 = acc.getLong(0); } isNull$29 = acc.isNullAt(1); field$29 = -1L; if (!isNull$29) { field$29 = acc.getLong(1); } isNull$31 = acc.isNullAt(3); field$31 = -1L; if (!isNull$31) { field$31 = acc.getLong(3); } // when namespace is null, the dataview is used in heap, no key and namespace set if (namespace != null) { distinctAcc_0_dataview.setCurrentNamespace(namespace); distinct_view_0 = distinctAcc_0_dataview; } else { isNull$33 = acc.isNullAt(5); field$33 = null; if (!isNull$33) { field$33 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); } distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$33.getJavaObject(); } isNull$30 = acc.isNullAt(2); field$30 = -1L; if (!isNull$30) { field$30 = acc.getLong(2); } agg0_count1 = field$28; ; agg0_count1IsNull = isNull$28; agg1_sum = field$29; ; agg1_sumIsNull = isNull$29; agg2_max = field$30; ; agg2_maxIsNull = isNull$30; agg3_min = field$31; ; agg3_minIsNull = isNull$31; agg4_count = field$32; ; agg4_countIsNull = isNull$32; } @Override public org.apache.flink.table.data.RowData getAccumulators() throws Exception { acc$27 = new org.apache.flink.table.data.GenericRowData(6); if (agg0_count1IsNull) { acc$27.setField(0, null); } else { acc$27.setField(0, agg0_count1); } if (agg1_sumIsNull) { acc$27.setField(1, null); } else { acc$27.setField(1, agg1_sum); } if (agg2_maxIsNull) { acc$27.setField(2, null); } else { acc$27.setField(2, agg2_max); } if (agg3_minIsNull) { acc$27.setField(3, null); } else { acc$27.setField(3, agg3_min); } if (agg4_countIsNull) { acc$27.setField(4, null); } else { acc$27.setField(4, agg4_count); } org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$26 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); if (false) { acc$27.setField(5, null); } else { acc$27.setField(5, distinct_acc$26); } return 
acc$27; } @Override public org.apache.flink.table.data.RowData createAccumulators() throws Exception { acc$25 = new org.apache.flink.table.data.GenericRowData(6); if (false) { acc$25.setField(0, null); } else { acc$25.setField(0, ((long) 0L)); } if (true) { acc$25.setField(1, null); } else { acc$25.setField(1, ((long) -1L)); } if (true) { acc$25.setField(2, null); } else { acc$25.setField(2, ((long) -1L)); } if (true) { acc$25.setField(3, null); } else { acc$25.setField(3, ((long) -1L)); } if (false) { acc$25.setField(4, null); } else { acc$25.setField(4, ((long) 0L)); } org.apache.flink.table.api.dataview.MapView mapview$24 = new org.apache.flink.table.api.dataview.MapView(); org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$24 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$24); if (false) { acc$25.setField(5, null); } else { acc$25.setField(5, distinct_acc$24); } return acc$25; } @Override public org.apache.flink.table.data.RowData getValue(org.apache.flink.table.runtime.operators.window.TimeWindow ns) throws Exception { namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; aggValue$58 = new org.apache.flink.table.data.GenericRowData(9); if (agg0_count1IsNull) { aggValue$58.setField(0, null); } else { aggValue$58.setField(0, agg0_count1); } if (agg1_sumIsNull) { aggValue$58.setField(1, null); } else { aggValue$58.setField(1, agg1_sum); } if (agg2_maxIsNull) { aggValue$58.setField(2, null); } else { aggValue$58.setField(2, agg2_max); } if (agg3_minIsNull) { aggValue$58.setField(3, null); } else { aggValue$58.setField(3, agg3_min); } if (agg4_countIsNull) { aggValue$58.setField(4, null); } else { aggValue$58.setField(4, agg4_count); } if (false) { aggValue$58.setField(5, null); } else { aggValue$58.setField(5, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace.getStart())); } if (false) { aggValue$58.setField(6, null); } else { aggValue$58.setField(6, 
org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace.getEnd())); } if (false) { aggValue$58.setField(7, null); } else { aggValue$58.setField(7, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace.getEnd() - 1)); } if (true) { aggValue$58.setField(8, null); } else { aggValue$58.setField(8, org.apache.flink.table.data.TimestampData.fromEpochMillis(-1L)); } return aggValue$58; } @Override public void cleanup(org.apache.flink.table.runtime.operators.window.TimeWindow ns) throws Exception { namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; distinctAcc_0_dataview.setCurrentNamespace(namespace); distinctAcc_0_dataview.clear(); } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.12/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_04_TumbleWindowTest_KeyProjection$69.java ================================================ package flink.examples.sql._07.query._04_window_agg; public final class _04_TumbleWindowTest_KeyProjection$69 implements org.apache.flink.table.runtime.generated.Projection { org.apache.flink.table.data.binary.BinaryRowData out = new org.apache.flink.table.data.binary.BinaryRowData(2); org.apache.flink.table.data.writer.BinaryRowWriter outWriter = new org.apache.flink.table.data.writer.BinaryRowWriter(out); public _04_TumbleWindowTest_KeyProjection$69(Object[] references) throws Exception { } @Override public org.apache.flink.table.data.binary.BinaryRowData apply(org.apache.flink.table.data.RowData in1) { int field$70; boolean isNull$70; org.apache.flink.table.data.binary.BinaryStringData field$71; boolean isNull$71; outWriter.reset(); isNull$70 = in1.isNullAt(0); field$70 = -1; if (!isNull$70) { field$70 = in1.getInt(0); } if (isNull$70) { outWriter.setNullAt(0); } else { outWriter.writeInt(0, field$70); } isNull$71 = in1.isNullAt(1); field$71 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8; if 
(!isNull$71) { field$71 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(1)); } if (isNull$71) { outWriter.setNullAt(1); } else { outWriter.writeString(1, field$71); } outWriter.complete(); return out; } } ================================================ FILE: flink-examples-1.12/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_04_TumbleWindowTest_WatermarkGenerator$6.java ================================================ package flink.examples.sql._07.query._04_window_agg; public final class _04_TumbleWindowTest_WatermarkGenerator$6 extends org.apache.flink.table.runtime.generated.WatermarkGenerator { public _04_TumbleWindowTest_WatermarkGenerator$6(Object[] references) throws Exception { } @Override public void open(org.apache.flink.configuration.Configuration parameters) throws Exception { } @Override public Long currentWatermark(org.apache.flink.table.data.RowData row) throws Exception { org.apache.flink.table.data.TimestampData field$7; boolean isNull$7; boolean isNull$8; org.apache.flink.table.data.TimestampData result$9; isNull$7 = row.isNullAt(3); field$7 = null; if (!isNull$7) { field$7 = row.getTimestamp(3, 3); } isNull$8 = isNull$7 || false; result$9 = null; if (!isNull$8) { result$9 = org.apache.flink.table.data.TimestampData .fromEpochMillis(field$7.getMillisecond() - ((long) 5000L), field$7.getNanoOfMillisecond()); } if (isNull$8) { return null; } else { return result$9.getMillisecond(); } } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.13/.gitignore ================================================ HELP.md target/ !.mvn/wrapper/maven-wrapper.jar !**/src/main/** #**/src/test/** .idea/ *.iml *.DS_Store ### IntelliJ IDEA ### .idea *.iws *.ipr ================================================ FILE: flink-examples-1.13/pom.xml ================================================ flink-study com.github.antigeneral 1.0-SNAPSHOT 4.0.0 
com.github.antigeneral flink-examples-1.13 kr.motd.maven os-maven-plugin ${os-maven-plugin.version} org.apache.maven.plugins maven-compiler-plugin org.xolstice.maven.plugins protobuf-maven-plugin com.google.protobuf protobuf-java org.apache.flink flink-connector-hive_2.11 org.apache.hadoop hadoop-common 3.1.0 compile slf4j-log4j12 org.slf4j commons-logging commmons-logging servlet-api javax.servlet true org.apache.hive hive-exec log4j-slf4j-impl org.apache.logging.log4j guava com.google.guava org.apache.hadoop hadoop-mapreduce-client-core 3.1.0 slf4j-log4j12 org.slf4j jersey-client com.sun.jersey jersey-server com.sun.jersey jersey-servlet com.sun.jersey jersey-core com.sun.jersey jersey-json com.sun.jersey guava com.google.guava com.twitter chill-protobuf com.esotericsoftware.kryo kryo junit junit test net.java.dev.javacc javacc org.apache.httpcomponents httpclient 4.5.10 compile org.apache.flink flink-statebackend-rocksdb_2.11 ${flink.version} joda-time joda-time provided true com.github.rholder guava-retrying guava com.google.guava org.projectlombok lombok org.apache.flink flink-java ${flink.version} org.apache.flink flink-streaming-java_2.11 ${flink.version} flink-shaded-zookeeper-3 org.apache.flink flink-shaded-guava org.apache.flink org.apache.flink flink-clients_2.11 ${flink.version} org.mvel mvel2 redis.clients jedis org.apache.curator curator-framework org.apache.curator curator-recipes org.apache.kafka kafka-clients org.codehaus.groovy groovy org.codehaus.groovy groovy-ant org.codehaus.groovy groovy-cli-commons org.codehaus.groovy groovy-cli-picocli org.codehaus.groovy groovy-console org.codehaus.groovy groovy-datetime org.codehaus.groovy groovy-docgenerator org.codehaus.groovy groovy-groovydoc org.codehaus.groovy groovy-groovysh org.codehaus.groovy groovy-jmx org.codehaus.groovy groovy-json org.codehaus.groovy groovy-jsr223 org.codehaus.groovy groovy-macro org.codehaus.groovy groovy-nio org.codehaus.groovy groovy-servlet org.codehaus.groovy groovy-sql 
org.codehaus.groovy groovy-swing org.codehaus.groovy groovy-templates org.codehaus.groovy groovy-test org.codehaus.groovy groovy-test-junit5 org.codehaus.groovy groovy-testng org.codehaus.groovy groovy-xml org.apache.flink flink-streaming-scala_2.11 ${flink.version} mysql mysql-connector-java ${mysql.version} com.google.code.gson gson org.apache.flink flink-table-common ${flink.version} compile org.apache.flink flink-table-api-java ${flink.version} compile org.apache.flink flink-table-api-java-bridge_2.11 ${flink.version} compile org.apache.flink flink-table-planner-blink_2.11 ${flink.version} compile org.apache.flink flink-connector-jdbc_2.11 ${flink.version} org.apache.flink flink-connector-hbase-2.2_2.11 ${flink.version} hbase-shaded-miscellaneous org.apache.hbase.thirdparty org.apache.flink flink-json ${flink.version} org.apache.bahir flink-connector-redis_2.10 1.0 org.apache.flink flink-connector-kafka_2.12 ch.qos.logback logback-classic compile org.slf4j slf4j-log4j12 org.apache.flink flink-runtime-web_2.11 ${flink.version} com.fasterxml.jackson.core jackson-databind com.fasterxml.jackson.core jackson-core com.fasterxml.jackson.core jackson-annotations com.fasterxml.jackson.module jackson-module-kotlin com.fasterxml.jackson.module jackson-module-parameter-names com.fasterxml.jackson.datatype jackson-datatype-guava guava com.google.guava com.hubspot.jackson jackson-datatype-protobuf guava com.google.guava org.apache.calcite calcite-core guava com.google.guava com.google.guava guava ================================================ FILE: flink-examples-1.13/src/main/java/flink/core/source/JaninoUtils.java ================================================ package flink.core.source; import org.codehaus.janino.SimpleCompiler; import lombok.extern.slf4j.Slf4j; @Slf4j public class JaninoUtils { private static final SimpleCompiler COMPILER = new SimpleCompiler(); static { COMPILER.setParentClassLoader(JaninoUtils.class.getClassLoader()); } public static Class 
genClass(String className, String code, Class clazz) throws Exception { COMPILER.cook(code); System.out.println("生成的代码:\n" + code); return (Class) COMPILER.getClassLoader().loadClass(className); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/core/source/SourceFactory.java ================================================ package flink.core.source; import java.io.IOException; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.serialization.SerializationSchema; import com.google.protobuf.GeneratedMessageV3; import flink.examples.datastream._04.keyed_co_process.protobuf.Source; import lombok.SneakyThrows; public class SourceFactory { public static SerializationSchema getProtobufSer(Class clazz) { return new SerializationSchema() { @Override public byte[] serialize(Message element) { return element.toByteArray(); } }; } @SneakyThrows public static DeserializationSchema getProtobufDerse(Class clazz) { String code = TEMPLATE.replaceAll("\\$\\{ProtobufClassName}", clazz.getName()) .replaceAll("\\$\\{SimpleProtobufName}", clazz.getSimpleName()); String className = clazz.getSimpleName() + "_DeserializationSchema"; Class deClass = JaninoUtils.genClass(className, code, DeserializationSchema.class); return deClass.newInstance(); } private static final String TEMPLATE = "public class ${SimpleProtobufName}_DeserializationSchema extends org.apache.flink.api.common" + ".serialization.AbstractDeserializationSchema<${ProtobufClassName}> {\n" + "\n" + " public ${SimpleProtobufName}_DeserializationSchema() {\n" + " super(${ProtobufClassName}.class);\n" + " }\n" + "\n" + " @Override\n" + " public ${ProtobufClassName} deserialize(byte[] message) throws java.io.IOException {\n" + " return ${ProtobufClassName}.parseFrom(message);\n" + " }\n" + "}"; public static void main(String[] args) throws IOException { System.out.println(SourceFactory.class.getName()); 
System.out.println(SourceFactory.class.getCanonicalName()); System.out.println(SourceFactory.class.getSimpleName()); System.out.println(SourceFactory.class.getTypeName()); DeserializationSchema ds = getProtobufDerse(Source.class); Source s = Source.newBuilder() .addNames("antigeneral") .build(); Source s1 = ds.deserialize(s.toByteArray()); System.out.println(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/FlinkEnvUtils.java ================================================ package flink.examples; import java.io.IOException; import java.util.Optional; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.contrib.streaming.state.PredefinedOptions; import org.apache.flink.contrib.streaming.state.RocksDBStateBackend; import org.apache.flink.runtime.state.StateBackend; import org.apache.flink.runtime.state.filesystem.FsStateBackend; import org.apache.flink.runtime.state.memory.MemoryStateBackend; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; import lombok.Builder; import lombok.Data; public class FlinkEnvUtils { private static final boolean ENABLE_INCREMENTAL_CHECKPOINT = true; private static final int NUMBER_OF_TRANSFER_THREADS = 3; /** * 设置状态后端为 RocksDBStateBackend * * @param env env */ 
public static void setRocksDBStateBackend(StreamExecutionEnvironment env) throws IOException { setCheckpointConfig(env); RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend( "file:///Users/flink/checkpoints", ENABLE_INCREMENTAL_CHECKPOINT); rocksDBStateBackend.setNumberOfTransferThreads(NUMBER_OF_TRANSFER_THREADS); rocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM); env.setStateBackend((StateBackend) rocksDBStateBackend); } /** * 设置状态后端为 FsStateBackend * * @param env env */ public static void setFsStateBackend(StreamExecutionEnvironment env) throws IOException { setCheckpointConfig(env); FsStateBackend fsStateBackend = new FsStateBackend("file:///Users/flink/checkpoints"); env.setStateBackend((StateBackend) fsStateBackend); } /** * 设置状态后端为 MemoryStateBackend * * @param env env */ public static void setMemoryStateBackend(StreamExecutionEnvironment env) throws IOException { setCheckpointConfig(env); env.setStateBackend((StateBackend) new MemoryStateBackend()); } /** * Checkpoint 参数相关配置,but 不设置 StateBackend,即:读取 flink-conf.yaml 文件的配置 * * @param env env */ public static void setCheckpointConfig(StreamExecutionEnvironment env) throws IOException { env.getCheckpointConfig().setCheckpointTimeout(TimeUnit.MINUTES.toMillis(3)); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(180 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); Configuration configuration = new Configuration(); configuration.setString("state.checkpoints.num-retained", "3"); env.configure(configuration, Thread.currentThread().getContextClassLoader()); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); } public static FlinkEnv getStreamTableEnv(String[] args) throws IOException { ParameterTool parameterTool = ParameterTool.fromArgs(args); Configuration configuration = 
Configuration.fromMap(parameterTool.toMap()); configuration.setString("rest.flamegraph.enabled", "true"); StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration); String stateBackend = parameterTool.get("state.backend", "rocksdb"); env.setParallelism(1); if ("rocksdb".equals(stateBackend)) { setRocksDBStateBackend(env); } else if ("filesystem".equals(stateBackend)) { setFsStateBackend(env); } else if ("jobmanager".equals(stateBackend)) { setMemoryStateBackend(env); } env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); tEnv.getConfig().addConfiguration(configuration); FlinkEnv flinkEnv = FlinkEnv .builder() .streamExecutionEnvironment(env) .streamTableEnvironment(tEnv) .build(); initHiveEnv(flinkEnv, parameterTool); return flinkEnv; } /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ private static void initHiveEnv(FlinkEnv flinkEnv, ParameterTool parameterTool) { String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; boolean enableHiveCatalog = parameterTool.getBoolean("enable.hive.catalog", false); if (enableHiveCatalog) { HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); Optional.ofNullable(flinkEnv.streamTEnv()) .ifPresent(s -> s.registerCatalog("default", hive)); Optional.ofNullable(flinkEnv.batchTEnv()) .ifPresent(s -> s.registerCatalog("default", hive)); // set the HiveCatalog as the current catalog 
of the session Optional.ofNullable(flinkEnv.streamTEnv()) .ifPresent(s -> s.useCatalog("default")); Optional.ofNullable(flinkEnv.batchTEnv()) .ifPresent(s -> s.useCatalog("default")); } boolean enableHiveDialect = parameterTool.getBoolean("enable.hive.dialect", false); if (enableHiveDialect) { Optional.ofNullable(flinkEnv.streamTEnv()) .ifPresent(s -> s.getConfig().setSqlDialect(SqlDialect.HIVE)); Optional.ofNullable(flinkEnv.batchTEnv()) .ifPresent(s -> s.getConfig().setSqlDialect(SqlDialect.HIVE)); } boolean enableHiveModuleV2 = parameterTool.getBoolean("enable.hive.module.v2", true); if (enableHiveModuleV2) { String version = "3.1.2"; HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); final boolean enableHiveModuleLoadFirst = parameterTool.getBoolean("enable.hive.module.load-first", true); Optional.ofNullable(flinkEnv.streamTEnv()) .ifPresent(s -> { if (enableHiveModuleLoadFirst) { s.unloadModule("core"); s.loadModule("default", hiveModuleV2); s.loadModule("core", CoreModule.INSTANCE); } else { s.loadModule("default", hiveModuleV2); } }); Optional.ofNullable(flinkEnv.batchTEnv()) .ifPresent(s -> { if (enableHiveModuleLoadFirst) { s.unloadModule("core"); s.loadModule("default", hiveModuleV2); s.loadModule("core", CoreModule.INSTANCE); } else { s.loadModule("default", hiveModuleV2); } }); flinkEnv.setHiveModuleV2(hiveModuleV2); } } public static FlinkEnv getBatchTableEnv(String[] args) throws IOException { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, 
CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); FlinkEnv flinkEnv = FlinkEnv .builder() .streamExecutionEnvironment(env) .tableEnvironment(tEnv) .build(); initHiveEnv(flinkEnv, parameterTool); return flinkEnv; } @Builder @Data public static class FlinkEnv { private StreamExecutionEnvironment streamExecutionEnvironment; private StreamTableEnvironment streamTableEnvironment; private TableEnvironment tableEnvironment; private HiveModuleV2 hiveModuleV2; public StreamTableEnvironment streamTEnv() { return this.streamTableEnvironment; } public TableEnvironment batchTEnv() { return this.tableEnvironment; } public StreamExecutionEnvironment env() { return this.streamExecutionEnvironment; } public HiveModuleV2 hiveModuleV2() { return this.hiveModuleV2; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/JacksonUtils.java ================================================ package flink.examples; import static com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_COMMENTS; import static com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS; import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES; import java.util.List; import java.util.Map; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.ObjectMapper; import com.hubspot.jackson.datatype.protobuf.ProtobufModule; public class JacksonUtils { private static ObjectMapper mapper = new ObjectMapper(); static { mapper.registerModule(new ProtobufModule()); 
mapper.disable(FAIL_ON_UNKNOWN_PROPERTIES); mapper.enable(ALLOW_UNQUOTED_CONTROL_CHARS); mapper.enable(ALLOW_COMMENTS); } public static String bean2Json(Object data) { try { String result = mapper.writeValueAsString(data); return result; } catch (JsonProcessingException e) { e.printStackTrace(); } return null; } public static T json2Bean(String jsonData, Class beanType) { try { T result = mapper.readValue(jsonData, beanType); return result; } catch (Exception e) { e.printStackTrace(); } return null; } public static List json2List(String jsonData, Class beanType) { JavaType javaType = mapper.getTypeFactory().constructParametricType(List.class, beanType); try { List resultList = mapper.readValue(jsonData, javaType); return resultList; } catch (Exception e) { e.printStackTrace(); } return null; } public static Map json2Map(String jsonData, Class keyType, Class valueType) { JavaType javaType = mapper.getTypeFactory().constructMapType(Map.class, keyType, valueType); try { Map resultMap = mapper.readValue(jsonData, javaType); return resultMap; } catch (Exception e) { e.printStackTrace(); } return null; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/codegen/JaninoUtils.java ================================================ package flink.examples.datastream._01.bytedance.split.codegen; import org.codehaus.janino.SimpleCompiler; import flink.examples.datastream._01.bytedance.split.model.Evaluable; import lombok.extern.slf4j.Slf4j; @Slf4j public class JaninoUtils { private static final SimpleCompiler COMPILER = new SimpleCompiler(); static { COMPILER.setParentClassLoader(JaninoUtils.class.getClassLoader()); } public static Class genCodeAndGetClazz(Long id, String topic, String condition) throws Exception { String className = "CodeGen_" + topic + "_" + id; String code = "import org.apache.commons.lang3.ArrayUtils;\n" + "\n" + "public class " + className + " implements 
flink.examples.datastream._01.bytedance.split.model.Evaluable {\n" + " \n" + " @Override\n" + " public boolean eval(flink.examples.datastream._01.bytedance.split.model.ClientLogSource clientLogSource) {\n" + " \n" + " return " + condition + ";\n" + " }\n" + "}\n"; COMPILER.cook(code); System.out.println("生成的代码:\n" + code); return (Class) COMPILER.getClassLoader().loadClass(className); } public static void main(String[] args) throws Exception { Class c = genCodeAndGetClazz(1L, "topic", "1==1"); System.out.println(1); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/codegen/benchmark/Benchmark.java ================================================ package flink.examples.datastream._01.bytedance.split.codegen.benchmark; import org.codehaus.groovy.control.CompilerConfiguration; import flink.examples.datastream._01.bytedance.split.model.ClientLogSource; import flink.examples.datastream._01.bytedance.split.model.DynamicProducerRule; import groovy.lang.GroovyClassLoader; import groovy.lang.GroovyObject; import lombok.extern.slf4j.Slf4j; @Slf4j public class Benchmark { private static void benchmarkForJava() { ClientLogSource s = ClientLogSource.builder().id(1).build(); long start2 = System.currentTimeMillis(); for (int i = 0; i < 50000000; i++) { boolean b = String.valueOf(s.getId()).equals("1"); } long end2 = System.currentTimeMillis(); System.out.println("java:" + (end2 - start2) + " ms"); } public static void benchmarkForGroovyClassLoader() { CompilerConfiguration config = new CompilerConfiguration(); config.setSourceEncoding("UTF-8"); // 设置该GroovyClassLoader的父ClassLoader为当前线程的加载器(默认) GroovyClassLoader groovyClassLoader = new GroovyClassLoader(Thread.currentThread().getContextClassLoader(), config); String groovyCode = "class demo_002 {\n" + " boolean eval(flink.examples.datastream._01.bytedance.split.model.SourceModel sourceModel) {\n" + " return 
String.valueOf(sourceModel.getId()).equals(\"1\");\n" + " }\n" + "}"; try { // 获得GroovyShell_2加载后的class Class groovyClass = groovyClassLoader.parseClass(groovyCode); // 获得GroovyShell_2的实例 GroovyObject groovyObject = (GroovyObject) groovyClass.newInstance(); ClientLogSource s = ClientLogSource.builder().id(1).build(); long start1 = System.currentTimeMillis(); for (int i = 0; i < 50000000; i++) { Object methodResult = groovyObject.invokeMethod("eval", s); } long end1 = System.currentTimeMillis(); System.out.println("groovy:" + (end1 - start1) + " ms"); } catch (Exception e) { e.getStackTrace(); } } public static void benchmarkForJanino() { String condition = "String.valueOf(sourceModel.getId()).equals(\"1\")"; DynamicProducerRule dynamicProducerRule = DynamicProducerRule .builder() .condition(condition) .targetTopic("t") .build(); dynamicProducerRule.init(1L); ClientLogSource s = ClientLogSource.builder().id(1).build(); long start2 = System.currentTimeMillis(); for (int i = 0; i < 50000000; i++) { boolean b = dynamicProducerRule.eval(s); } long end2 = System.currentTimeMillis(); System.out.println("janino:" + (end2 - start2) + " ms"); } public static void main(String[] args) throws Exception { for (int i = 0; i < 10; i++) { benchmarkForJava(); // janino benchmarkForJanino(); // groovy classloader benchmarkForGroovyClassLoader(); System.out.println(); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/job/SplitExampleJob.java ================================================ package flink.examples.datastream._01.bytedance.split.job; import java.util.Date; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import org.apache.commons.lang3.RandomUtils; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import 
org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.ProcessFunction; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.util.Collector; import flink.examples.datastream._01.bytedance.split.kafka.KafkaProducerCenter; import flink.examples.datastream._01.bytedance.split.model.ClientLogSink; import flink.examples.datastream._01.bytedance.split.model.ClientLogSource; import flink.examples.datastream._01.bytedance.split.model.DynamicProducerRule; import flink.examples.datastream._01.bytedance.split.zkconfigcenter.ZkBasedConfigCenter; /** * zk:https://www.jianshu.com/p/5491d16e6abd * kafka:https://www.jianshu.com/p/dd2578d47ff6 */ public class SplitExampleJob { public static void main(String[] args) throws Exception { ParameterTool parameters = ParameterTool.fromArgs(args); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // 其他参数设置 env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameters); env.setMaxParallelism(2); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); env.setParallelism(1); env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); env.addSource(new UserDefinedSource()) .process(new ProcessFunction() { private ZkBasedConfigCenter zkBasedConfigCenter; private 
KafkaProducerCenter kafkaProducerCenter; @Override public void open(Configuration parameters) throws Exception { super.open(parameters); this.zkBasedConfigCenter = ZkBasedConfigCenter.getInstance(); this.kafkaProducerCenter = KafkaProducerCenter.getInstance(); } @Override public void processElement(ClientLogSource clientLogSource, Context context, Collector collector) throws Exception { this.zkBasedConfigCenter.getMap().forEach(new BiConsumer() { @Override public void accept(Long id, DynamicProducerRule dynamicProducerRule) { if (dynamicProducerRule.eval(clientLogSource)) { kafkaProducerCenter.send(dynamicProducerRule.getTargetTopic(), clientLogSource.toString()); } } }); } @Override public void close() throws Exception { super.close(); this.zkBasedConfigCenter.close(); this.kafkaProducerCenter.close(); } }); env.execute(); } private static class UserDefinedSource implements SourceFunction { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect( ClientLogSource .builder() .id(RandomUtils.nextInt(0, 10)) .price(RandomUtils.nextInt(0, 100)) .timestamp(System.currentTimeMillis()) .date(new Date().toString()) .build() ); Thread.sleep(1000L); } } @Override public void cancel() { this.isCancel = true; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/job/start.sh ================================================ # 1.kafka 初始化 cd /kafka-bin-目录 # 启动 kafka server ./kafka-server-start /usr/local/etc/kafka/server.properties & # 创建 3 个 topic kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tuzisir kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tuzisir1 kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tuzisir2 # 启动一个 console consumer 
kafka-console-consumer --bootstrap-server localhost:9092 --topic tuzisir --from-beginning # 2.zk 初始化 cd /zk-bin-目录 zkServer start zkCli -server 127.0.0.1:2181 # zkCli 中需要执行的命令 create /kafka-config {"1":{"condition":"1==1","targetTopic":"tuzisir1"},"2":{"condition":"1!=1","targetTopic":"tuzisir2"}} get /kafka-config ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/kafka/KafkaProducerCenter.java ================================================ package flink.examples.datastream._01.bytedance.split.kafka; import java.util.Properties; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.BiConsumer; import java.util.function.Function; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import flink.examples.datastream._01.bytedance.split.zkconfigcenter.ZkBasedConfigCenter; public class KafkaProducerCenter { private final ConcurrentMap> producerConcurrentMap = new ConcurrentHashMap<>(); private KafkaProducerCenter() { ZkBasedConfigCenter.getInstance() .getMap() .values() .forEach(d -> getProducer(d.getTargetTopic())); } private static class Factory { private static final KafkaProducerCenter INSTANCE = new KafkaProducerCenter(); } public static KafkaProducerCenter getInstance() { return Factory.INSTANCE; } private Producer getProducer(String topicName) { Producer producer = producerConcurrentMap.get(topicName); if (null != producer) { return producer; } return producerConcurrentMap.computeIfAbsent(topicName, new Function>() { @Override public Producer apply(String topicName) { Properties props = new Properties(); props.put("bootstrap.servers", "localhost:9092"); props.put("acks", "all"); props.put("key.serializer", 
"org.apache.kafka.common.serialization.StringSerializer"); props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); return new KafkaProducer<>(props); } }); } public void send(String topicName, String message) { final ProducerRecord record = new ProducerRecord<>(topicName, "", message); try { RecordMetadata metadata = getProducer(topicName).send(record).get(); } catch (Exception e) { throw new RuntimeException(e); } } public void close() { this.producerConcurrentMap.forEach(new BiConsumer>() { @Override public void accept(String s, Producer stringStringProducer) { stringStringProducer.flush(); stringStringProducer.close(); } }); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/kafka/demo/Application.java ================================================ package flink.examples.datastream._01.bytedance.split.kafka.demo; public class Application { private String topicName = "tuzisir"; private String consumerGrp = "consumerGrp"; private String brokerUrl = "localhost:9092"; public static void main(String[] args) throws InterruptedException { System.out.println(1); Application application = new Application(); new Thread(new ProducerThread(application), "Producer : ").start(); new Thread(new ConsumerThread(application), "Consumer1 : ").start(); //for multiple consumers in same group, start new consumer threads //new Thread(new ConsumerThread(application), "Consumer2 : ").start(); } public String getTopicName() { return topicName; } public String getConsumerGrp() { return consumerGrp; } public String getBrokerUrl() { return brokerUrl; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/kafka/demo/ConsumerThread.java ================================================ package flink.examples.datastream._01.bytedance.split.kafka.demo; import java.time.Duration; import 
java.util.Collections; import java.util.Properties; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; public class ConsumerThread implements Runnable { private Consumer consumer; public ConsumerThread(Application application) { Properties props = new Properties(); props.put("bootstrap.servers", application.getBrokerUrl()); props.put("group.id", application.getConsumerGrp()); props.put("enable.auto.commit", "true"); props.put("auto.commit.interval.ms", "1000"); props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //props.put("auto.offset.reset", "earliest"); consumer = new KafkaConsumer<>(props); consumer.subscribe(Collections.singletonList(application.getTopicName())); } @Override public void run() { String threadName = Thread.currentThread().getName(); int noMessageToFetch = 1; while (noMessageToFetch < 3) { System.out.println(threadName + "poll start.."); final ConsumerRecords consumerRecords = consumer.poll(Duration.ofSeconds(1)); System.out.println(threadName + "records polled : " + consumerRecords.count()); if (consumerRecords.count() == 0) { noMessageToFetch++; continue; } for (ConsumerRecord record : consumerRecords) { System.out.printf(threadName + "offset = %d, key = %s, value = %s, partition =%d%n", record.offset(), record.key(), record.value(), record.partition()); } consumer.commitAsync(); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/kafka/demo/ProducerThread.java ================================================ package flink.examples.datastream._01.bytedance.split.kafka.demo; import java.util.Properties; import java.util.concurrent.ExecutionException; import 
org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; public class ProducerThread implements Runnable { private Producer producer; private String topicName; public ProducerThread(Application application) { this.topicName = application.getTopicName(); Properties props = new Properties(); props.put("bootstrap.servers", application.getBrokerUrl()); props.put("acks", "all"); props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); producer = new KafkaProducer<>(props); } @Override public void run() { String threadName = Thread.currentThread().getName(); for (int index = 1; index < 100; index++) { final ProducerRecord record = new ProducerRecord<>(topicName, Integer.toString(index), Integer.toString(index)); try { RecordMetadata metadata = producer.send(record).get(); System.out .println(threadName + "Record sent with key " + index + " to partition " + metadata.partition() + " with offset " + metadata.offset()); } catch (ExecutionException e) { System.out.println(threadName + "Error in sending record :" + e); throw new RuntimeException(e); } catch (InterruptedException e) { System.out.println(threadName + "Error in sending record : " + e); throw new RuntimeException(e); } catch (Exception e) { System.out.println(threadName + "Error in sending record : " + e); throw new RuntimeException(e); } } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/model/ClientLogSink.java ================================================ package flink.examples.datastream._01.bytedance.split.model; import lombok.Builder; import lombok.Data; @Data @Builder public class ClientLogSink { private int id; private int price; private long 
timestamp; } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/model/ClientLogSource.java ================================================ package flink.examples.datastream._01.bytedance.split.model; import lombok.Builder; import lombok.Data; @Data @Builder public class ClientLogSource { private int id; private int price; private long timestamp; private String date; private String page; } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/model/DynamicProducerRule.java ================================================ package flink.examples.datastream._01.bytedance.split.model; import flink.examples.datastream._01.bytedance.split.codegen.JaninoUtils; import lombok.Builder; import lombok.Data; @Data @Builder public class DynamicProducerRule implements Evaluable { private String condition; private String targetTopic; private Evaluable evaluable; public void init(Long id) { try { Class clazz = JaninoUtils.genCodeAndGetClazz(id, targetTopic, condition); this.evaluable = clazz.newInstance(); } catch (Exception e) { throw new RuntimeException(e); } } @Override public boolean eval(ClientLogSource clientLogSource) { return this.evaluable.eval(clientLogSource); } public static void main(String[] args) throws Exception { String condition = "String.valueOf(sourceModel.getId())==\"1\""; DynamicProducerRule dynamicProducerRule = DynamicProducerRule .builder() .condition(condition) .targetTopic("t") .build(); dynamicProducerRule.init(1L); boolean b = dynamicProducerRule.eval(ClientLogSource.builder().id(1).build()); System.out.println(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/model/Evaluable.java ================================================ package flink.examples.datastream._01.bytedance.split.model; public 
interface Evaluable { boolean eval(ClientLogSource clientLogSource); } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/zkconfigcenter/ZkBasedConfigCenter.java ================================================ package flink.examples.datastream._01.bytedance.split.zkconfigcenter; import java.lang.reflect.Type; import java.util.HashMap; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.BiConsumer; import java.util.function.Consumer; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.recipes.cache.TreeCache; import org.apache.curator.framework.recipes.cache.TreeCacheEvent; import org.apache.curator.framework.recipes.cache.TreeCacheListener; import org.apache.curator.retry.RetryOneTime; import com.google.common.collect.Sets; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; import flink.examples.datastream._01.bytedance.split.model.DynamicProducerRule; public class ZkBasedConfigCenter { private TreeCache treeCache; private CuratorFramework zkClient; private static class Factory { private static final ZkBasedConfigCenter INSTANCE = new ZkBasedConfigCenter(); } public static ZkBasedConfigCenter getInstance() { return Factory.INSTANCE; } private ZkBasedConfigCenter() { try { open(); } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } } private ConcurrentMap map = new ConcurrentHashMap<>(); public ConcurrentMap getMap() { return map; } private void setData() throws Exception { String path = "/kafka-config"; zkClient = CuratorFrameworkFactory.newClient("127.0.0.1:2181", new RetryOneTime(1000)); zkClient.start(); zkClient.setData().forPath(path, ("{\n" + " 1: {\n" + " \"condition\": \"1==1\",\n" + " 
\"targetTopic\": \"tuzisir1\"\n" + " },\n" + " 2: {\n" + " \"condition\": \"1!=1\",\n" + " \"targetTopic\": \"tuzisir2\"\n" + " }\n" + "}").getBytes()); } private void open() throws Exception { String path = "/kafka-config"; zkClient = CuratorFrameworkFactory.newClient("127.0.0.1:2181", new RetryOneTime(1000)); zkClient.start(); // 启动时读取远程配置中心的配置信息 String json = new String(zkClient.getData().forPath(path)); this.update(json); treeCache = new TreeCache(zkClient, path); treeCache.start(); treeCache.getListenable().addListener(new TreeCacheListener() { @Override public void childEvent(CuratorFramework curatorFramework, TreeCacheEvent treeCacheEvent) throws Exception { switch (treeCacheEvent.getType()) { case NODE_UPDATED: // 通知的内容:包含路径和值 byte[] data = treeCacheEvent.getData().getData(); String json = new String(data); System.out.println("配置变化为了:" + json); // 更新数据 update(json); break; default: } } }); } public void close() { this.treeCache.close(); this.zkClient.close(); } private void update(String json) { Map result = getNewMap(json); Set needAddId = Sets.difference(result.keySet(), map.keySet()).immutableCopy(); Set needDeleteId = Sets.difference(map.keySet(), result.keySet()).immutableCopy(); needAddId.forEach(new Consumer() { @Override public void accept(Long id) { DynamicProducerRule dynamicProducerRule = result.get(id); dynamicProducerRule.init(id); map.put(id, dynamicProducerRule); } }); needDeleteId.forEach(new Consumer() { @Override public void accept(Long id) { map.remove(id); } }); } private Map getNewMap(String json) { Gson gson = new Gson(); Map newMap = null; Type type = new TypeToken>() { }.getType(); newMap = gson.fromJson(json, type); Map result = new HashMap<>(); Optional.ofNullable(newMap) .ifPresent(new Consumer>() { @Override public void accept(Map stringDynamicProducerRuleMap) { stringDynamicProducerRuleMap.forEach(new BiConsumer() { @Override public void accept(String s, DynamicProducerRule dynamicProducerRule) { result.put(Long.parseLong(s), 
dynamicProducerRule); } }); } }); return result; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/zkconfigcenter/new.json ================================================ {"1":{"condition":"1==1","targetTopic":"tuzisir1"},"2":{"condition":"1!=1","targetTopic":"tuzisir2"},"3":{"condition":"1==1","targetTopic":"tuzisir"}} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_01/bytedance/split/zkconfigcenter/old.json ================================================ {"1":{"condition":"1==1","targetTopic":"tuzisir1"},"2":{"condition":"1!=1","targetTopic":"tuzisir2"}} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_02/DataStreamTest.java ================================================ package flink.examples.datastream._02; import java.io.IOException; import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import org.apache.commons.lang3.RandomUtils; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.serialization.AbstractDeserializationSchema; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; 
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction; import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.streaming.api.windowing.windows.TimeWindow; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer; import org.apache.flink.util.Collector; import lombok.Builder; import lombok.Data; public class DataStreamTest { public static void main(String[] args) throws Exception { ParameterTool parameters = ParameterTool.fromArgs(args); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // 其他参数设置 env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameters); env.setMaxParallelism(2); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); env.setParallelism(1); env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); Properties properties = new Properties(); properties.setProperty("bootstrap.servers", "localhost:9092"); properties.setProperty("group.id", "test"); DeserializationSchema> d = new AbstractDeserializationSchema>() { @Override public Tuple2 deserialize(byte[] message) throws IOException { return null; } }; DataStream> stream = env .addSource(new FlinkKafkaConsumer<>("topic", d, properties)); DataStream eventTimeResult = env .addSource(new UserDefinedSource()) .assignTimestampsAndWatermarks( new BoundedOutOfOrdernessTimestampExtractor(Time.seconds(1L)) { @Override public long extractTimestamp(SourceModel 
sourceModel) { return sourceModel.getTimestamp(); } } ) .uid("source") .keyBy(new KeySelector() { @Override public Integer getKey(SourceModel sourceModel) throws Exception { return sourceModel.getId(); } }) // !!!事件时间窗口 .timeWindow(Time.seconds(1L)) .process(new ProcessWindowFunction() { @Override public void process(Integer integer, Context context, Iterable iterable, Collector collector) throws Exception { iterable.forEach(new Consumer() { @Override public void accept(SourceModel sourceModel) { collector.collect( MidModel .builder() .id(sourceModel.getId()) .price(sourceModel.getPrice()) .timestamp(sourceModel.getTimestamp()) .build() ); } }); } }) .uid("process-event-time"); DataStream processingTimeResult = eventTimeResult .keyBy(new KeySelector() { @Override public Integer getKey(MidModel midModel) throws Exception { return midModel.getId(); } }) // !!!处理时间窗口 .window(TumblingProcessingTimeWindows.of(Time.seconds(1L))) .process(new ProcessWindowFunction() { @Override public void process(Integer integer, Context context, Iterable iterable, Collector collector) throws Exception { iterable.forEach(new Consumer() { @Override public void accept(MidModel midModel) { collector.collect( SinkModel .builder() .id(midModel.getId()) .price(midModel.getPrice()) .timestamp(midModel.getTimestamp()) .build() ); } }); } }) .uid("process-process-time"); processingTimeResult.print(); env.execute(); } @Data @Builder private static class SourceModel { private int id; private int price; private long timestamp; } @Data @Builder private static class MidModel { private int id; private int price; private long timestamp; } @Data @Builder private static class SinkModel { private int id; private int price; private long timestamp; } private static class UserDefinedSource implements SourceFunction { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect( SourceModel .builder() 
.id(RandomUtils.nextInt(0, 10)) .price(RandomUtils.nextInt(0, 100)) .timestamp(System.currentTimeMillis()) .build() ); Thread.sleep(10L); } } @Override public void cancel() { this.isCancel = true; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_02/DataStreamTest1.java ================================================ //package flink.examples.datastream._02; // //import java.io.IOException; //import java.util.Properties; //import java.util.concurrent.TimeUnit; //import java.util.function.Consumer; // //import org.apache.commons.lang3.RandomUtils; //import org.apache.flink.api.common.restartstrategy.RestartStrategies; //import org.apache.flink.api.common.serialization.AbstractDeserializationSchema; //import org.apache.flink.api.common.serialization.DeserializationSchema; //import org.apache.flink.api.java.functions.KeySelector; //import org.apache.flink.api.java.tuple.Tuple2; //import org.apache.flink.api.java.utils.ParameterTool; //import org.apache.flink.streaming.api.CheckpointingMode; //import org.apache.flink.streaming.api.TimeCharacteristic; //import org.apache.flink.streaming.api.datastream.DataStream; //import org.apache.flink.streaming.api.environment.CheckpointConfig; //import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; //import org.apache.flink.streaming.api.functions.source.SourceFunction; //import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction; //import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows; //import org.apache.flink.streaming.api.windowing.time.Time; //import org.apache.flink.streaming.api.windowing.windows.TimeWindow; //import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer; //import org.apache.flink.util.Collector; // //import lombok.Builder; //import lombok.Data; // // //public class DataStreamTest1 { // // public static void main(String[] args) throws Exception { // // 
ParameterTool parameters = ParameterTool.fromArgs(args); // // StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // // // // 其他参数设置 // env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time // .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); // env.getConfig().setGlobalJobParameters(parameters); // env.setMaxParallelism(2); // // // ck 设置 // env.getCheckpointConfig().setFailOnCheckpointingErrors(false); // env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); // env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); // env.getCheckpointConfig() // .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); // // env.setParallelism(1); // // env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); // // Properties properties = new Properties(); // properties.setProperty("bootstrap.servers", "localhost:9092"); // properties.setProperty("group.id", "test"); // // DeserializationSchema> d = new AbstractDeserializationSchema>() { // // @Override // public Tuple2 deserialize(byte[] message) throws IOException { // return null; // } // }; // // DataStream> stream = env // .addSource(new FlinkKafkaConsumer<>("topic", d, properties)); // // DataStream eventTimeResult = // env // .addSource(new UserDefinedSource()) // .map() // .flatMap() // .process() // .keyBy() // .sum() // // // DataStream processingTimeResult = eventTimeResult // .keyBy(new KeySelector() { // @Override // public Integer getKey(MidModel midModel) throws Exception { // return midModel.getId(); // } // }) // // !!!处理时间窗口 // .window(TumblingProcessingTimeWindows.of(Time.seconds(1L))) // .process(new ProcessWindowFunction() { // @Override // public void process(Integer integer, Context context, Iterable iterable, // Collector collector) throws Exception { // // iterable.forEach(new Consumer() { // @Override // 
public void accept(MidModel midModel) { // collector.collect( // SinkModel // .builder() // .id(midModel.getId()) // .price(midModel.getPrice()) // .timestamp(midModel.getTimestamp()) // .build() // ); // } // }); // // } // }) // .uid("process-process-time"); // // processingTimeResult.print(); // // env.execute(); // } // // @Data // @Builder // private static class SourceModel { // private int id; // private int price; // private long timestamp; // } // // @Data // @Builder // private static class MidModel { // private int id; // private int price; // private long timestamp; // } // // @Data // @Builder // private static class SinkModel { // private int id; // private int price; // private long timestamp; // } // // private static class UserDefinedSource implements SourceFunction { // // private volatile boolean isCancel; // // @Override // public void run(SourceContext sourceContext) throws Exception { // // while (!this.isCancel) { // sourceContext.collect( // SourceModel // .builder() // .id(RandomUtils.nextInt(0, 10)) // .price(RandomUtils.nextInt(0, 100)) // .timestamp(System.currentTimeMillis()) // .build() // ); // // Thread.sleep(10L); // } // // } // // @Override // public void cancel() { // this.isCancel = true; // } // } // //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/enums_state/EnumsStateTest.java ================================================ package flink.examples.datastream._03.enums_state; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.common.typeutils.base.EnumSerializer; import org.apache.flink.configuration.Configuration; import org.apache.flink.core.memory.DataOutputSerializer; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; public class EnumsStateTest { public static void main(String[] args) throws Exception { 
StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); env.setParallelism(1); env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); TypeInformation t = TypeInformation.of(StateTestEnums.class); EnumSerializer e = (EnumSerializer) t.createSerializer(env.getConfig()); DataOutputSerializer d = new DataOutputSerializer(10000); e.serialize(StateTestEnums.A, d); env.execute(); } enum StateTestEnums { A, B, C ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/enums_state/SenerioTest.java ================================================ package flink.examples.datastream._03.enums_state; import java.util.HashMap; import java.util.Map; import java.util.function.BiConsumer; import org.apache.flink.api.common.functions.AggregateFunction; import org.apache.flink.api.common.state.ValueState; import org.apache.flink.api.common.state.ValueStateDescriptor; import org.apache.flink.api.common.typeinfo.TypeHint; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.streaming.api.windowing.windows.TimeWindow; import org.apache.flink.util.Collector; import com.google.common.collect.Lists; import lombok.Builder; import lombok.Data; import lombok.extern.slf4j.Slf4j; @Slf4j public class SenerioTest { public static void 
main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); Tuple2 k = Tuple2.of(DimNameEnum.sex, "男"); System.out.println(k.toString()); env.setParallelism(1); env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); env.addSource(new SourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { } @Override public void cancel() { this.isCancel = true; } }) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.minutes(1L)) { @Override public long extractTimestamp(SourceModel element) { return element.getTimestamp(); } }) .keyBy(new KeySelector() { @Override public Long getKey(SourceModel value) throws Exception { return value.getUserId() % 1000; } }) .timeWindow(Time.minutes(1)) .aggregate( new AggregateFunction, Long>, Map, Long>>() { @Override public Map, Long> createAccumulator() { return new HashMap<>(); } @Override public Map, Long> add(SourceModel value, Map, Long> accumulator) { Lists.newArrayList(Tuple2.of(DimNameEnum.province, value.getProvince()) , Tuple2.of(DimNameEnum.age, value.getAge()) , Tuple2.of(DimNameEnum.sex, value.getSex())) .forEach(t -> { Long l = accumulator.get(t); if (null == l) { accumulator.put(t, 1L); } else { accumulator.put(t, l + 1); } }); return accumulator; } @Override public Map, Long> getResult( Map, Long> accumulator) { return accumulator; } @Override public Map, Long> merge( Map, Long> a, Map, Long> b) { return null; } }, new ProcessWindowFunction, Long>, SinkModel, Long, TimeWindow>() { private transient ValueState, Long>> todayPv; @Override public void open(Configuration parameters) throws Exception { super.open(parameters); this.todayPv = getRuntimeContext().getState(new ValueStateDescriptor, Long>>( "todayPv", TypeInformation.of( new TypeHint, Long>>() { }))); } @Override public void process(Long aLong, Context context, Iterable, 
Long>> elements, Collector out) throws Exception { // 将 elements 数据 merge 到 todayPv 中 // 然后 out#collect 出去即可 this.todayPv.value() .forEach(new BiConsumer, Long>() { @Override public void accept(Tuple2 k, Long v) { log.info("key 值:{},value 值:{}", k.toString(), v); } }); } }); env.execute(); } @Data @Builder private static class SourceModel { private long userId; private String province; private String age; private String sex; private long timestamp; } @Data @Builder private static class SinkModel { private String dimName; private String dimValue; private long timestamp; } enum DimNameEnum { province, age, sex, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/StateExamplesTest.java ================================================ package flink.examples.datastream._03.state; import java.util.LinkedList; import java.util.List; import org.apache.flink.api.common.functions.AggregateFunction; import org.apache.flink.api.common.functions.ReduceFunction; import org.apache.flink.api.common.state.AggregatingState; import org.apache.flink.api.common.state.AggregatingStateDescriptor; import org.apache.flink.api.common.state.ListState; import org.apache.flink.api.common.state.ListStateDescriptor; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.common.state.ReducingState; import org.apache.flink.api.common.state.ReducingStateDescriptor; import org.apache.flink.api.common.state.StateTtlConfig; import org.apache.flink.api.common.state.ValueState; import org.apache.flink.api.common.state.ValueStateDescriptor; import org.apache.flink.api.common.time.Time; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.configuration.Configuration; import 
org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class StateExamplesTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new ParallelSourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. 
first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor> mapStateDesc = new MapStateDescriptor<>( "itemsMap", BasicTypeInfo.STRING_TYPE_INFO, new ListTypeInfo<>(Item.class)); private final ListStateDescriptor listStateDesc = new ListStateDescriptor<>( "itemsList", Item.class); private final ValueStateDescriptor valueStateDesc = new ValueStateDescriptor<>( "itemsValue" , Item.class); private final ReducingStateDescriptor reducingStateDesc = new ReducingStateDescriptor<>( "itemsReducing" , new ReduceFunction() { @Override public String reduce(String value1, String value2) throws Exception { return value1 + value2; } }, String.class); private final AggregatingStateDescriptor aggregatingStateDesc = new AggregatingStateDescriptor("itemsAgg", new AggregateFunction() { @Override public String createAccumulator() { return ""; } @Override public String add(Item value, String accumulator) { return accumulator + value.name; } @Override public String getResult(String accumulator) { return accumulator; } @Override public String merge(String a, String b) { return null; } }, String.class); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); mapStateDesc.enableTimeToLive(StateTtlConfig .newBuilder(Time.milliseconds(1)) .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) .cleanupInRocksdbCompactFilter(10) .build()); } @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { MapState> mapState = getRuntimeContext().getMapState(mapStateDesc); List l = mapState.get(value.name); if (null == l) { l = new LinkedList<>(); } l.add(value); mapState.put(value.name, l); ListState listState = getRuntimeContext().getListState(listStateDesc); listState.add(value); Object o = listState.get(); ValueState valueState = 
getRuntimeContext().getState(valueStateDesc); valueState.update(value); Item i = valueState.value(); AggregatingState aggregatingState = getRuntimeContext().getAggregatingState(aggregatingStateDesc); aggregatingState.add(value); String aggResult = aggregatingState.get(); ReducingState reducingState = getRuntimeContext().getReducingState(reducingStateDesc); reducingState.add(value.name); String reducingResult = reducingState.get(); System.out.println(1); } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_01_broadcast_state/BroadcastStateTest.java ================================================ package flink.examples.datastream._03.state._01_broadcast_state; import java.util.ArrayList; import java.util.List; import java.util.Map; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.common.typeinfo.TypeHint; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.streaming.api.datastream.BroadcastStream; import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction; import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import 
lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class BroadcastStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // a map descriptor to store the name of the rule (string) and the rule itself. MapStateDescriptor ruleStateDescriptor = new MapStateDescriptor<>( "RulesBroadcastState", BasicTypeInfo.STRING_TYPE_INFO, TypeInformation.of(new TypeHint() { })); // broadcast the rules and create the broadcast state BroadcastStream ruleBroadcastStream = flinkEnv.env() .addSource(new SourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Rule.builder() .name("rule" + i) .first(Shape.CIRCLE) .second(Shape.SQUARE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .setParallelism(1) .broadcast(ruleStateDescriptor); flinkEnv.env() .addSource(new ParallelSourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item" + i) .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .keyBy(new KeySelector() { @Override public Color getKey(Item item) throws Exception { return item.color; } }) .connect(ruleBroadcastStream) .process(new KeyedBroadcastProcessFunction() { // store partial matches, i.e. 
first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor> mapStateDesc = new MapStateDescriptor<>( "items", BasicTypeInfo.STRING_TYPE_INFO, new ListTypeInfo<>(Item.class)); // identical to our ruleStateDescriptor above private final MapStateDescriptor ruleStateDescriptor = new MapStateDescriptor<>( "RulesBroadcastState", BasicTypeInfo.STRING_TYPE_INFO, TypeInformation.of(new TypeHint() { })); @Override public void processBroadcastElement(Rule value, Context ctx, Collector out) throws Exception { ctx.getBroadcastState(ruleStateDescriptor).put(value.name, value); } @Override public void processElement(Item value, ReadOnlyContext ctx, Collector out) throws Exception { final MapState> state = getRuntimeContext().getMapState(mapStateDesc); final Shape shape = value.getShape(); for (Map.Entry entry : ctx.getBroadcastState(ruleStateDescriptor).immutableEntries()) { final String ruleName = entry.getKey(); final Rule rule = entry.getValue(); List stored = state.get(ruleName); if (stored == null) { stored = new ArrayList<>(); } if (shape == rule.second && !stored.isEmpty()) { for (Item i : stored) { out.collect("MATCH: " + i + " - " + value); } stored.clear(); } // there is no else{} to cover if rule.first == rule.second if (shape.equals(rule.first)) { stored.add(value); } if (stored.isEmpty()) { state.remove(ruleName); } else { state.put(ruleName, stored); } } } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/CreateStateBackendTest.java 
================================================ //package flink.examples.datastream._03.state._03_rocksdb; // //import java.util.LinkedList; //import java.util.List; // //import org.apache.flink.api.common.state.MapState; //import org.apache.flink.api.common.state.MapStateDescriptor; //import org.apache.flink.api.common.state.StateTtlConfig; //import org.apache.flink.api.common.state.StateTtlConfig.TtlTimeCharacteristic; //import org.apache.flink.api.common.time.Time; //import org.apache.flink.api.java.functions.KeySelector; //import org.apache.flink.configuration.Configuration; //import org.apache.flink.streaming.api.functions.KeyedProcessFunction; //import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; //import org.apache.flink.util.Collector; // //import flink.examples.FlinkEnvUtils; //import flink.examples.FlinkEnvUtils.FlinkEnv; //import lombok.Builder; //import lombok.Data; // ///** // * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ // */ // //public class CreateStateBackendTest { // // // public static void main(String[] args) throws Exception { // FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // // flinkEnv.env().setParallelism(1); // // flinkEnv.env() // .addSource(new ParallelSourceFunction() { // // private volatile boolean isCancel = false; // // @Override // public void run(SourceContext ctx) throws Exception { // // int i = 0; // // while (!this.isCancel) { // ctx.collect( // Item.builder() // .name("item") // .color(Color.RED) // .shape(Shape.CIRCLE) // .build() // ); // i++; // Thread.sleep(1000); // } // } // // @Override // public void cancel() { // this.isCancel = true; // } // }) // .keyBy(new KeySelector() { // @Override // public Integer getKey(Item item) throws Exception { // return item.color.ordinal(); // } // }) // .process(new KeyedProcessFunction() { // // // store partial matches, i.e. 
first elements of the pair waiting for their second element // // we keep a list as we may have many first elements waiting // private MapStateDescriptor mapStateDescriptor = // new MapStateDescriptor<>("map state name", String.class, String.class); // // private transient MapState mapState; // // @Override // public void open(Configuration parameters) throws Exception { // super.open(parameters); // // StateTtlConfig stateTtlConfig = StateTtlConfig // // 1.ttl 时长 // .newBuilder(Time.milliseconds(1)) // // // 2.更新类型 // .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) // // 创建和写入更新 // .updateTtlOnCreateAndWrite() // // 读取和写入更新 // .updateTtlOnReadAndWrite() // // // 3.过期状态的访问可见性 // .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) // // 如果还没有被删除就返回 // .returnExpiredIfNotCleanedUp() // // 过期的永远不返回 // .neverReturnExpired() // // // 4.过期的时间语义 // .setTtlTimeCharacteristic(TtlTimeCharacteristic.ProcessingTime) // .useProcessingTime() // // // 5.清除策略 // // 做 CK 时把所有状态删除掉 // .cleanupFullSnapshot() // // 增量删除,只有有状态记录访问时,才会做删除;并且他会加大任务处理延迟。 // // 增量删除仅仅支持 HeapStateBeckend,Rocksdb 不支持!!! 
// // 每访问 1 此 state,遍历 1000 条进行删除 // .cleanupIncrementally(1000, true) // // Rocksdb 状态后端在 rocksdb 做 compaction 时清除过期状态。 // // 做 compaction 时每隔 3 个 entry,重新更新一下时间戳(用于判断是否过期) // .cleanupInRocksdbCompactFilter(3) // // 禁用 cleanup // .disableCleanupInBackground() // .build(); // // this.mapStateDescriptor.enableTimeToLive(stateTtlConfig); // this.mapState = this.getRuntimeContext().getMapState(mapStateDescriptor); // } // // // @Override // public void processElement(Item value, Context ctx, Collector out) throws Exception { // // MapState> mapState = getRuntimeContext().getMapState(mapStateDesc); // // List l = mapState.get(value.name); // // Object o = mapState.get("测试"); // // if (null == l) { // l = new LinkedList<>(); // } // // l.add(value); // // mapState.put(value.name, l); // // // // } // }) // .print(); // // // flinkEnv.env().execute("广播状态测试任务"); // // } // // @Builder // @Data // private static class Rule { // private String name; // private Shape first; // private Shape second; // } // // @Builder // @Data // private static class Item { // private String name; // private Shape shape; // private Color color; // // } // // // private enum Shape { // CIRCLE, // SQUARE // ; // } // // private enum Color { // RED, // BLUE, // BLACK, // ; // } // //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/GettingStartDemo.java ================================================ package flink.examples.datastream._03.state._03_rocksdb; import org.rocksdb.Options; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; public class GettingStartDemo { // 因为RocksDB是由C++编写的,在Java中使用首先需要加载Native库 static { // Loads the necessary library files. // Calling this method twice will have no effect. 
// By default the method extracts the shared library for loading at // java.io.tmpdir, however, you can override this temporary location by // setting the environment variable ROCKSDB_SHAREDLIB_DIR. // 默认这个方法会加压一个共享库到java.io.tmpdir RocksDB.loadLibrary(); } public static void main(String[] args) throws RocksDBException { // 1. 打开数据库 // 1.1 创建数据库配置 Options dbOpt = new Options(); // 1.2 配置当数据库不存在时自动创建 dbOpt.setCreateIfMissing(true); // 1.3 打开数据库。因为RocksDB默认是保存在本地磁盘,所以需要指定位置 RocksDB rdb = RocksDB.open(dbOpt, "./data/rocksdb"); // 2. 写入数据 // 2.1 RocksDB都是以字节流的方式写入数据库中,所以我们需要将字符串转换为字节流再写入。这点类似于HBase byte[] key = "zhangsan".getBytes(); byte[] value = "20".getBytes(); // 2.2 调用put方法写入数据 rdb.put(key, value); System.out.println("写入数据到RocksDB完成!"); // 3. 调用delete方法读取数据 System.out.println("从RocksDB读取key = " + new String(key) + "的value为" + new String(rdb.get(key))); // 4. 移除数据 rdb.delete(key); // 关闭资源 rdb.close(); dbOpt.close(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/Rocksdb_OperatorAndKeyedState_StateStorageDIr_Test.java ================================================ package flink.examples.datastream._03.state._03_rocksdb; import java.util.List; import org.apache.flink.api.common.state.ListState; import org.apache.flink.api.common.state.ListStateDescriptor; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.common.state.StateTtlConfig; import org.apache.flink.api.common.state.StateTtlConfig.StateVisibility; import org.apache.flink.api.common.state.StateTtlConfig.UpdateType; import org.apache.flink.api.common.time.Time; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.configuration.Configuration; import org.apache.flink.runtime.state.FunctionInitializationContext; import 
org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class Rocksdb_OperatorAndKeyedState_StateStorageDIr_Test { public static void main(String[] args) throws Exception { // FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--execution.savepoint.path", ""}); FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new UserDefinedSource()) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.name.hashCode(); } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. 
first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor mapStateDesc = new MapStateDescriptor<>( "key1", String.class , Item.class); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); mapStateDesc.enableTimeToLive(StateTtlConfig .newBuilder(Time.hours(24)) .setUpdateType(UpdateType.OnCreateAndWrite) .setStateVisibility(StateVisibility.NeverReturnExpired) .cleanupFullSnapshot() .build()); } @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { MapState mapState = getRuntimeContext().getMapState(mapStateDesc); mapState.put(value.name, value); out.collect(value.name); } }) .keyBy(new KeySelector() { @Override public Integer getKey(String value) throws Exception { return value.hashCode(); } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor mapStateDesc = new MapStateDescriptor<>( "key2", BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); mapStateDesc.enableTimeToLive(StateTtlConfig .newBuilder(Time.hours(24)) .setUpdateType(UpdateType.OnCreateAndWrite) .setStateVisibility(StateVisibility.NeverReturnExpired) .cleanupFullSnapshot() .build()); } @Override public void processElement(String value, Context ctx, Collector out) throws Exception { MapState mapState = getRuntimeContext().getMapState(mapStateDesc); mapState.put(value, value); } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private 
enum Shape { CIRCLE, SQUARE ; }

private enum Color { RED, BLUE, BLACK, ; }

/**
 * Demo source: emits one Item per millisecond and mirrors every emitted Item
 * into operator list state so it survives checkpoints/restores.
 */
private static class UserDefinedSource extends RichParallelSourceFunction implements CheckpointedFunction {

    private final ListStateDescriptor listStateDescriptor = new ListStateDescriptor("a", Item.class);

    private volatile boolean isCancel = false;

    // Operator list state handle; assigned in initializeState().
    private transient ListState l;

    @Override
    public void run(SourceContext ctx) throws Exception {
        int i = 0;
        while (!this.isCancel) {
            ctx.collect(
                    Item.builder()
                            .name("item" + i)
                            .color(Color.RED)
                            .shape(Shape.CIRCLE)
                            .build()
            );
            i++;
            List items = (List) l.get();
            items.add(Item.builder()
                    .name("item")
                    .color(Color.RED)
                    .shape(Shape.CIRCLE)
                    .build());
            // BUGFIX: write the mutated list back through the state handle.
            // The sibling operator-state examples in this repo all call update();
            // mutating the iterable returned by get() in place only happens to
            // work because the heap backend hands out its live backing list.
            l.update(items);
            Thread.sleep(1);
        }
    }

    @Override
    public void cancel() {
        this.isCancel = true;
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        // marker output so checkpoint-triggered snapshots are visible in the demo
        System.out.println(1);
    }

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        this.l = context.getOperatorStateStore().getListState(listStateDescriptor);
    }
}
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/keyed_state/RocksBackendKeyedMapStateTest.java ================================================
package flink.examples.datastream._03.state._03_rocksdb.keyed_state;

import java.util.LinkedList;
import java.util.List;

import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.StateTtlConfig.TtlTimeCharacteristic;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.typeutils.ListTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import
org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class RocksBackendKeyedMapStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new ParallelSourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor> mapStateDesc = new MapStateDescriptor<>( "a", BasicTypeInfo.STRING_TYPE_INFO, new ListTypeInfo<>(Item.class)); // store partial matches, i.e. 
first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor> mapStateDescb = new MapStateDescriptor<>( "b", BasicTypeInfo.STRING_TYPE_INFO, new ListTypeInfo<>(Item.class)); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); mapStateDesc.enableTimeToLive(StateTtlConfig .newBuilder(Time.milliseconds(1)) .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) .cleanupInRocksdbCompactFilter(10) .build()); StateTtlConfig // 1.ttl 时长 .newBuilder(Time.milliseconds(1)) // 2.更新类型 .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) // 创建和写入更新 .updateTtlOnCreateAndWrite() // 读取和写入更新 .updateTtlOnReadAndWrite() // 3.过期状态的访问可见性 .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) // 如果还没有被删除就返回 .returnExpiredIfNotCleanedUp() // 过期的永远不返回 .neverReturnExpired() // 4.过期的时间语义 .setTtlTimeCharacteristic(TtlTimeCharacteristic.ProcessingTime) .useProcessingTime() // 5.清除策略 // 从 cp 或 sp 恢复时清除过期状态 .cleanupFullSnapshot() // 增量删除,只有有状态记录访问时,才会做删除;并且他会加大任务处理延迟。 // 增量删除仅仅支持 HeapStateBeckend,Rocksdb 不支持!!! 
// 每访问 1 此 state,遍历 1000 条进行删除 .cleanupIncrementally(1000, true) // Rocksdb 状态后端在 rocksdb 做 compaction 时清除过期状态。 // 做 compaction 时每隔 3 个 entry,重新更新一下时间戳(用于判断是否过期) .cleanupInRocksdbCompactFilter(3) // 禁用 cleanup .disableCleanupInBackground() .build(); } @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { MapState> mapState = getRuntimeContext().getMapState(mapStateDesc); List l = mapState.get(value.name); Object o = mapState.get("测试"); if (null == l) { l = new LinkedList<>(); } l.add(value); mapState.put(value.name, l); } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/keyed_state/RocksBackendKeyedValueStateTest.java ================================================ package flink.examples.datastream._03.state._03_rocksdb.keyed_state; import java.util.LinkedList; import java.util.List; import org.apache.flink.api.common.state.StateTtlConfig; import org.apache.flink.api.common.state.ValueState; import org.apache.flink.api.common.state.ValueStateDescriptor; import org.apache.flink.api.common.time.Time; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * 
https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class RocksBackendKeyedValueStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new ParallelSourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final ValueStateDescriptor> valueStateDesc = new ValueStateDescriptor<>( "items" , new ListTypeInfo<>(Item.class)); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); valueStateDesc.enableTimeToLive(StateTtlConfig .newBuilder(Time.milliseconds(1)) .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) .cleanupInRocksdbCompactFilter(10) .build()); } @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { ValueState> valueState = getRuntimeContext().getState(valueStateDesc); List l = valueState.value(); if (null == l) { l = new LinkedList<>(); } l.add(value); valueState.update(l); } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private 
Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/operator_state/KeyedStreamOperatorListStateTest.java ================================================ package flink.examples.datastream._03.state._03_rocksdb.operator_state; import java.util.List; import org.apache.flink.api.common.state.ListState; import org.apache.flink.api.common.state.ListStateDescriptor; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.runtime.state.FunctionInitializationContext; import org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; import org.apache.flink.util.Collector; import com.google.common.collect.Lists; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class KeyedStreamOperatorListStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new UserDefinedSource()) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new UserDefinedKeyPF()) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape 
{ CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } private static class UserDefinedSource extends RichParallelSourceFunction implements CheckpointedFunction { private final ListStateDescriptor listStateDescriptor = new ListStateDescriptor("a", Item.class); private volatile boolean isCancel = false; private transient ListState l; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; List items = (List) l.get(); items.add(Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build()); l.update(items); Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { System.out.println(1); } @Override public void initializeState(FunctionInitializationContext context) throws Exception { this.l = context.getOperatorStateStore().getListState(listStateDescriptor); } } private static class UserDefinedKeyPF extends KeyedProcessFunction implements CheckpointedFunction { private final ListStateDescriptor listStateDescriptor = new ListStateDescriptor("b", Item.class); private ListState listState; @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { this.listState.update(Lists.newArrayList(value)); } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { System.out.println(1); } @Override public void initializeState(FunctionInitializationContext context) throws Exception { this.listState = context.getKeyedStateStore().getListState(listStateDescriptor); this.listState = context.getOperatorStateStore().getListState(listStateDescriptor); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_03_rocksdb/operator_state/RocksBackendOperatorListStateTest.java 
================================================ package flink.examples.datastream._03.state._03_rocksdb.operator_state; import java.util.List; import org.apache.flink.api.common.state.ListState; import org.apache.flink.api.common.state.ListStateDescriptor; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.runtime.state.FunctionInitializationContext; import org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class RocksBackendOperatorListStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new UserDefinedSource()) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new KeyedProcessFunction() { @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } private static class UserDefinedSource extends RichParallelSourceFunction implements CheckpointedFunction { private final ListStateDescriptor listStateDescriptor = new 
ListStateDescriptor("a", Item.class); private volatile boolean isCancel = false; private transient ListState l; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; List items = (List) l.get(); items.add(Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build()); l.update(items); Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { System.out.println(1); } @Override public void initializeState(FunctionInitializationContext context) throws Exception { this.l = context.getOperatorStateStore().getListState(listStateDescriptor); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_04_filesystem/keyed_state/FsStateBackendKeyedMapStateTest.java ================================================ package flink.examples.datastream._03.state._04_filesystem.keyed_state; import java.util.LinkedList; import java.util.List; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class FsStateBackendKeyedMapStateTest 
{ public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--state.backend", "filesystem"}); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new ParallelSourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return 0; } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor> mapStateDesc = new MapStateDescriptor<>( "items", BasicTypeInfo.STRING_TYPE_INFO, new ListTypeInfo<>(Item.class)); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); // mapStateDesc.enableTimeToLive(StateTtlConfig // .newBuilder(Time.hours(1)) // .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) // .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) // .cleanupInRocksdbCompactFilter(10) // .build()); } @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { MapState> mapState = getRuntimeContext().getMapState(mapStateDesc); List l = mapState.get(value.name); Object o = mapState.get("测试"); if (null == l) { l = new LinkedList<>(); } l.add(value); mapState.put(value.name, l); } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape { CIRCLE, 
SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_04_filesystem/operator_state/FsStateBackendOperatorListStateTest.java ================================================ package flink.examples.datastream._03.state._04_filesystem.operator_state; import java.util.List; import org.apache.flink.api.common.state.ListState; import org.apache.flink.api.common.state.ListStateDescriptor; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.runtime.state.FunctionInitializationContext; import org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class FsStateBackendOperatorListStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--state.backend", "filesystem"}); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new UserDefinedSource()) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new KeyedProcessFunction() { @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private 
Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE; } private enum Color { RED, BLUE, BLACK, ; } private static class UserDefinedSource extends RichParallelSourceFunction implements CheckpointedFunction { private final ListStateDescriptor listStateDescriptor = new ListStateDescriptor("a", Item.class); private volatile boolean isCancel = false; private transient ListState l; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; List items = (List) l.get(); items.add(Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build()); l.update(items); Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { System.out.println(1); } @Override public void initializeState(FunctionInitializationContext context) throws Exception { this.l = context.getOperatorStateStore().getListState(listStateDescriptor); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_03/state/_05_memory/keyed_state/MemoryStateBackendKeyedMapStateTest.java ================================================ package flink.examples.datastream._03.state._05_memory.keyed_state; import java.util.LinkedList; import java.util.List; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.common.state.StateTtlConfig; import org.apache.flink.api.common.time.Time; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import 
org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; /** * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/datastream/fault-tolerance/broadcast_state/ */ public class MemoryStateBackendKeyedMapStateTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--state.backend", "jobmanager"}); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new ParallelSourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Item.builder() .name("item") .color(Color.RED) .shape(Shape.CIRCLE) .build() ); i++; Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .keyBy(new KeySelector() { @Override public Integer getKey(Item item) throws Exception { return item.color.ordinal(); } }) .process(new KeyedProcessFunction() { // store partial matches, i.e. 
first elements of the pair waiting for their second element // we keep a list as we may have many first elements waiting private final MapStateDescriptor> mapStateDesc = new MapStateDescriptor<>( "items", BasicTypeInfo.STRING_TYPE_INFO, new ListTypeInfo<>(Item.class)); @Override public void open(Configuration parameters) throws Exception { super.open(parameters); mapStateDesc.enableTimeToLive(StateTtlConfig .newBuilder(Time.milliseconds(1)) .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired) .cleanupInRocksdbCompactFilter(10) .build()); } @Override public void processElement(Item value, Context ctx, Collector out) throws Exception { MapState> mapState = getRuntimeContext().getMapState(mapStateDesc); List l = mapState.get(value.name); Object o = mapState.get("测试"); if (null == l) { l = new LinkedList<>(); } l.add(value); mapState.put(value.name, l); } }) .print(); flinkEnv.env().execute("广播状态测试任务"); } @Builder @Data private static class Rule { private String name; private Shape first; private Shape second; } @Builder @Data private static class Item { private String name; private Shape shape; private Color color; } private enum Shape { CIRCLE, SQUARE ; } private enum Color { RED, BLUE, BLACK, ; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_04/keyed_co_process/HashMapTest.java ================================================ package flink.examples.datastream._04.keyed_co_process; import java.util.HashMap; import java.util.Map.Entry; public class HashMapTest { public static void main(String[] args) { HashMap hashMap = new HashMap<>(); hashMap.put("1", "2"); hashMap.put("2", "2"); hashMap.put("3", "2"); hashMap.put("4", "2"); hashMap.put("5", "2"); for (Entry e : hashMap.entrySet()) { hashMap.remove(e.getKey()); } } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/datastream/_04/keyed_co_process/_04_KeyedCoProcessFunctionTest.java ================================================ package flink.examples.datastream._04.keyed_co_process; import java.util.Map.Entry; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.common.state.StateTtlConfig; import org.apache.flink.api.common.state.StateTtlConfig.StateVisibility; import org.apache.flink.api.common.state.StateTtlConfig.UpdateType; import org.apache.flink.api.common.time.Time; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.datastream.KeyedStream; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.co.KeyedCoProcessFunction; import org.apache.flink.streaming.api.functions.sink.SinkFunction; import org.apache.flink.streaming.api.functions.source.RichSourceFunction; import org.apache.flink.util.Collector; import com.twitter.chill.protobuf.ProtobufSerializer; import flink.examples.JacksonUtils; import flink.examples.datastream._04.keyed_co_process.protobuf.Source; import flink.examples.sql._05.format.formats.protobuf.Test; public class _04_KeyedCoProcessFunctionTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); 
env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(10); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); env.registerTypeWithKryoSerializer(Source.class, ProtobufSerializer.class); env.registerTypeWithKryoSerializer(Test.class, ProtobufSerializer.class); KeyedStream source1 = env .addSource(new UserDefineSource1()) .uid("source1") .keyBy(new KeySelector() { @Override public Integer getKey(Source value) throws Exception { return value.getName().hashCode() % 1024; } }); KeyedStream source2 = env .addSource(new UserDefineSource2()) .uid("source2") .keyBy(new KeySelector() { @Override public Integer getKey(Source value) throws Exception { return value.getName().hashCode() % 1024; } }); source1.connect(source2) .process(new KeyedCoProcessFunction() { private transient MapState source1State; private transient MapState source2State; private StateTtlConfig getStateTtlConfig() { return StateTtlConfig .newBuilder(Time.hours(1)) .setUpdateType(UpdateType.OnCreateAndWrite) .setStateVisibility(StateVisibility.NeverReturnExpired) .cleanupIncrementally(3, true) .build(); } @Override public void open(Configuration parameters) throws Exception { super.open(parameters); MapStateDescriptor source1StateDescriptor = new MapStateDescriptor( "source1State" , TypeInformation.of(String.class) , TypeInformation.of(Source.class)); source1StateDescriptor .enableTimeToLive(getStateTtlConfig()); this.source1State = getRuntimeContext().getMapState(source1StateDescriptor); MapStateDescriptor 
source2StateDescriptor = new MapStateDescriptor( "source2State" , TypeInformation.of(String.class) , TypeInformation.of(Source.class)); source2StateDescriptor .enableTimeToLive(getStateTtlConfig()); this.source2State = getRuntimeContext().getMapState(source2StateDescriptor); } @Override public void processElement1(Source value, Context ctx, Collector out) throws Exception { ctx.timerService().registerProcessingTimeTimer(System.currentTimeMillis() + 10000); this.source1State.put(value.getName(), value); } @Override public void processElement2(Source value, Context ctx, Collector out) throws Exception { ctx.timerService().registerProcessingTimeTimer(System.currentTimeMillis() + 10000); this.source2State.put(value.getName(), value); } @Override public void onTimer(long timestamp, OnTimerContext ctx, Collector out) throws Exception { for (Entry e : this.source1State.entries()) { this.source1State.remove(e.getKey()); out.collect(Test .newBuilder() .setName(e.getValue().getName()) .build()); } for (Entry e : this.source2State.entries()) { this.source1State.remove(e.getKey()); out.collect(Test .newBuilder() .setName(e.getValue().getName()) .build()); } // this.source1State.iterator() // .forEachRemaining(a -> { // out.collect(Test // .newBuilder() // .setName(a.getValue().getName()) // .build() // ); // try { // this.source1State.remove(a.getKey()); // } catch (Exception e) { // e.printStackTrace(); // } // }); // // this.source2State.iterator() // .forEachRemaining(a -> { // out.collect(Test // .newBuilder() // .setName(a.getValue().getName()) // .build() // ); // try { // this.source2State.remove(a.getKey()); // } catch (Exception e) { // e.printStackTrace(); // } // }); } }) .uid("process") .disableChaining() .addSink(new SinkFunction() { @Override public void invoke(Test value, Context context) throws Exception { System.out.println(JacksonUtils.bean2Json(value)); } }) .uid("sink"); env.execute("KeyedCoProcessFunction 测试"); } private static class UserDefineSource1 
extends RichSourceFunction { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Source.newBuilder() .setName("antigenral-from-source-" + i) .build() ); i++; if (i == 20) { i = 0; } Thread.sleep(100); } } @Override public void cancel() { this.isCancel = true; } } private static class UserDefineSource2 extends RichSourceFunction { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect( Source.getDefaultInstance() ); i++; if (i == 20) { i = 0; } Thread.sleep(100); } } @Override public void cancel() { this.isCancel = true; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_05_ken/_01_watermark/WatermarkTest.java ================================================ package flink.examples.datastream._05_ken._01_watermark; import java.util.HashSet; import java.util.Set; import java.util.function.Consumer; import org.apache.flink.api.common.functions.FilterFunction; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction; import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.streaming.api.windowing.windows.TimeWindow; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; public class WatermarkTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); 
flinkEnv.env().setParallelism(8);

flinkEnv.env()
        .addSource(new SourceFunction<SourceModel>() {

            private volatile boolean isCancel = false;

            @Override
            public void run(SourceContext<SourceModel> ctx) throws Exception {
                // NOTE(review): tight loop with no Thread.sleep — floods the
                // pipeline and burns a core; sibling examples throttle emission.
                // NOTE(review): userId and time are never set (both default 0),
                // so the event-time extractor below always sees timestamp 0 and
                // the 1-minute windows never advance — confirm whether
                // .time(System.currentTimeMillis()) was intended.
                while (!isCancel) {
                    // xxx log-reporting logic
                    ctx.collect(SourceModel
                            .builder()
                            .page("Shopping-Cart")
                            .build());
                }
            }

            @Override
            public void cancel() {
                this.isCancel = true;
            }
        })
        .filter(new FilterFunction<SourceModel>() {
            @Override
            public boolean filter(SourceModel value) throws Exception {
                // Keep only shopping-cart page views.
                return value.getPage().equals("Shopping-Cart");
            }
        })
        .assignTimestampsAndWatermarks(
                new BoundedOutOfOrdernessTimestampExtractor<SourceModel>(Time.minutes(1)) {
                    @Override
                    public long extractTimestamp(SourceModel element) {
                        return element.getTime();
                    }
                })
        .keyBy(new KeySelector<SourceModel, Long>() {
            @Override
            public Long getKey(SourceModel value) throws Exception {
                // Single-key job: every record is routed to the same key group.
                return 0L;
            }
        })
        .window(TumblingEventTimeWindows.of(Time.minutes(1)))
        .process(new ProcessWindowFunction<SourceModel, SinkModel, Long, TimeWindow>() {
            @Override
            public void process(Long aLong, Context context, Iterable<SourceModel> elements,
                    Collector<SinkModel> out) throws Exception {
                long windowStart = context.window().getStart();
                // Count distinct userIds in the window (UV).
                Set<Long> s = new HashSet<>();
                elements.forEach(new Consumer<SourceModel>() {
                    @Override
                    public void accept(SourceModel sourceModel) {
                        s.add(sourceModel.userId);
                    }
                });
                out.collect(SinkModel
                        .builder()
                        .uv(s.size())
                        .time(windowStart)
                        .build());
            }
        })
        .print();

flinkEnv.env().execute();
}

// Input record: user id, visited page, event timestamp (ms).
@Data
@Builder
private static class SourceModel {
    private long userId;
    private String page;
    private long time;
}

// Output record: unique-visitor count and window start time.
@Data
@Builder
private static class SinkModel {
    private long uv;
    private long time;
}
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_06_test/_01_event_proctime/OneJobWIthProcAndEventTimeWIndowTest.java ================================================
package flink.examples.datastream._06_test._01_event_proctime;

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

import org.apache.flink.api.java.functions.KeySelector;
import
org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction; import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.streaming.api.windowing.windows.TimeWindow; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; public class OneJobWIthProcAndEventTimeWIndowTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new SourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { while (!isCancel) { // xxx 日志上报逻辑 ctx.collect( SourceModel .builder() .page("Shopping-Cart") .userId(1) .time(System.currentTimeMillis()) .build() ); Thread.sleep(100); } } @Override public void cancel() { this.isCancel = true; } }) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.seconds(1)) { @Override public long extractTimestamp(SourceModel element) { return element.getTime(); } }) .keyBy(new KeySelector() { @Override public Long getKey(SourceModel value) throws Exception { return 0L; } }) .window(TumblingEventTimeWindows.of(Time.seconds(10))) .process(new ProcessWindowFunction() { @Override public void process(Long aLong, Context context, Iterable elements, Collector out) throws Exception { long windowStart = context.window().getStart(); Set s = new HashSet<>(); elements.forEach(new Consumer() { @Override public void accept(SourceModel sourceModel) { s.add(sourceModel.userId); } 
}); out.collect( MiddleModel .builder() .uv(s.size()) .time(windowStart) .build() ); } }) .keyBy(new KeySelector() { @Override public Integer getKey(MiddleModel value) throws Exception { return 0; } }) .window(TumblingProcessingTimeWindows.of(Time.seconds(10))) .process(new ProcessWindowFunction() { @Override public void process(Integer integer, Context context, Iterable elements, Collector out) throws Exception { System.out.println(1); } }) .print(); flinkEnv.env().execute(); } @Data @Builder private static class SourceModel { private long userId; private String page; private long time; } @Data @Builder private static class MiddleModel { private long uv; private long time; } @Data @Builder private static class SinkModel { private long uv; private long time; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_06_test/_01_event_proctime/OneJobWIthTimerTest.java ================================================ package flink.examples.datastream._06_test._01_event_proctime; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.streaming.api.functions.KeyedProcessFunction; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; public class OneJobWIthTimerTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new SourceFunction() { private volatile boolean isCancel = false; @Override public void run(SourceContext ctx) throws Exception { while (!isCancel) { // xxx 日志上报逻辑 ctx.collect( SourceModel .builder() 
.page("Shopping-Cart") .userId(1) .time(System.currentTimeMillis()) .build() ); Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } }) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.seconds(1)) { @Override public long extractTimestamp(SourceModel element) { return element.getTime(); } }) .keyBy(new KeySelector() { @Override public Long getKey(SourceModel value) throws Exception { return 0L; } }) .process(new KeyedProcessFunction() { private int i = 0; @Override public void processElement(SourceModel value, Context ctx, Collector out) throws Exception { if (i == 0) { i++; System.out.println(1); ctx.timerService().registerEventTimeTimer(value.time + 1000); ctx.timerService().registerProcessingTimeTimer(value.time + 5000); } else { System.out.println(2); } } @Override public void onTimer(long timestamp, OnTimerContext ctx, Collector out) throws Exception { System.out.println(ctx.timeDomain()); } }) .print(); flinkEnv.env().execute(); } @Data @Builder private static class SourceModel { private long userId; private String page; private long time; } @Data @Builder private static class MiddleModel { private long uv; private long time; } @Data @Builder private static class SinkModel { private long uv; private long time; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_07_lambda_error/LambdaErrorTest.java ================================================ package flink.examples.datastream._07_lambda_error; import org.apache.flink.streaming.api.functions.source.SourceFunction; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; public class LambdaErrorTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new SourceFunction() { private volatile 
boolean isCancel = false; private SinkModel s; @Override public void run(SourceContext ctx) throws Exception { while (!isCancel) { // xxx 日志上报逻辑 ctx.collect( SourceModel .builder() .page("Shopping-Cart") .userId(1) .time(System.currentTimeMillis()) .build() ); Thread.sleep(100); } } @Override public void cancel() { this.isCancel = true; } }) .print(); flinkEnv.env().execute(); } @Data @Builder private static class SourceModel { private long userId; private String page; private long time; } @Data @Builder private static class MiddleModel { private long uv; private long time; } @Data @Builder private static class SinkModel { private long uv; private long time; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_08_late_record/LatenessTest.java ================================================ package flink.examples.datastream._08_late_record; import org.apache.flink.api.common.functions.FlatMapFunction; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.streaming.api.windowing.windows.TimeWindow; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.Builder; import lombok.Data; public class LatenessTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); flinkEnv.env() .addSource(new SourceFunction() { private volatile boolean isCancel = false; private SinkModel s; @Override public void run(SourceContext ctx) throws Exception { while (!isCancel) { // xxx 日志上报逻辑 ctx.collect( SourceModel 
.builder() .page("Shopping-Cart") .userId(1) .time(System.currentTimeMillis()) .build() ); Thread.sleep(100); } } @Override public void cancel() { this.isCancel = true; } }) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.minutes(1)) { @Override public long extractTimestamp(SourceModel element) { return element.getTime(); } }) .flatMap(new FlatMapFunction() { private Collector out1; @Override public void flatMap(SourceModel value, Collector out) throws Exception { for (int i = 0; i < 3; i++) { if (out1 == null) { this.out1 = out; } out.collect( MiddleModel .builder() .uv(1L) .time(System.currentTimeMillis()) .build() ); } } }) .keyBy(new KeySelector() { @Override public Integer getKey(MiddleModel value) throws Exception { return 0; } }) .timeWindow(Time.seconds(10)) .process(new ProcessWindowFunction() { @Override public void process(Integer integer, Context context, Iterable elements, Collector out) throws Exception { System.out.println(1L); } }) .print(); flinkEnv.env().execute(); } @Data @Builder private static class SourceModel { private long userId; private String page; private long time; } @Data @Builder private static class MiddleModel { private long uv; private long time; } @Data @Builder private static class SinkModel { private long uv; private long time; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_09_join/_01_window_join/_01_Window_Join_Test.java ================================================ //package flink.examples.datastream._09_join._01_window_join; // //import org.apache.flink.api.common.functions.FlatJoinFunction; //import org.apache.flink.api.common.functions.JoinFunction; //import org.apache.flink.api.java.functions.KeySelector; //import org.apache.flink.streaming.api.functions.source.SourceFunction; //import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; //import 
org.apache.flink.streaming.api.windowing.time.Time; //import org.apache.flink.util.Collector; // //import flink.examples.FlinkEnvUtils; //import flink.examples.FlinkEnvUtils.FlinkEnv; // // //public class _01_Window_Join_Test { // // public static void main(String[] args) throws Exception { // // FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // // flinkEnv.env().setParallelism(1); // // flinkEnv.env() // .addSource(new SourceFunction() { // @Override // public void run(SourceContext ctx) throws Exception { // // } // // @Override // public void cancel() { // // } // }) // .join(flinkEnv.env().addSource(new SourceFunction() { // @Override // public void run(SourceContext ctx) throws Exception { // // } // // @Override // public void cancel() { // // } // })) // .where(new KeySelector() { // @Override // public Object getKey(Object value) throws Exception { // return null; // } // }) // .equalTo(new KeySelector() { // @Override // public Object getKey(Object value) throws Exception { // return null; // } // }) // .window(TumblingEventTimeWindows.of(Time.seconds(60))) // .apply(new FlatJoinFunction() { // @Override // public void join(Object first, Object second, Collector out) throws Exception { // // } // }) // .apply(new JoinFunction() { // @Override // public Object join(Object first, Object second) throws Exception { // return null; // } // }); // } // //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_09_join/_02_connect/_01_Connect_Test.java ================================================ package flink.examples.datastream._09_join._02_connect; import org.apache.flink.api.common.state.MapState; import org.apache.flink.api.common.state.MapStateDescriptor; import org.apache.flink.api.java.functions.KeySelector; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.co.KeyedCoProcessFunction; import 
org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Skeleton example of connecting two keyed streams with a KeyedCoProcessFunction.
 * The sources, key selectors and process callbacks are intentionally empty stubs;
 * only the wiring and the keyed MapState setup are demonstrated.
 */
public class _01_Connect_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        flinkEnv.env()
                .addSource(new SourceFunction() {
                    @Override
                    public void run(SourceContext ctx) throws Exception {
                    }

                    @Override
                    public void cancel() {
                    }
                })
                .keyBy(new KeySelector() {
                    @Override
                    public Object getKey(Object value) throws Exception {
                        return null;
                    }
                })
                .connect(flinkEnv.env().addSource(new SourceFunction() {
                    @Override
                    public void run(SourceContext ctx) throws Exception {
                    }

                    @Override
                    public void cancel() {
                    }
                }).keyBy(new KeySelector() {
                    @Override
                    public Object getKey(Object value) throws Exception {
                        return null;
                    }
                }))
                .process(new KeyedCoProcessFunction() {

                    private transient MapState mapState;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        super.open(parameters);
                        // Keyed state shared by both processElement callbacks.
                        this.mapState = getRuntimeContext()
                                .getMapState(new MapStateDescriptor("a", String.class, String.class));
                    }

                    @Override
                    public void processElement1(Object value, Context ctx, Collector out) throws Exception {
                    }

                    @Override
                    public void processElement2(Object value, Context ctx, Collector out) throws Exception {
                    }
                })
                .print();

        // BUG FIX: the pipeline was defined but never run — without execute()
        // this example was a no-op.
        flinkEnv.env().execute();
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/datastream/_10_agg/AggTest.java ================================================
package flink.examples.datastream._10_agg;

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import
flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;
import lombok.Builder;
import lombok.Data;

/**
 * Minimal windowed-aggregation example: 3-second event-time tumbling window
 * over a single-key stream, with a pass-through AggregateFunction.
 */
public class AggTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        flinkEnv.env()
                .addSource(new SourceFunction<SourceModel>() {

                    private volatile boolean isCancel = false;

                    @Override
                    public void run(SourceContext<SourceModel> ctx) throws Exception {
                        // Emit one record per second until cancelled.
                        while (!isCancel) {
                            // xxx log-reporting logic
                            ctx.collect(SourceModel
                                    .builder()
                                    .page("Shopping-Cart")
                                    .userId(1)
                                    .time(System.currentTimeMillis())
                                    .build());
                            Thread.sleep(1000);
                        }
                    }

                    @Override
                    public void cancel() {
                        this.isCancel = true;
                    }
                })
                .assignTimestampsAndWatermarks(
                        new BoundedOutOfOrdernessTimestampExtractor<SourceModel>(Time.seconds(1)) {
                            @Override
                            public long extractTimestamp(SourceModel element) {
                                return element.getTime();
                            }
                        })
                .keyBy(new KeySelector<SourceModel, Long>() {
                    @Override
                    public Long getKey(SourceModel value) throws Exception {
                        // Single-key job: all records share key 0.
                        return 0L;
                    }
                })
                .timeWindow(Time.seconds(3))
                .aggregate(new AggregateFunction<SourceModel, SourceModel, SourceModel>() {
                    @Override
                    public SourceModel createAccumulator() {
                        return SourceModel.builder().build();
                    }

                    @Override
                    public SourceModel add(SourceModel value, SourceModel accumulator) {
                        // Last-value-wins: the incoming value replaces the accumulator.
                        return value;
                    }

                    @Override
                    public SourceModel getResult(SourceModel accumulator) {
                        return accumulator;
                    }

                    @Override
                    public SourceModel merge(SourceModel a, SourceModel b) {
                        // BUG FIX: merge() used to return null, which would NPE if the
                        // runtime ever merged accumulators (e.g. with session windows).
                        // Tumbling windows never call merge(), so returning "a" is
                        // behavior-preserving for this pipeline.
                        return a;
                    }
                })
                .print();

        flinkEnv.env().execute();
    }

    // Input record: user id, visited page, event timestamp (ms).
    @Data
    @Builder
    private static class SourceModel {
        private long userId;
        private String page;
        private long time;
    }

    @Data
    @Builder
    private static class MiddleModel {
        private long uv;
        private long time;
    }

    @Data
    @Builder
    private static class SinkModel {
        private long uv;
        private long time;
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/practice/_01/dau/_01_DataStream_Session_Window.java ================================================
package
flink.examples.practice._01.dau; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF; public class _01_DataStream_Session_Window { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("table.local-time-zone", "GMT+08:00"); DataStream> tuple3DataStream = flinkEnv.env().fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627254000000L), // 北京时间:2021-07-26 07:00:00 Tuple3.of("2", 1L, 1627218000000L + 5000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L))) .assignTimestampsAndWatermarks( new BoundedOutOfOrdernessTimestampExtractor>(Time.seconds(0L)) { @Override public long extractTimestamp(Tuple3 element) { return element.f2; } }); flinkEnv.streamTEnv().registerFunction("mod", new Mod_UDF()); flinkEnv.streamTEnv().registerFunction("status_mapper", new StatusMapper_UDF()); flinkEnv.streamTEnv().createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp, rowtime.rowtime"); String sql = "SELECT\n" + " count(1),\n" + " cast(tumble_start(rowtime, INTERVAL '1' DAY) as string)\n" 
+ "FROM\n" + " source_db.source_table\n" + "GROUP BY\n" + " tumble(rowtime, INTERVAL '1' DAY)"; Table result = flinkEnv.streamTEnv().sqlQuery(sql); flinkEnv.streamTEnv().toAppendStream(result, Row.class).print(); flinkEnv.env().execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/question/datastream/_01/kryo_protobuf_no_more_bytes_left/KryoProtobufNoMoreBytesLeftTest.java ================================================ package flink.examples.question.datastream._01.kryo_protobuf_no_more_bytes_left; import java.lang.reflect.Method; import com.esotericsoftware.kryo.Kryo; import com.esotericsoftware.kryo.io.Input; import com.esotericsoftware.kryo.io.Output; import com.google.protobuf.Message; import com.sun.tools.javac.util.Assert; import com.twitter.chill.protobuf.ProtobufSerializer; import flink.examples.datastream._04.keyed_co_process.protobuf.Source; public class KryoProtobufNoMoreBytesLeftTest { public static void main(String[] args) throws Exception { Source source = Source .newBuilder() .build(); byte[] bytes = source.toByteArray(); byte[] buffer = new byte[300]; Kryo kryo = newKryo(); Output output = new Output(buffer); // ser ProtobufSerializer protobufSerializer = new ProtobufSerializer(); protobufSerializer.write(kryo, output, source); // deser Input input = new Input(buffer); Class c = (Class) Source.getDefaultInstance().getClass(); Message m = protobufSerializer.read(kryo, input, (Class) c); testGetParse(); } private static void testGetParse() throws Exception { ProtobufSerializerV2 protobufSerializerV2 = new ProtobufSerializerV2(); Method m = protobufSerializerV2.getParse(Source.class); Source s = (Source) m.invoke(null, Source.newBuilder().setName("antigeneral").build().toByteArray()); Assert.check("antigeneral".equals(s.getName())); } private static class ProtobufSerializerV2 extends ProtobufSerializer { @Override public Method getParse(Class cls) throws Exception { return 
super.getParse(cls); } } private static Kryo newKryo() { Kryo kryo = new Kryo(); kryo.addDefaultSerializer(Source.class, ProtobufSerializerV2.class); return kryo; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/question/sql/_01/lots_source_fields_poor_performance/EmbeddedKafka.java ================================================ //package flink.examples.question.sql._01.lots_source_fields_poor_performance; // //import static net.mguenther.kafka.junit.ObserveKeyValues.on; //import static net.mguenther.kafka.junit.SendValues.to; // //import lombok.SneakyThrows; //import net.mguenther.kafka.junit.EmbeddedKafkaCluster; //import net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig; // //public class EmbeddedKafka { // // public static void main(String[] args) { // EmbeddedKafkaCluster kafkaCluster = // EmbeddedKafkaCluster.provisionWith(EmbeddedKafkaClusterConfig.defaultClusterConfig()); // kafkaCluster.start(); // // new Thread(new Runnable() { // @SneakyThrows // @Override // public void run() { // while (true) { // kafkaCluster.send(to("test-topic", "a", "b", "c")); // Thread.sleep(1000); // } // } // }).start(); // // // new Thread(new Runnable() { // @SneakyThrows // @Override // public void run() { // while (true) { // kafkaCluster.observe(on("test-topic", 3)) // .forEach(a -> System.out.println(a.getValue())); // } // } // }).start(); // // // } // //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/question/sql/_01/lots_source_fields_poor_performance/_01_DataGenSourceTest.java ================================================ package flink.examples.question.sql._01.lots_source_fields_poor_performance; import java.util.Arrays; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import 
org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class _01_DataGenSourceTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(10); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 TUMBLE WINDOW 案例"); tEnv.getConfig().getConfiguration().setString("state.backend", "rocksdb"); String originalSql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE 
sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " rn BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " rn\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " row_number() over(partition by user_id order by server_timestamp) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1"; String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " user_id1 BIGINT,\n" + " name1 STRING,\n" + " user_id2 BIGINT,\n" + " name2 STRING,\n" + " user_id3 BIGINT,\n" + " name3 STRING,\n" + " user_id4 BIGINT,\n" + " name4 STRING,\n" + " user_id5 BIGINT,\n" + " name5 STRING,\n" + " user_id6 BIGINT,\n" + " name6 STRING,\n" + " user_id7 BIGINT,\n" + " name7 STRING,\n" + " user_id8 BIGINT,\n" + " name8 STRING,\n" + " user_id9 BIGINT,\n" + " name9 STRING,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " rn BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " rn\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " row_number() over(partition by user_id order by server_timestamp) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1"; Arrays.stream(sql.split(";")) .forEach(tEnv::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/question/sql/_01/lots_source_fields_poor_performance/_01_JsonSourceTest.java ================================================ package flink.examples.question.sql._01.lots_source_fields_poor_performance; 
import java.util.Arrays; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.RichSourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.data.RowData; import com.google.common.collect.ImmutableMap; import flink.examples.JacksonUtils; public class _01_JsonSourceTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(10); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 TUMBLE WINDOW 案例"); 
tEnv.getConfig().getConfiguration().setString("state.backend", "rocksdb"); String originalSql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'class.name' = 'flink.examples.question.sql._01.lots_source_fields_poor_performance._01_JsonSourceTest$UserDefineSource1',\n" + " 'format' = 'json'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " rn BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " rn\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " row_number() over(partition by user_id order by server_timestamp) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1"; String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " user_id1 BIGINT,\n" + " name1 STRING,\n" + " user_id2 BIGINT,\n" + " name2 STRING,\n" + " user_id3 BIGINT,\n" + " name3 STRING,\n" + " user_id4 BIGINT,\n" + " name4 STRING,\n" + " user_id5 BIGINT,\n" + " name5 STRING,\n" + " user_id6 BIGINT,\n" + " name6 STRING,\n" + " user_id7 BIGINT,\n" + " name7 STRING,\n" + " user_id8 BIGINT,\n" + " name8 STRING,\n" + " user_id9 BIGINT,\n" + " name9 STRING,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " rn BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " rn\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " row_number() over(partition by user_id order by 
server_timestamp) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1"; Arrays.stream(originalSql.split(";")) .forEach(tEnv::executeSql); } public static class UserDefineSource1 extends RichSourceFunction { private DeserializationSchema dser; private volatile boolean isCancel; public UserDefineSource1(DeserializationSchema dser) { this.dser = dser; } @Override public void run(SourceContext ctx) throws Exception { while (!this.isCancel) { ctx.collect(this.dser.deserialize( JacksonUtils.bean2Json(ImmutableMap.of("user_id", 1111L , "name", "antigeneral" , "server_timestamp", System.currentTimeMillis()) ).getBytes() )); Thread.sleep(1000); } } @Override public void cancel() { this.isCancel = true; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_01/future/CompletableFutureTest.java ================================================ package flink.examples.runtime._01.future; import java.util.concurrent.CompletableFuture; public class CompletableFutureTest { public static void main(String[] args) throws Exception { // 创建异步执行任务: CompletableFuture cf = CompletableFuture.supplyAsync(CompletableFutureTest::fetchPrice); // 如果执行成功: cf.thenAccept((result) -> { System.out.println("price: " + result); }); // 如果执行异常: cf.exceptionally((e) -> { e.printStackTrace(); return null; }); // 主线程不要立刻结束,否则CompletableFuture默认使用的线程池会立刻关闭: Thread.sleep(200); } static Double fetchPrice() { try { Thread.sleep(100); } catch (InterruptedException e) { } if (false) { throw new RuntimeException("fetch price failed!"); } return 5 + Math.random() * 20; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_01/future/CompletableFutureTest4.java ================================================ package flink.examples.runtime._01.future; import java.util.concurrent.CompletableFuture; public class CompletableFutureTest4 { public static void main(String[] args) throws 
Exception { // 第一个任务: CompletableFuture cfQuery = CompletableFuture.supplyAsync(() -> { return queryCode("中国石油"); }); // cfQuery成功后继续执行下一个任务: CompletableFuture cfFetch = cfQuery.thenApplyAsync((code) -> { return fetchPrice(code); }); // cfFetch成功后打印结果: cfFetch.thenAccept((result) -> { System.out.println("price: " + result); }); // 主线程不要立刻结束,否则CompletableFuture默认使用的线程池会立刻关闭: Thread.sleep(2000); } static String queryCode(String name) { try { Thread.sleep(100); } catch (InterruptedException e) { } return name; } static String fetchPrice(String code) { try { Thread.sleep(100); } catch (InterruptedException e) { } return code + ":" + 5 + Math.random() * 20; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_01/future/CompletableFuture_AnyOf_Test3.java ================================================ package flink.examples.runtime._01.future; import java.util.concurrent.CompletableFuture; public class CompletableFuture_AnyOf_Test3 { public static void main(String[] args) throws Exception { // 两个CompletableFuture执行异步查询: CompletableFuture cfQueryFromSina = CompletableFuture.supplyAsync(() -> { return queryCode("中国石油", "https://finance.sina.com.cn/code/"); }); CompletableFuture cfQueryFrom163 = CompletableFuture.supplyAsync(() -> { return queryCode("中国石油", "https://money.163.com/code/"); }); // 用anyOf合并为一个新的CompletableFuture: CompletableFuture cfQuery = CompletableFuture.anyOf(cfQueryFromSina, cfQueryFrom163); // 两个CompletableFuture执行异步查询: CompletableFuture cfFetchFromSina = cfQuery.thenApplyAsync((code) -> { return fetchPrice((String) code, "https://finance.sina.com.cn/price/"); }); CompletableFuture cfFetchFrom163 = cfQuery.thenApplyAsync((code) -> { return fetchPrice((String) code, "https://money.163.com/price/"); }); // 用anyOf合并为一个新的CompletableFuture: CompletableFuture cfFetch = CompletableFuture.anyOf(cfFetchFromSina, cfFetchFrom163); // 最终结果: cfFetch.thenAccept((result) -> { System.out.println("price: " 
+ result); });
        // Keep the main thread alive for a moment; otherwise the default
        // thread pool used by CompletableFuture is shut down as soon as the
        // JVM exits and the async callbacks never run.
        Thread.sleep(200);
    }

    // Simulates a remote stock-code lookup against the given url
    // (random sleep of up to 100 ms); always returns the constant "601857".
    static String queryCode(String name, String url) {
        System.out.println("query code from " + url + "...");
        try {
            Thread.sleep((long) (Math.random() * 100));
        } catch (InterruptedException e) {
        }
        return "601857";
    }

    // Simulates a price fetch against the given url (random sleep of up to
    // 100 ms); returns a pseudo-random value in [5, 25).
    static Double fetchPrice(String code, String url) {
        System.out.println("query price from " + url + "...");
        try {
            Thread.sleep((long) (Math.random() * 100));
        } catch (InterruptedException e) {
        }
        return 5 + Math.random() * 20;
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_01/future/CompletableFuture_ThenApplyAsync_Test2.java
================================================
package flink.examples.runtime._01.future;

import java.util.concurrent.CompletableFuture;

/**
 * Demonstrates chaining two asynchronous stages with {@code thenApplyAsync}:
 * first resolve a stock code, then fetch its price, then print the result.
 */
public class CompletableFuture_ThenApplyAsync_Test2 {

    public static void main(String[] args) throws Exception {
        // First task: resolve the stock code asynchronously.
        CompletableFuture cfQuery = CompletableFuture.supplyAsync(() -> {
            return queryCode("中国石油");
        });
        // After cfQuery succeeds, continue with the next task.
        CompletableFuture cfFetch = cfQuery.thenApplyAsync((code) -> {
            return fetchPrice(code);
        });
        // After cfFetch succeeds, print the result.
        cfFetch.thenAccept((result) -> {
            System.out.println("price: " + result);
        });
        // Keep the main thread alive; otherwise the default thread pool used
        // by CompletableFuture is shut down immediately.
        Thread.sleep(2000);
    }

    // Simulates a code lookup (sleeps 100 ms) and echoes the name back.
    static String queryCode(String name) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
        }
        return name;
    }

    // Simulates a price fetch (sleeps 100 ms). Note: this is pure string
    // concatenation, so the result looks like "<code>:5<random double>" —
    // the 5 and the random value are NOT added arithmetically.
    static String fetchPrice(String code) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
        }
        return code + ":" + 5 + Math.random() * 20;
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_01/future/CompletableFuture_ThenComposeAsync_Test2.java
================================================
package flink.examples.runtime._01.future;

import java.util.concurrent.CompletableFuture;

/**
 * Demonstrates chaining asynchronous stages with {@code thenComposeAsync},
 * which flattens the CompletableFuture returned by the mapping function.
 */
public class CompletableFuture_ThenComposeAsync_Test2 {

    public static void main(String[] args)
throws Exception {
        // First task: resolve the stock code asynchronously.
        CompletableFuture cfQuery = CompletableFuture.supplyAsync(() -> {
            return queryCode("中国石油");
        });
        // After cfQuery succeeds, compose the next asynchronous stage;
        // thenComposeAsync unwraps the CompletableFuture returned by the lambda.
        CompletableFuture cfFetch = cfQuery.thenComposeAsync((code) -> {
            return CompletableFuture.supplyAsync(() -> fetchPrice(code));
        });
        // After cfFetch succeeds, print the result.
        cfFetch.thenAccept((result) -> {
            System.out.println("price: " + result);
        });
        // Keep the main thread alive; otherwise the default thread pool used
        // by CompletableFuture is shut down immediately.
        Thread.sleep(2000);
    }

    // Simulates a code lookup (sleeps 100 ms) and echoes the name back.
    static String queryCode(String name) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
        }
        return name;
    }

    // Simulates a price fetch (sleeps 100 ms); returns "<code>:5<random>"
    // via string concatenation.
    static String fetchPrice(String code) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
        }
        return code + ":" + 5 + Math.random() * 20;
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_01/future/FutureTest.java
================================================
package flink.examples.runtime._01.future;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;

/**
 * Minimal Executor/Future example: submit a Callable to a fixed thread pool
 * and block on {@code Future.get()} for the result.
 */
public class FutureTest {

    public static void main(String[] args) throws ExecutionException, InterruptedException, TimeoutException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // Define the task.
        Callable task = new Task();
        // Submit the task and obtain a Future handle for it.
        Future future = executor.submit(task);
        // Block until the asynchronous result is available.
        String result = future.get();
        System.out.println(result);
        executor.shutdown();
    }

    private static class Task implements Callable {
        // Sleeps 1 s, then returns the constant "1".
        public String call() throws Exception {
            Thread.sleep(1000);
            return "1";
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/runtime/_04/statebackend/CancelAndRestoreWithCheckpointTest.java
================================================
package flink.examples.runtime._04.statebackend;

import
java.util.Arrays; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.contrib.streaming.state.PredefinedOptions; import org.apache.flink.contrib.streaming.state.RocksDBStateBackend; import org.apache.flink.runtime.state.StateBackend; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class CancelAndRestoreWithCheckpointTest { private static final boolean ENABLE_INCREMENTAL_CHECKPOINT = true; private static final int NUMBER_OF_TRANSFER_THREADS = 3; public static void main(String[] args) throws Exception { Configuration configuration = new Configuration(); configuration.setString("execution.savepoint.path", "file:///Users/flink/checkpoints/ce2e1969c5088bf27daf35d4907659fd/chk-5"); StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration); ParameterTool parameterTool = ParameterTool.fromArgs(args); // ck 设置 env.getCheckpointConfig().setCheckpointTimeout(TimeUnit.MINUTES.toMillis(3)); env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.configure(configuration, Thread.currentThread().getContextClassLoader()); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); // 状态后端设置 // 设置存储文件位置为 file:///Users/flink/checkpoints RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend( "file:///Users/flink/checkpoints", 
ENABLE_INCREMENTAL_CHECKPOINT); rocksDBStateBackend.setNumberOfTransferThreads(NUMBER_OF_TRANSFER_THREADS); rocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM); env.setStateBackend((StateBackend) rocksDBStateBackend); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(10); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); tEnv.getConfig().getConfiguration().setString("table.exec.emit.early-fire.enabled", "true"); tEnv.getConfig().getConfiguration().setString("table.exec.emit.early-fire.delay", "60 s"); String sql = "CREATE TABLE source_table (\n" + " dim BIGINT,\n" + " user_id BIGINT,\n" + " price BIGINT,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.dim.min' = '1',\n" + " 'fields.dim.max' = '2',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " dim BIGINT,\n" + " pv BIGINT,\n" + " sum_price BIGINT,\n" + " max_price BIGINT,\n" + " min_price BIGINT,\n" + " uv BIGINT,\n" + " window_start bigint\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select dim,\n" + " sum(bucket_pv) as pv,\n" + " sum(bucket_sum_price) as sum_price,\n" + " max(bucket_max_price) as max_price,\n" + " min(bucket_min_price) as min_price,\n" + " sum(bucket_uv) as uv,\n" + " max(window_start) as window_start\n" + "from (\n" + " select dim,\n" + 
" count(*) as bucket_pv,\n" + " sum(price) as bucket_sum_price,\n" + " max(price) as bucket_max_price,\n" + " min(price) as bucket_min_price,\n" + " count(distinct user_id) as bucket_uv,\n" + " UNIX_TIMESTAMP(CAST(tumble_start(row_time, interval '1' DAY) AS STRING)) * 1000 as window_start\n" + " from source_table\n" + " group by\n" + " mod(user_id, 1024),\n" + " dim,\n" + " tumble(row_time, interval '1' DAY)\n" + ")\n" + "group by dim,\n" + " window_start"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 WINDOW TVF TUMBLE WINDOW EARLY FIRE 案例"); Arrays.stream(sql.split(";")) .forEach(tEnv::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror/CountDistinctErrorTest.java ================================================ package flink.examples.sql._01.countdistincterror; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF; public class CountDistinctErrorTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream> tuple3DataStream = env.fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627218000000L + 5000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), 
Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L))); tEnv.registerFunction("mod", new Mod_UDF()); tEnv.registerFunction("status_mapper", new StatusMapper_UDF()); tEnv.createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp"); String sql = "WITH detail_tmp AS (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`\n" + " FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`,\n" + " row_number() over(\n" + " PARTITION by id\n" + " ORDER BY\n" + " `timestamp` DESC\n" + " ) AS rn\n" + " FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`\n" + " FROM\n" + " source_db.source_table\n" + " ) t1\n" + " ) t2\n" + " WHERE\n" + " rn = 1\n" + ")\n" + "SELECT\n" + " DIM.status_new as status,\n" + " part_uv as uv\n" + "FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " count(id) as part_uv\n" + " FROM\n" + " detail_tmp\n" + " GROUP BY\n" + " status,\n" + " mod(id, 100)\n" + " )\n" + "LEFT JOIN LATERAL TABLE(status_mapper(status)) AS DIM(status_new) ON TRUE\n"; Table result = tEnv.sqlQuery(sql); tEnv.toRetractStream(result, Row.class).print(); env.execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror/CountDistinctErrorTest2.java ================================================ package flink.examples.sql._01.countdistincterror; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF; public class 
CountDistinctErrorTest2 { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream> tuple3DataStream = env.fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627218000000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L))); tEnv.registerFunction("mod", new Mod_UDF()); tEnv.registerFunction("status_mapper", new StatusMapper_UDF()); tEnv.createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp"); String sql = "WITH detail_tmp AS (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`\n" + " FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`,\n" + " row_number() over(\n" + " PARTITION by id\n" + " ORDER BY\n" + " `timestamp` DESC\n" + " ) AS rn\n" + " FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`\n" + " FROM\n" + " source_db.source_table\n" + " ) t1\n" + " ) t2\n" + " WHERE\n" + " rn = 1\n" + ")\n" + "SELECT\n" + " DIM.status_new as status,\n" + " sum(part_uv) as uv\n" + "FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " count(distinct id) as part_uv\n" + " FROM\n" + " detail_tmp\n" + " GROUP BY\n" + " status,\n" + " mod(id, 100)\n" + " )\n" + "LEFT JOIN LATERAL TABLE(status_mapper(status)) AS DIM(status_new) ON TRUE\n" + "GROUP BY\n" + " DIM.status_new"; Table result = tEnv.sqlQuery(sql); tEnv.toRetractStream(result, Row.class).print(); String s = env.getExecutionPlan(); env.execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror/CountDistinctErrorTest3.java 
================================================ package flink.examples.sql._01.countdistincterror; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper1_UDF; public class CountDistinctErrorTest3 { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream> tuple3DataStream = env.fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627218000000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L))); tEnv.registerFunction("mod", new Mod_UDF()); tEnv.registerFunction("status_mapper", new StatusMapper1_UDF()); tEnv.createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp"); String sql = "WITH detail_tmp AS (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`\n" + " FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`,\n" + " row_number() over(\n" + " PARTITION by id\n" + " ORDER BY\n" + " `timestamp` DESC\n" + " ) AS rn\n" + " FROM\n" + " (\n" + " SELECT\n" + " status,\n" + " id,\n" + " `timestamp`\n" + " FROM\n" + " source_db.source_table\n" + " ) t1\n" + " ) t2\n" + " WHERE\n" + " rn = 1\n" + ")\n" + "SELECT\n" + " status_mapper(status) as 
status,\n" + " sum(part_uv) as uv\n" + "FROM\n" + " (\n" + " SELECT\n"
                + " status,\n" + " count(distinct id) as part_uv\n" + " FROM\n"
                + " detail_tmp\n" + " GROUP BY\n" + " status,\n" + " mod(id, 100)\n"
                + " )\n" + "GROUP BY\n" + " status";
        // Materialize the query and print it as a retract stream
        // (updates arrive as -U/+U pairs).
        Table result = tEnv.sqlQuery(sql);
        tEnv.toRetractStream(result, Row.class).print();
        String s = env.getExecutionPlan();
        env.execute();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror/udf/Mod_UDF.java
================================================
package flink.examples.sql._01.countdistincterror.udf;

import org.apache.flink.table.functions.ScalarFunction;

/**
 * Scalar UDF computing {@code id % remainder}, narrowed to int.
 * Used by the surrounding examples to bucket ids (e.g. mod(id, 100)) for
 * splitting distinct counts across groups.
 */
public class Mod_UDF extends ScalarFunction {

    public int eval(long id, int remainder) {
        return (int) (id % remainder);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror/udf/StatusMapper1_UDF.java
================================================
package flink.examples.sql._01.countdistincterror.udf;

import org.apache.flink.table.functions.ScalarFunction;

/**
 * Scalar UDF mapping a status code ("1"/"2"/"3") to a display name.
 *
 * NOTE(review): the instance counter {@code i} makes this function stateful
 * and therefore non-deterministic — on its 6th invocation (i == 5) it returns
 * "等级4" regardless of the input. The enclosing "countdistincterror" examples
 * appear to use this deliberately to demonstrate inconsistent results; do not
 * reuse this UDF as-is.
 */
public class StatusMapper1_UDF extends ScalarFunction {

    // Invocation counter; persists across eval() calls on the same instance.
    private int i = 0;

    public String eval(String status) {
        if (i == 5) {
            // 6th call: override the mapping unconditionally.
            i++;
            return "等级4";
        } else {
            i++;
            if ("1".equals(status)) {
                return "等级1";
            } else if ("2".equals(status)) {
                return "等级2";
            } else if ("3".equals(status)) {
                return "等级3";
            }
        }
        // Unrecognized status.
        return "未知";
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_01/countdistincterror/udf/StatusMapper_UDF.java
================================================
package flink.examples.sql._01.countdistincterror.udf;

import org.apache.flink.table.functions.TableFunction;

/**
 * Table-function variant of the status mapper. Also stateful via the
 * instance counter {@code i}: it sleeps 2 s on the 7th call (i == 6) and
 * emits "等级4" on the 6th call (i == 5) regardless of input.
 */
public class StatusMapper_UDF extends TableFunction {

    // Invocation counter; persists across eval() calls on the same instance.
    private int i = 0;

    public void eval(String status) throws InterruptedException {
        if (i == 6) {
            // 7th call: simulate a slow lookup.
            Thread.sleep(2000L);
        }
        if (i == 5) {
            collect("等级4");
        } else {
            if ("1".equals(status)) {
collect("等级1"); } else if ("2".equals(status)) { collect("等级2"); } else if ("3".equals(status)) { collect("等级3"); } } i++; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_02/timezone/TimeZoneTest.java ================================================ package flink.examples.sql._02.timezone; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF; public class TimeZoneTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("table.local-time-zone", "GMT+08:00"); DataStream> tuple3DataStream = flinkEnv.env().fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627254000000L), // 北京时间:2021-07-26 07:00:00 Tuple3.of("2", 1L, 1627218000000L + 5000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L))) .assignTimestampsAndWatermarks( new BoundedOutOfOrdernessTimestampExtractor>(Time.seconds(0L)) { @Override public long extractTimestamp(Tuple3 element) { return element.f2; } }); flinkEnv.streamTEnv().registerFunction("mod", new Mod_UDF()); 
flinkEnv.streamTEnv().registerFunction("status_mapper", new StatusMapper_UDF()); flinkEnv.streamTEnv().createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp, rowtime.rowtime"); String sql = "SELECT\n" + " count(1),\n" + " cast(tumble_start(rowtime, INTERVAL '1' DAY) as string)\n" + "FROM\n" + " source_db.source_table\n" + "GROUP BY\n" + " tumble(rowtime, INTERVAL '1' DAY)"; Table result = flinkEnv.streamTEnv().sqlQuery(sql); flinkEnv.streamTEnv().toAppendStream(result, Row.class).print(); flinkEnv.env().execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_02/timezone/TimeZoneTest2.java ================================================ package flink.examples.sql._02.timezone; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableResult; import org.apache.flink.types.Row; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF; import lombok.extern.slf4j.Slf4j; @Slf4j public class TimeZoneTest2 { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); DataStream> tuple3DataStream = flinkEnv.env().fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627254000000L), // 北京时间:2021-07-26 07:00:00 Tuple3.of("2", 1L, 1627218000000L + 5000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 
1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L))) .assignTimestampsAndWatermarks( new BoundedOutOfOrdernessTimestampExtractor>(Time.seconds(0L)) { @Override public long extractTimestamp(Tuple3 element) { return element.f2; } }); flinkEnv.streamTEnv().registerFunction("mod", new Mod_UDF()); flinkEnv.streamTEnv().registerFunction("status_mapper", new StatusMapper_UDF()); flinkEnv.streamTEnv().createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp, server_timestamp.rowtime"); TableResult tableResult = flinkEnv .streamTEnv() .executeSql("DESC source_db.source_table"); tableResult.print(); /** * +------------------+------------------------+------+-----+--------+-----------+ * | name | type | null | key | extras | watermark | * +------------------+------------------------+------+-----+--------+-----------+ * | status | STRING | true | | | | * | id | BIGINT | true | | | | * | timestamp | BIGINT | true | | | | * | server_timestamp | TIMESTAMP(3) *ROWTIME* | true | | | | * +------------------+------------------------+------+-----+--------+-----------+ */ String create_view_sql = "CREATE TEMPORARY VIEW source_db.source_view AS \n" + "SELECT status, id, `timestamp`, cast(server_timestamp as TIMESTAMP_LTZ(3)) as rowtime FROM source_db.source_table"; flinkEnv .streamTEnv() .executeSql(create_view_sql); flinkEnv .streamTEnv() .executeSql("DESC source_db.source_view") .print(); /** * +-----------+------------------+------+-----+--------+-----------+ * | name | type | null | key | extras | watermark | * +-----------+------------------+------+-----+--------+-----------+ * | status | STRING | true | | | | * | id | BIGINT | true | | | | * | timestamp | BIGINT | true | | | | * | rowtime | TIMESTAMP_LTZ(3) | true | | | | * +-----------+------------------+------+-----+--------+-----------+ */ String sql = "SELECT\n" + " count(1),\n" 
+ " cast(tumble_start(rowtime, INTERVAL '1' DAY) as string)\n" + "FROM\n" + " source_db.source_table\n" + "GROUP BY\n" + " tumble(rowtime, INTERVAL '1' DAY)"; /** * +I[9, 2021-07-25 00:00:00.000] * +I[1, 2021-07-26 00:00:00.000] */ Table result = flinkEnv.streamTEnv().sqlQuery(sql); flinkEnv.streamTEnv().toAppendStream(result, Row.class).print(); flinkEnv.env().execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_02/timezone/TimeZoneTest3.java ================================================ package flink.examples.sql._02.timezone; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class TimeZoneTest3 { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String exampleSql = "CREATE TABLE source_table (\n" + " id BIGINT,\n" + " money BIGINT,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp_LTZ(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.id.min' = '1',\n" + " 'fields.id.max' = '100000',\n" + " 'fields.money.min' = '1',\n" + " 'fields.money.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " window_end_timestamp bigint,\n" + " window_start_timestamp bigint,\n" + " window_end timestamp(3),\n" + " window_start timestamp(3),\n" + " sum_money BIGINT,\n" + " count_distinct_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end_timestamp, \n" + " UNIX_TIMESTAMP(CAST(window_start AS STRING)) * 1000 as window_start_timestamp, \n" + " window_end, \n" + " window_start, \n" + " sum(money) as sum_money,\n" + " count(distinct id) as count_distinct_id\n" + "FROM TABLE(CUMULATE(\n" + " TABLE source_table\n" + " , 
DESCRIPTOR(row_time)\n" + " , INTERVAL '1' SECOND\n" + " , INTERVAL '1' DAY))\n" + "GROUP BY window_start, \n" + " window_end"; for (String innerSql : exampleSql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/CreateViewTest.java ================================================ package flink.examples.sql._03.source_sink; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class CreateViewTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `name` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.table.user_defined.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "CREATE VIEW query_view as\n" + "SELECT\n" + " *\n" + "FROM source_table\n" + ";\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM query_view;"; // 临时 VIEW String TEMPORARY_VIEW_SQL = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `name` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.table.user_defined.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "CREATE TEMPORARY VIEW query_view as\n" + "SELECT\n" + " *\n" + "FROM 
source_table\n" + ";\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM query_view;"; // 临时 Table String TEMPORARY_TABLE_SQL = "CREATE TEMPORARY TABLE source_table (\n" + " user_id BIGINT,\n" + " `name` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.table.user_defined.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TEMPORARY TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "CREATE TEMPORARY VIEW query_view as\n" + "SELECT\n" + " *\n" + "FROM source_table\n" + ";\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM query_view;"; Arrays.stream(TEMPORARY_TABLE_SQL.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/DataStreamSourceEventTimeTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class DataStreamSourceEventTimeTest { public static void main(String[] args) throws Exception { 
StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); // 1. 分配 watermark DataStream r = env.addSource(new UserDefinedSource()) .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.minutes(0L)) { @Override public long extractTimestamp(Row element) { return (long) element.getField("f2"); } }); // 2. 使用 f2.rowtime 的方式将 f2 字段指为事件时间时间戳 Table sourceTable = tEnv.fromDataStream(r, "f0, f1, f2.rowtime"); tEnv.createTemporaryView("source_table", sourceTable); // 3. 在 tumble window 中使用 f2 String tumbleWindowSql = "SELECT TUMBLE_START(f2, INTERVAL '5' SECOND), COUNT(DISTINCT f0)\n" + "FROM source_table\n" + "GROUP BY TUMBLE(f2, INTERVAL '5' SECOND)" ; Table resultTable = tEnv.sqlQuery(tumbleWindowSql); tEnv.toDataStream(resultTable, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", System.currentTimeMillis())); Thread.sleep(10L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/DataStreamSourceProcessingTimeTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.api.common.typeinfo.TypeInformation; import 
org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class DataStreamSourceProcessingTimeTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); // 1. 分配 watermark DataStream r = env.addSource(new UserDefinedSource()); // 2. 使用 proctime.proctime 的方式将 f2 字段指为处理时间时间戳 Table sourceTable = tEnv.fromDataStream(r, "f0, f1, f2, proctime.proctime"); tEnv.createTemporaryView("source_table", sourceTable); // 3. 
在 tumble window 中使用 f2 String tumbleWindowSql = "SELECT TUMBLE_START(proctime, INTERVAL '5' SECOND), COUNT(DISTINCT f0)\n" + "FROM source_table\n" + "GROUP BY TUMBLE(proctime, INTERVAL '5' SECOND)" ; Table resultTable = tEnv.sqlQuery(tumbleWindowSql); tEnv.toDataStream(resultTable, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", System.currentTimeMillis())); Thread.sleep(10L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/KafkaSourceTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class KafkaSourceTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); tEnv.executeSql( "CREATE TABLE KafkaSourceTable (\n" + " `f0` STRING,\n" + " `f1` STRING\n" + ") WITH (\n" + " 
'connector' = 'kafka',\n" + " 'topic' = 'topic',\n" + " 'properties.bootstrap.servers' = 'localhost:9092',\n" + " 'properties.group.id' = 'testGroup',\n" + " 'format' = 'json'\n" + ")" ); Table t = tEnv.sqlQuery("SELECT * FROM KafkaSourceTable"); tEnv.toAppendStream(t, Row.class).print(); env.execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/RedisLookupTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableResult; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class RedisLookupTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream r = env.addSource(new UserDefinedSource()); Table sourceTable = tEnv.fromDataStream(r, Schema.newBuilder() .columnByExpression("proctime", "PROCTIME()") .build()); 
tEnv.createTemporaryView("leftTable", sourceTable); String sql = "CREATE TABLE dimTable (\n" + " name STRING,\n" + " name1 STRING,\n" + " score BIGINT" + ") WITH (\n" + " 'connector' = 'redis',\n" + " 'hostname' = '127.0.0.1',\n" + " 'port' = '6379',\n" + " 'format' = 'json',\n" + " 'lookup.cache.max-rows' = '500',\n" + " 'lookup.cache.ttl' = '3600',\n" + " 'lookup.max-retries' = '1'\n" + ")"; String joinSql = "SELECT o.f0, o.f1, c.name, c.name1, c.score\n" + "FROM leftTable AS o\n" + "LEFT JOIN dimTable FOR SYSTEM_TIME AS OF o.proctime AS c\n" + "ON o.f0 = c.name"; TableResult dimTable = tEnv.executeSql(sql); Table t = tEnv.sqlQuery(joinSql); // Table t = tEnv.sqlQuery("select * from leftTable"); tEnv.toAppendStream(t, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect(Row.of("a", "b", 1L)); Thread.sleep(10L); } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/RedisSinkTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import 
org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class RedisSinkTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream r = env.addSource(new UserDefinedSource()); Table sourceTable = tEnv.fromDataStream(r, Schema.newBuilder() .columnByExpression("proctime", "PROCTIME()") .build()); tEnv.createTemporaryView("leftTable", sourceTable); String sql = "CREATE TABLE redis_sink_table (\n" + " key STRING,\n" + " `value` STRING\n" + ") WITH (\n" + " 'connector' = 'redis',\n" + " 'hostname' = '127.0.0.1',\n" + " 'port' = '6379',\n" + " 'write.mode' = 'string'\n" + ")"; String insertSql = "INSERT INTO redis_sink_table\n" + "SELECT o.f0, o.f1\n" + "FROM leftTable AS o\n"; tEnv.executeSql(sql); tEnv.executeSql(insertSql); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect(Row.of("a", "b", 1L)); Thread.sleep(10L); } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/SocketSourceTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableResult; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class SocketSourceTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); TableResult tr = tEnv.executeSql( "CREATE TABLE UserScores (name STRING, score INT)\n" + "WITH (\n" + " 'connector' = 'socket',\n" + " 'hostname' = 'localhost',\n" + " 'port' = '9999',\n" + " 'byte-delimiter' = '10',\n" + " 'format' = 'changelog-csv',\n" + " 'changelog-csv.column-delimiter' = '|'\n" + ")" ); // TableResult tr = tEnv.executeSql( // "CREATE TABLE Orders (\n" // + " order_number BIGINT,\n" // + " price DECIMAL(32,2),\n" // + " buyer ROW,\n" // + " order_time TIMESTAMP(3)\n" // + ") WITH (\n" // + " 'connector' = 'datagen',\n" // + " 'number-of-rows' = '10',\n" // + " 'rows-per-second' = '1'\n" // + ")" // ); // Table t = tEnv.sqlQuery("SELECT * FROM Orders"); Table t = tEnv.sqlQuery("SELECT name, SUM(score) FROM UserScores GROUP BY name"); tEnv.toRetractStream(t, Row.class).print(); env.execute("测试"); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/TableApiKafkaSourceTest.java ================================================ package flink.examples.sql._03.source_sink; import 
org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class TableApiKafkaSourceTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream r = env.addSource(new UserDefinedSource()); Table sourceTable = tEnv.fromDataStream(r , Schema .newBuilder() .column("f0", "string") .column("f1", "string") .column("f2", "bigint") .columnByExpression("proctime", "PROCTIME()") .build()); tEnv.createTemporaryView("source_table", sourceTable); String selectWhereSql = "select f0 from source_table where f1 = 'b'"; Table resultTable = tEnv.sqlQuery(selectWhereSql); tEnv.toRetractStream(resultTable, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", 1L)); Thread.sleep(10L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { 
return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/UpsertKafkaSinkProtobufFormatSupportTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.configuration.Configuration; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * zk:https://www.jianshu.com/p/5491d16e6abd * /usr/local/Cellar/zookeeper/3.4.13/bin/zkServer start * * kafka:https://www.jianshu.com/p/dd2578d47ff6 * /usr/local/Cellar/kafka/2.2.1/bin/kafka-server-start /usr/local/Cellar/kafka/2.2.1/libexec/config/server.properties & * * 创建 topic:kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tuzisir * 查看 topic:kafka-topics --list --zookeeper localhost:2181 * 向 topic 发消息:kafka-console-producer --broker-list localhost:9092 --topic tuzisir * 从 topic 消费消息:kafka-console-consumer --bootstrap-server localhost:9092 --topic tuzisir --from-beginning */ public class UpsertKafkaSinkProtobufFormatSupportTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); Configuration configuration = flinkEnv.streamTEnv().getConfig().getConfiguration(); // set low-level key-value options configuration.setString("table.exec.mini-batch.enabled", "true"); // enable mini-batch optimization configuration.setString("table.exec.mini-batch.allow-latency", "5 s"); // use 5 seconds to buffer input records configuration.setString("table.exec.mini-batch.size", "5000"); // the maximum number of records can be buffered by each aggregate operator task configuration.setString("pipeline.name", "GROUP AGG MINI BATCH 案例"); // the maximum number of records can be buffered by each aggregate operator task String sourceSql = "CREATE TABLE 
source_table (\n" + " order_id STRING,\n" + " price BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.length' = '1',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '1000000'\n" + ")"; String sinkSql = "CREATE TABLE sink_table (\n" + " order_id STRING,\n" + " count_result BIGINT,\n" + " sum_result BIGINT,\n" + " avg_result DOUBLE,\n" + " min_result BIGINT,\n" + " max_result BIGINT,\n" + " PRIMARY KEY (`order_id`) NOT ENFORCED\n" + ") WITH (\n" + " 'connector' = 'upsert-kafka',\n" + " 'topic' = 'tuzisir',\n" + " 'properties.bootstrap.servers' = 'localhost:9092',\n" + " 'key.format' = 'json',\n" + " 'value.format' = 'protobuf',\n" + " 'value.protobuf.class-name' = 'flink.examples.sql._04.format.formats.protobuf.Test'\n" + ")"; String selectWhereSql = "insert into sink_table\n" + "select order_id,\n" + " count(*) as count_result,\n" + " sum(price) as sum_result,\n" + " avg(price) as avg_result,\n" + " min(price) as min_result,\n" + " max(price) as max_result\n" + "from source_table\n" + "group by order_id"; flinkEnv.streamTEnv().executeSql(sourceSql); flinkEnv.streamTEnv().executeSql(sinkSql); flinkEnv.streamTEnv().executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/UpsertKafkaSinkTest.java ================================================ package flink.examples.sql._03.source_sink; import org.apache.flink.configuration.Configuration; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * zk:https://www.jianshu.com/p/5491d16e6abd * /usr/local/Cellar/zookeeper/3.4.13/bin/zkServer start * * kafka:https://www.jianshu.com/p/dd2578d47ff6 * /usr/local/Cellar/kafka/2.2.1/bin/kafka-server-start /usr/local/Cellar/kafka/2.2.1/libexec/config/server.properties & * * 创建 topic:kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic 
tuzisir * 查看 topic:kafka-topics --list --zookeeper localhost:2181 * 向 topic 发消息:kafka-console-producer --broker-list localhost:9092 --topic tuzisir * 从 topic 消费消息:kafka-console-consumer --bootstrap-server localhost:9092 --topic tuzisir --from-beginning */ public class UpsertKafkaSinkTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); Configuration configuration = flinkEnv.streamTEnv().getConfig().getConfiguration(); // set low-level key-value options configuration.setString("table.exec.mini-batch.enabled", "true"); // enable mini-batch optimization configuration.setString("table.exec.mini-batch.allow-latency", "5 s"); // use 5 seconds to buffer input records configuration.setString("table.exec.mini-batch.size", "5000"); // the maximum number of records can be buffered by each aggregate operator task configuration.setString("pipeline.name", "GROUP AGG MINI BATCH 案例"); // the maximum number of records can be buffered by each aggregate operator task String sourceSql = "CREATE TABLE source_table (\n" + " order_id STRING,\n" + " price BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.length' = '1',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '1000000'\n" + ")"; String sinkSql = "CREATE TABLE sink_table (\n" + " order_id STRING,\n" + " count_result BIGINT,\n" + " sum_result BIGINT,\n" + " avg_result DOUBLE,\n" + " min_result BIGINT,\n" + " max_result BIGINT,\n" + " PRIMARY KEY (`order_id`) NOT ENFORCED\n" + ") WITH (\n" + " 'connector' = 'upsert-kafka',\n" + " 'topic' = 'tuzisir',\n" + " 'properties.bootstrap.servers' = 'localhost:9092',\n" + " 'key.format' = 'json',\n" + " 'value.format' = 'json'\n" + ")"; String selectWhereSql = "insert into sink_table\n" + "select order_id,\n" + " count(*) as count_result,\n" + " sum(price) as sum_result,\n" + " avg(price) as avg_result,\n" + " min(price) as min_result,\n" + " max(price) 
as max_result\n" + "from source_table\n" + "group by order_id";
        // Tail of an example class whose beginning lies outside this chunk:
        // register the source/sink DDL, then submit the query.
        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);
        flinkEnv.streamTEnv().executeSql(selectWhereSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/UserDefinedSourceTest.java
================================================
package flink.examples.sql._03.source_sink;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// NOTE(review): generic type parameters (e.g. <RowData>) appear stripped by the text
// extraction throughout this dump — verify each declaration against the repository.
/**
 * Example: reads from a user-defined table source ('connector' = 'user_defined',
 * implementation chosen via the 'class.name' option) and prints every row with the
 * built-in 'print' connector. Statements are split on ';' and executed one by one.
 */
public class UserDefinedSourceTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        // Job name shown in the Flink UI; state backend set to RocksDB.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " `name` STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.table.user_defined.UserDefinedSource'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " *\n"
                + "FROM source_table;";

        // Execute each ';'-terminated statement in order.
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/sink/Abilities_SinkFunction.java
================================================
package flink.examples.sql._03.source_sink.abilities.sink;

import org.apache.flink.api.common.functions.util.PrintSinkOutputWriter;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.apache.flink.table.connector.sink.DynamicTableSink.DataStructureConverter;
import org.apache.flink.table.data.RowData;

/**
 * Runtime sink used by {@code Abilities_TableSink}: converts each internal RowData to an
 * external object via the table runtime's converter and prints it (stdout or stderr,
 * prefixed with {@code printIdentifier}).
 */
public class Abilities_SinkFunction extends RichSinkFunction {

    private static final long serialVersionUID = 1L;

    // Converts internal RowData into external (JVM) data structures.
    private final DataStructureConverter converter;
    // Handles the actual printing, including the subtask-index prefix.
    private final PrintSinkOutputWriter writer;

    public Abilities_SinkFunction(
            DataStructureConverter converter,
            String printIdentifier,
            boolean stdErr) {
        this.converter = converter;
        this.writer = new PrintSinkOutputWriter<>(printIdentifier, stdErr);
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        // The writer needs the subtask index / parallelism to format its output prefix.
        StreamingRuntimeContext context = (StreamingRuntimeContext) getRuntimeContext();
        writer.open(context.getIndexOfThisSubtask(), context.getNumberOfParallelSubtasks());
    }

    @Override
    public void invoke(RowData value, SinkFunction.Context context) {
        Object data = converter.toExternal(value);
        // 'assert' only fires with -ea; a null conversion result otherwise NPEs on toString().
        assert data != null;
        writer.write(data.toString());
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/sink/Abilities_TableSink.java
================================================
package flink.examples.sql._03.source_sink.abilities.sink;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.annotation.Nullable;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.connector.sink.abilities.SupportsOverwrite;
import org.apache.flink.table.connector.sink.abilities.SupportsPartitioning;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

import com.google.common.collect.Maps;

import flink.examples.JacksonUtils;
import lombok.extern.slf4j.Slf4j;

/**
 * Print-style DynamicTableSink demonstrating three sink "ability" interfaces:
 * overwrite, static partitioning, and writable metadata (flink_write_timestamp).
 */
@Slf4j
public class Abilities_TableSink implements DynamicTableSink
        , SupportsOverwrite
        , SupportsPartitioning
        , SupportsWritingMetadata {

    // Consumed data type; replaced when metadata columns are applied.
    private DataType type;
    private final String printIdentifier;
    private final boolean stdErr;
    private final @Nullable Integer parallelism;
    // Set by applyOverwrite(); recorded but not otherwise used in this example.
    private boolean overwrite = false;
    // Set by applyStaticPartition(); recorded but not otherwise used in this example.
    private Map staticPartition;

    public Abilities_TableSink(
            DataType type,
            String printIdentifier,
            boolean stdErr,
            Integer parallelism) {
        this.type = type;
        this.printIdentifier = printIdentifier;
        this.stdErr = stdErr;
        this.parallelism = parallelism;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        // Accept whatever changelog mode the planner requests.
        return requestedMode;
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(DynamicTableSink.Context context) {
        DataStructureConverter converter = context.createDataStructureConverter(type);
        return SinkFunctionProvider.of(
                new Abilities_SinkFunction(converter, printIdentifier, stdErr), parallelism);
    }

    @Override
    public DynamicTableSink copy() {
        return new Abilities_TableSink(type, printIdentifier, stdErr, parallelism);
    }

    @Override
    public String asSummaryString() {
        return "Print to " + (stdErr ? "System.err" : "System.out");
    }

    @Override
    public void applyOverwrite(boolean overwrite) {
        this.overwrite = overwrite;
    }

    @Override
    public void applyStaticPartition(Map partition) {
        // Defensive copy of the planner-provided partition spec.
        this.staticPartition = Maps.newHashMap(partition);
    }

    @Override
    public Map listWritableMetadata() {
        // One writable metadata column is exposed: flink_write_timestamp (BIGINT).
        return new HashMap() {{
            put("flink_write_timestamp", DataTypes.BIGINT());
        }};
    }

    @Override
    public void applyWritableMetadata(List metadataKeys, DataType consumedDataType) {
        // Planner hands back the full consumed type (physical + metadata columns).
        this.type = consumedDataType;
        log.info("metadataKeys:" + JacksonUtils.bean2Json(metadataKeys));
        log.info("consumedDataType:" + JacksonUtils.bean2Json(consumedDataType));
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/sink/Abilities_TableSinkFactory.java
================================================
package flink.examples.sql._03.source_sink.abilities.sink;

import static org.apache.flink.configuration.ConfigOptions.key;

import java.util.HashSet;
import java.util.Set;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.FactoryUtil;

/**
 * Factory for {@code Abilities_TableSink}; matched by 'connector' = 'abilities_print'.
 */
public class Abilities_TableSinkFactory implements DynamicTableSinkFactory {

    public static final String IDENTIFIER = "abilities_print";

    public static final ConfigOption PRINT_IDENTIFIER = key("print-identifier")
            .stringType()
            .noDefaultValue()
            .withDescription(
                    "Message that identify print and is prefixed to the output of the value.");

    public static final ConfigOption STANDARD_ERROR = key("standard-error")
            .booleanType()
            .defaultValue(false)
            .withDescription(
                    "True, if the format should print to standard error instead of standard out.");

    @Override
    public String factoryIdentifier() {
        return IDENTIFIER;
    }

    @Override
    public Set> requiredOptions() {
        // No required options; everything is optional.
        return new HashSet<>();
    }

    @Override
    public Set> optionalOptions() {
        Set> options = new HashSet<>();
        options.add(PRINT_IDENTIFIER);
        options.add(STANDARD_ERROR);
        options.add(FactoryUtil.SINK_PARALLELISM);
        return options;
    }

    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
        helper.validate();
        ReadableConfig options = helper.getOptions();
        return new Abilities_TableSink(
                context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType(),
                options.get(PRINT_IDENTIFIER),
                options.get(STANDARD_ERROR),
                options.getOptional(FactoryUtil.SINK_PARALLELISM).orElse(null));
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/sink/_01_SupportsWritingMetadata_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.sink;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsWritingMetadata: reads flink_read_timestamp (source metadata)
 * and writes it into the sink's flink_write_timestamp metadata column.
 */
public class _01_SupportsWritingMetadata_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT METADATA VIRTUAL,\n"
                + " `name` STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " flink_write_timestamp BIGINT METADATA,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'abilities_print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " user_id\n"
                + " , flink_read_timestamp as flink_write_timestamp\n"
                + " , name\n"
                + "FROM source_table";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/Abilities_SourceFunction.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.table.data.RowData;

import com.google.common.collect.ImmutableMap;

import flink.examples.JacksonUtils;

/**
 * Demo source: emits one JSON record per second (user_id / name / flink_read_timestamp),
 * deserialized through the supplied schema. Constructor overloads select the pushed-down
 * behavior: record limit, or per-record source watermarks.
 */
public class Abilities_SourceFunction extends RichSourceFunction {

    private DeserializationSchema dser;
    // Max record count; -1 means unbounded.
    private long limit = -1;
    private volatile boolean isCancel = false;
    // When true, emit a watermark equal to the wall-clock time of each record.
    private boolean enableSourceWatermark = false;

    public Abilities_SourceFunction(DeserializationSchema dser) {
        this.dser = dser;
    }

    public Abilities_SourceFunction(DeserializationSchema dser, long limit) {
        this.dser = dser;
        this.limit = limit;
    }

    public Abilities_SourceFunction(DeserializationSchema dser, boolean enableSourceWatermark) {
        this.dser = dser;
        this.enableSourceWatermark = enableSourceWatermark;
    }

    @Override
    public void run(SourceContext ctx) throws Exception {
        int i = 0;
        while (!this.isCancel) {
            long currentTimeMills = System.currentTimeMillis();
            // Build a JSON payload and push it through the table format's deserializer.
            ctx.collect(this.dser.deserialize(
                    JacksonUtils.bean2Json(ImmutableMap.of(
                            "user_id", 11111L + i
                            , "name", "antigeneral"
                            , "flink_read_timestamp", currentTimeMills + "")).getBytes()
            ));
            Thread.sleep(1000);
            i++;
            // NOTE(review): with 'i > limit' this emits limit + 1 records before stopping — confirm intended.
            if (limit >= 0 && i > limit) {
                this.isCancel = true;
            }
            if (enableSourceWatermark) {
                ctx.emitWatermark(new Watermark(currentTimeMills));
            }
        }
    }

    @Override
    public void cancel() {
        this.isCancel = true;
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/Abilities_TableSource.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableColumn.MetadataColumn;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.SourceFunctionProvider;
import org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata;
import org.apache.flink.table.connector.source.abilities.SupportsSourceWatermark;
import org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.expressions.ResolvedExpression;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.TableSchemaUtils;

import com.google.common.collect.Lists;

import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;

// NOTE(review): generic type parameters (e.g. <RowData>) appear stripped by the text
// extraction throughout this dump — verify each declaration against the repository.
/**
 * Demo ScanTableSource implementing all seven source "ability" interfaces, so each
 * push-down can be observed in isolation via the matching test job. The runtime
 * SourceFunction is loaded reflectively from the 'class.name' table option.
 */
@Slf4j
public class Abilities_TableSource implements ScanTableSource
        , SupportsFilterPushDown // filter (WHERE) push-down
        , SupportsLimitPushDown // LIMIT push-down
        , SupportsPartitionPushDown //
        , SupportsProjectionPushDown // projection (SELECT column) push-down
        , SupportsReadingMetadata // metadata columns
        , SupportsWatermarkPushDown
        , SupportsSourceWatermark {

    // Fully-qualified class name of the runtime RichSourceFunction (from 'class.name').
    private final String className;
    private final DecodingFormat> decodingFormat;
    private final DataType sourceRowDataType;
    // Replaced by applyReadableMetadata() when metadata columns are selected.
    private DataType producedDataType;
    private TableSchema physicalSchema;
    // Replaced by applyProjection() when the planner pushes a projection down.
    private TableSchema tableSchema;
    // Pushed-down LIMIT; -1 means none.
    private long limit = -1;
    // Pushed-down watermark strategy; recorded but not forwarded to the runtime function.
    private WatermarkStrategy watermarkStrategy;
    // True once SOURCE_WATERMARK() is used in the DDL.
    private boolean enableSourceWatermark;
    // Filters accepted via applyFilters(); recorded but not evaluated by the demo source.
    private List filters;
    private List metadataKeys;

    public Abilities_TableSource(
            String className,
            DecodingFormat> decodingFormat,
            DataType sourceRowDataType,
            DataType producedDataType,
            TableSchema physicalSchema,
            TableSchema tableSchema) {
        // NOTE(review): result discarded — looks like leftover debug code.
        DataTypes.BIGINT();
        this.className = className;
        this.decodingFormat = decodingFormat;
        this.sourceRowDataType = sourceRowDataType;
        this.producedDataType = producedDataType;
        this.physicalSchema = physicalSchema;
        this.tableSchema = tableSchema;
    }

    @Override
    public ChangelogMode getChangelogMode() {
        // in our example the format decides about the changelog mode
        // but it could also be the source itself
        return decodingFormat.getChangelogMode();
    }

    @SneakyThrows
    @Override
    public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
        // create runtime classes that are shipped to the cluster
        final DeserializationSchema deserializer = decodingFormat.createRuntimeDecoder(
                runtimeProviderContext,
                this.producedDataType);
        // Load the runtime SourceFunction reflectively and pick the constructor that
        // matches whichever push-down has been applied.
        Class clazz = this.getClass().getClassLoader().loadClass(className);
        RichSourceFunction r;
        if (limit > 0) {
            // NOTE(review): a pushed-down LIMIT 0 is not forwarded due to the strict '>' — confirm intended.
            r = (RichSourceFunction) clazz.getConstructor(DeserializationSchema.class, long.class).newInstance(deserializer, this.limit);
        } else if (enableSourceWatermark) {
            r = (RichSourceFunction) clazz.getConstructor(DeserializationSchema.class, boolean.class).newInstance(deserializer, this.enableSourceWatermark);
        } else {
            r = (RichSourceFunction) clazz.getConstructor(DeserializationSchema.class).newInstance(deserializer);
        }
        // 'false' = unbounded source.
        return SourceFunctionProvider.of(r, false);
    }

    @Override
    public DynamicTableSource copy() {
        return new Abilities_TableSource(className, decodingFormat, sourceRowDataType,
                producedDataType, physicalSchema, tableSchema);
    }

    @Override
    public String asSummaryString() {
        return "Socket Table Source";
    }

    @Override
    public Result applyFilters(List filters) {
        this.filters = new LinkedList<>(filters);
        // Alternative: keep every filter in the Flink runtime (push nothing down):
        // return Result.of(Lists.newLinkedList(), filters);
        // Report all filters as accepted by the source (none remain for the runtime).
        return Result.of(filters, Lists.newLinkedList());
    }

    @Override
    public void applyLimit(long limit) {
        this.limit = limit;
    }

    @Override
    public Optional>> listPartitions() {
        // No partitions known statically.
        return Optional.empty();
    }

    @Override
    public void applyPartitions(List> remainingPartitions) {
        // NOTE(review): placeholder side effect only — partitions are not actually applied.
        System.out.println(1);
    }

    @Override
    public boolean supportsNestedProjection() {
        return false;
    }

    @Override
    public void applyProjection(int[][] projectedFields) {
        // Narrow the schema to the projected physical columns, re-appending metadata columns.
        this.tableSchema = projectSchemaWithMetadata(this.tableSchema, projectedFields);
    }

    @Override
    public Map listReadableMetadata() {
        // One readable metadata column is exposed: flink_read_timestamp (BIGINT).
        return new HashMap() {{
            put("flink_read_timestamp", DataTypes.BIGINT());
        }};
    }

    @Override
    public void applyReadableMetadata(List metadataKeys, DataType producedDataType) {
        this.metadataKeys = metadataKeys;
        this.producedDataType = producedDataType;
    }

    @Override
    public void applyWatermark(WatermarkStrategy watermarkStrategy) {
        log.info("Successfully applyWatermark");
        this.watermarkStrategy = watermarkStrategy;
    }

    @Override
    public void applySourceWatermark() {
        log.info("Successfully applySourceWatermark");
        this.enableSourceWatermark = true;
    }

    /**
     * Projects the physical columns of {@code tableSchema} by {@code projectedFields},
     * then appends every metadata column from the original schema unchanged.
     */
    public static TableSchema projectSchemaWithMetadata(TableSchema tableSchema, int[][] projectedFields) {
        TableSchema.Builder builder = new TableSchema.Builder();
        TableSchema physicalProjectedSchema =
                TableSchemaUtils.projectSchema(TableSchemaUtils.getPhysicalSchema(tableSchema), projectedFields);
        physicalProjectedSchema
                .getTableColumns()
                .forEach(
                        tableColumn -> {
                            if (tableColumn.isPhysical()) {
                                builder.field(tableColumn.getName(), tableColumn.getType());
                            }
                        });
        tableSchema
                .getTableColumns()
                .forEach(
                        tableColumn -> {
                            if (tableColumn instanceof MetadataColumn) {
                                builder.field(tableColumn.getName(), tableColumn.getType());
                            }
                        });
        return builder.build();
    }

    /**
     * Returns a schema containing the physical and metadata columns of {@code tableSchema}
     * (computed columns are dropped).
     */
    public static TableSchema getSchemaWithMetadata(TableSchema tableSchema) {
        TableSchema.Builder builder = new TableSchema.Builder();
        tableSchema
                .getTableColumns()
                .forEach(
                        tableColumn -> {
                            if (tableColumn.isPhysical()) {
                                builder.field(tableColumn.getName(), tableColumn.getType());
                            } else if (tableColumn instanceof MetadataColumn) {
                                builder.field(tableColumn.getName(), tableColumn.getType());
                            }
                        });
        return builder.build();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/Abilities_TableSourceFactory.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.HashSet;
import java.util.Set;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DeserializationFormatFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.TableSchemaUtils;

/**
 * Factory for {@code Abilities_TableSource}; matched by
 * 'connector' = 'supports_reading_metadata_user_defined'.
 */
public class Abilities_TableSourceFactory implements DynamicTableSourceFactory {

    // define all options statically
    public static final ConfigOption CLASS_NAME = ConfigOptions.key("class.name")
            .stringType()
            .noDefaultValue();

    @Override
    public String factoryIdentifier() {
        return "supports_reading_metadata_user_defined"; // used for matching to `connector = '...'`
    }

    @Override
    public Set> requiredOptions() {
        final Set> options = new HashSet<>();
        options.add(CLASS_NAME);
        options.add(FactoryUtil.FORMAT); // use pre-defined option for format
        return options;
    }

    @Override
    public Set> optionalOptions() {
        final Set> options = new HashSet<>();
        return options;
    }

    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        // either implement your custom validation logic here ...
        // or use the provided helper utility
        final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);

        // discover a suitable decoding format
        final DecodingFormat> decodingFormat = helper.discoverDecodingFormat(
                DeserializationFormatFactory.class,
                FactoryUtil.FORMAT);

        // validate all options
        helper.validate();

        // get the validated options
        final ReadableConfig options = helper.getOptions();
        final String className = options.get(CLASS_NAME);

        // derive the produced data type (excluding computed columns) from the catalog table
        final DataType producedDataType =
                context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType();
        final DataType sourceRowDataType =
                context.getCatalogTable().getResolvedSchema().toSourceRowDataType();
        // NOTE(review): sinkRowDataType and schema below are computed but never used.
        final DataType sinkRowDataType =
                context.getCatalogTable().getResolvedSchema().toSinkRowDataType();
        final Schema schema = context.getCatalogTable().getUnresolvedSchema();

        TableSchema physicalSchema =
                TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
        TableSchema tableSchema = context.getCatalogTable().getSchema();

        // create and return dynamic table source
        return new Abilities_TableSource(className
                , decodingFormat
                , sourceRowDataType
                , producedDataType
                , physicalSchema
                , tableSchema);
    }
}
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_01_SupportsFilterPushDown_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsFilterPushDown: the WHERE clause is handed to the source
 * via applyFilters().
 */
public class _01_SupportsFilterPushDown_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " `name` STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " *\n"
                + "FROM source_table\n"
                + "WHERE user_id > 3333\n";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_02_SupportsLimitPushDown_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsLimitPushDown: LIMIT 100 is handed to the source
 * via applyLimit().
 */
public class _02_SupportsLimitPushDown_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " *\n"
                + "FROM source_table\n"
                + "LIMIT 100";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_03_SupportsPartitionPushDown_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsPartitionPushDown (the demo source records but does not
 * actually apply partitions).
 */
public class _03_SupportsPartitionPushDown_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT METADATA VIRTUAL,\n"
                + " `name` STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " *\n"
                + "FROM source_table\n"
                + "LIMIT 100";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_04_SupportsProjectionPushDown_JDBC_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Projection push-down demonstrated against the built-in JDBC connector
 * (assumes a local MySQL with the user_profile.user_test table — see WITH options).
 */
public class _04_SupportsProjectionPushDown_JDBC_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");

        String sql = "CREATE TABLE source_table_1 (\n"
                + " id DECIMAL(20, 0),\n"
                + " name STRING,\n"
                + " owner STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'jdbc',\n"
                + " 'url' = 'jdbc:mysql://localhost:3306/user_profile',\n"
                + " 'username' = 'root',\n"
                + " 'password' = 'root123456',\n"
                + " 'table-name' = 'user_test'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table_2 (\n"
                + " id DECIMAL(20, 0),\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table_2\n"
                + "SELECT\n"
                + " id\n"
                + " , name\n"
                + "FROM source_table_1\n";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_04_SupportsProjectionPushDown_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsProjectionPushDown: only user_id and name1 are selected,
 * so the planner pushes the projection into the source via applyProjection().
 */
public class _04_SupportsProjectionPushDown_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " `name1` STRING,\n"
                + " `name2` STRING,\n"
                + " `name3` STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " user_id\n"
                + " , name1 as name\n"
                + "FROM source_table";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_05_SupportsReadingMetadata_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsReadingMetadata: flink_read_timestamp is declared
 * METADATA VIRTUAL and produced by the source via applyReadableMetadata().
 */
public class _05_SupportsReadingMetadata_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT METADATA VIRTUAL,\n"
                + " `name` STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " *\n"
                + "FROM source_table";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_06_SupportsWatermarkPushDown_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsWatermarkPushDown: the DDL WATERMARK declaration is
 * handed to the source via applyWatermark().
 */
public class _06_SupportsWatermarkPushDown_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT METADATA VIRTUAL,\n"
                + " time_ltz AS TO_TIMESTAMP_LTZ(flink_read_timestamp, 3),\n"
                + " `name` STRING,\n"
                + " WATERMARK FOR time_ltz AS time_ltz - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT,\n"
                + " name STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " user_id,\n"
                + " flink_read_timestamp,\n"
                + " name\n"
                + "FROM source_table";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/_07_SupportsSourceWatermark_Test.java
================================================
package flink.examples.sql._03.source_sink.abilities.source;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Test job for SupportsSourceWatermark: WATERMARK ... AS SOURCE_WATERMARK()
 * triggers applySourceWatermark(), so the source emits its own watermarks; a
 * CUMULATE window aggregation then consumes the event time.
 */
public class _07_SupportsSourceWatermark_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例");
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " flink_read_timestamp BIGINT METADATA VIRTUAL,\n"
                + " row_time AS TO_TIMESTAMP_LTZ(flink_read_timestamp, 3),\n"
                + " WATERMARK FOR row_time AS SOURCE_WATERMARK()\n"
                + ") WITH (\n"
                + " 'connector' = 'supports_reading_metadata_user_defined',\n"
                + " 'format' = 'json',\n"
                + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.Abilities_SourceFunction'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " window_end bigint,\n"
                + " window_start timestamp(3),\n"
                + " count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n"
                + " window_start, \n"
                + " count(distinct user_id) as count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '10' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/Before_Abilities_SourceFunction.java
================================================
package flink.examples.sql._03.source_sink.abilities.source.before;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.table.data.RowData;

import com.google.common.collect.ImmutableMap;

import flink.examples.JacksonUtils;

/**
 * "Before" variant of the demo source (pre-abilities baseline): emits one JSON record
 * per second; constructor overloads select a record limit or per-record watermarks.
 */
public class Before_Abilities_SourceFunction extends RichSourceFunction {

    private DeserializationSchema dser;
    // Max record count; -1 means unbounded.
    private long limit = -1;
    private volatile boolean isCancel = false;
    // When true, emit a watermark equal to the wall-clock time of each record.
    private boolean enableSourceWatermark = false;

    public Before_Abilities_SourceFunction(DeserializationSchema dser) {
        this.dser = dser;
    }

    public Before_Abilities_SourceFunction(DeserializationSchema dser, long limit) {
        this.dser = dser;
        this.limit = limit;
    }

    public Before_Abilities_SourceFunction(DeserializationSchema dser, boolean enableSourceWatermark) {
        this.dser = dser;
        this.enableSourceWatermark = enableSourceWatermark;
    }

    @Override
    public void run(SourceContext ctx) throws Exception {
        int i = 0;
        while (!this.isCancel) {
            long currentTimeMills = System.currentTimeMillis();
            // Build a JSON payload and push it through the table format's deserializer.
            ctx.collect(this.dser.deserialize(
                    JacksonUtils.bean2Json(ImmutableMap.of(
                            "user_id", 11111L + i
                            , "name", "antigeneral"
                            , "flink_read_timestamp", currentTimeMills + "")).getBytes()
            ));
            Thread.sleep(1000);
            i++;
            // NOTE(review): with 'i > limit' this emits limit + 1 records before stopping — confirm intended.
            if (limit >= 0 && i > limit) {
                this.isCancel = true;
            }
            if (enableSourceWatermark) {
                ctx.emitWatermark(new Watermark(currentTimeMills));
            }
        }
    }

    @Override
    public void cancel() {
        this.isCancel = true;
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/Before_Abilities_TableSource.java
================================================
package flink.examples.sql._03.source_sink.abilities.source.before;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.table.api.TableColumn.MetadataColumn;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.SourceFunctionProvider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;

import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;

// NOTE(review): generic type parameters (e.g. <RowData>) appear stripped by the text
// extraction throughout this dump — verify each declaration against the repository.
/**
 * "Before" baseline ScanTableSource without any ability interfaces, kept for
 * comparison with {@code Abilities_TableSource}; the runtime SourceFunction is
 * loaded reflectively from the configured class name.
 */
@Slf4j
public class Before_Abilities_TableSource implements ScanTableSource {

    // Fully-qualified class name of the runtime RichSourceFunction.
    private final String className;
    private final DecodingFormat> decodingFormat;
    private final DataType sourceRowDataType;
    private final DataType producedDataType;
    private TableSchema physicalSchema;
    private TableSchema tableSchema;
    // -1 means no limit; never changed here (no SupportsLimitPushDown).
    private long limit = -1;
    // Never set here (no SupportsWatermarkPushDown); kept to mirror the "after" variant.
    private WatermarkStrategy watermarkStrategy;
    boolean enableSourceWatermark;

    public Before_Abilities_TableSource(
            String className,
            DecodingFormat> decodingFormat,
            DataType sourceRowDataType,
            DataType producedDataType,
            TableSchema physicalSchema,
            TableSchema tableSchema) {
        this.className = className;
        this.decodingFormat = decodingFormat;
        this.sourceRowDataType = sourceRowDataType;
        this.producedDataType = producedDataType;
        this.physicalSchema = physicalSchema;
        this.tableSchema = tableSchema;
    }

    @Override
    public ChangelogMode getChangelogMode() {
        // in our example the format decides about the changelog mode
        // but it could also be the source itself
        return decodingFormat.getChangelogMode();
    }

    @SneakyThrows
    @Override
    public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
        // Decode against the physical + metadata schema (computed columns dropped).
        final DeserializationSchema deserializer = decodingFormat.createRuntimeDecoder(
                runtimeProviderContext,
                getSchemaWithMetadata(this.tableSchema).toRowDataType());
        // Load the runtime SourceFunction reflectively; pick the matching constructor.
        Class clazz = this.getClass().getClassLoader().loadClass(className);
        RichSourceFunction r;
        if (limit > 0) {
            r = (RichSourceFunction) clazz.getConstructor(DeserializationSchema.class, long.class).newInstance(deserializer, this.limit);
        } else if (enableSourceWatermark) {
            r = (RichSourceFunction) clazz.getConstructor(DeserializationSchema.class, boolean.class).newInstance(deserializer, this.enableSourceWatermark);
        } else {
            r = (RichSourceFunction) clazz.getConstructor(DeserializationSchema.class).newInstance(deserializer);
        }
        // 'false' = unbounded source.
        return SourceFunctionProvider.of(r, false);
    }

    @Override
    public DynamicTableSource copy() {
        return new Before_Abilities_TableSource(className, decodingFormat, sourceRowDataType,
                producedDataType, physicalSchema, tableSchema);
    }

    @Override
    public String asSummaryString() {
        return "Socket Table Source";
    }

    /**
     * Returns a schema containing the physical and metadata columns of {@code tableSchema}
     * (computed columns are dropped).
     */
    public static TableSchema getSchemaWithMetadata(TableSchema tableSchema) {
        TableSchema.Builder builder = new TableSchema.Builder();
        tableSchema
                .getTableColumns()
                .forEach(
                        tableColumn -> {
                            if (tableColumn.isPhysical()) {
                                builder.field(tableColumn.getName(), tableColumn.getType());
                            } else if (tableColumn instanceof MetadataColumn) {
                                builder.field(tableColumn.getName(), tableColumn.getType());
                            }
                        });
        return builder.build();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/Before_Abilities_TableSourceFactory.java
================================================
package flink.examples.sql._03.source_sink.abilities.source.before;

import java.util.HashSet;
import java.util.Set;
import
org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ConfigOptions; import org.apache.flink.configuration.ReadableConfig; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.TableSchema; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.source.DynamicTableSource; import org.apache.flink.table.data.RowData; import org.apache.flink.table.factories.DeserializationFormatFactory; import org.apache.flink.table.factories.DynamicTableSourceFactory; import org.apache.flink.table.factories.FactoryUtil; import org.apache.flink.table.types.DataType; import org.apache.flink.table.utils.TableSchemaUtils; public class Before_Abilities_TableSourceFactory implements DynamicTableSourceFactory { // define all options statically public static final ConfigOption CLASS_NAME = ConfigOptions.key("class.name") .stringType() .noDefaultValue(); @Override public String factoryIdentifier() { return "before_supports_reading_metadata_user_defined"; // used for matching to `connector = '...'` } @Override public Set> requiredOptions() { final Set> options = new HashSet<>(); options.add(CLASS_NAME); options.add(FactoryUtil.FORMAT); // use pre-defined option for format return options; } @Override public Set> optionalOptions() { final Set> options = new HashSet<>(); return options; } @Override public DynamicTableSource createDynamicTableSource(Context context) { // either implement your custom validation logic here ... 
// or use the provided helper utility final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); // discover a suitable decoding format final DecodingFormat> decodingFormat = helper.discoverDecodingFormat( DeserializationFormatFactory.class, FactoryUtil.FORMAT); // validate all options helper.validate(); // get the validated options final ReadableConfig options = helper.getOptions(); final String className = options.get(CLASS_NAME); // derive the produced data type (excluding computed columns) from the catalog table final DataType producedDataType = context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType(); final DataType sourceRowDataType = context.getCatalogTable().getResolvedSchema().toSourceRowDataType(); final DataType sinkRowDataType = context.getCatalogTable().getResolvedSchema().toSinkRowDataType(); final Schema schema = context.getCatalogTable().getUnresolvedSchema(); TableSchema physicalSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema()); TableSchema tableSchema = context.getCatalogTable().getSchema(); // create and return dynamic table source return new Before_Abilities_TableSource(className , decodingFormat , sourceRowDataType , producedDataType , physicalSchema , tableSchema); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_01_Before_SupportsFilterPushDown_Test.java ================================================ package flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _01_Before_SupportsFilterPushDown_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); 
flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `name` STRING\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM source_table\n" + "WHERE user_id > 3333\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_02_Before_SupportsLimitPushDown_Test.java ================================================ package flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _02_Before_SupportsLimitPushDown_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " flink_read_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " flink_read_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 
'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM source_table\n" + "LIMIT 100"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_03_Before_SupportsPartitionPushDown_Test.java ================================================ package flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _03_Before_SupportsPartitionPushDown_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " flink_read_timestamp BIGINT METADATA VIRTUAL,\n" + " `name` STRING\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " flink_read_timestamp BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM source_table\n" + "LIMIT 100"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_04_Before_SupportsProjectionPushDown_Test.java ================================================ package 
flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _04_Before_SupportsProjectionPushDown_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `name1` STRING,\n" + " `name2` STRING,\n" + " `name3` STRING\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " user_id\n" + " , name1 as name\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_05_Before_SupportsReadingMetadata_Test.java ================================================ package flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _05_Before_SupportsReadingMetadata_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); 
flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `name` STRING\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " *\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_06_Before_SupportsWatermarkPushDown_Test.java ================================================ package flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _06_Before_SupportsWatermarkPushDown_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " time_ltz AS cast(CURRENT_TIMESTAMP as TIMESTAMP(3)),\n" + " `name` STRING,\n" + " WATERMARK FOR time_ltz AS time_ltz - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " 
user_id BIGINT,\n" + " name STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " user_id,\n" + " name\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/abilities/source/before/_07_Before_SupportsSourceWatermark_Test.java ================================================ package flink.examples.sql._03.source_sink.abilities.source.before; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _07_Before_SupportsSourceWatermark_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 用户自定义 SOURCE 案例"); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("state.backend", "rocksdb"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " row_time AS cast(CURRENT_TIMESTAMP as TIMESTAMP(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'before_supports_reading_metadata_user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_SourceFunction'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " window_end bigint,\n" + " window_start timestamp(3),\n" + " count_distinct_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n" + " window_start, \n" + " count(distinct user_id) as count_distinct_id\n" + "FROM TABLE(CUMULATE(\n" + " TABLE source_table\n" + " , DESCRIPTOR(row_time)\n" + " , INTERVAL '10' SECOND\n" + " , INTERVAL '1' DAY))\n" 
+ "GROUP BY window_start, \n" + " window_end"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/ddl/TableApiDDLTest.java ================================================ package flink.examples.sql._03.source_sink.ddl; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.api.Table; import org.apache.flink.table.descriptors.CustomConnectorDescriptor; import org.apache.flink.table.descriptors.Schema; import org.apache.flink.types.Row; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import flink.examples.sql._05.format.formats.protobuf.descriptors.Protobuf; public class TableApiDDLTest { // https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/dev/table/sql/queries/overview/ public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // flinkEnv.getStreamTableEnvironment().getConfig().getConfiguration().setString("table.exec.emit.early-fire.enabled", "true"); // flinkEnv.getStreamTableEnvironment().getConfig().getConfiguration().setString("table.exec.emit.early-fire.delay", "60 s"); String sql = "CREATE TABLE redis_sink_table (\n" + " key STRING,\n" + " `value` STRING\n" + ") WITH (\n" + " 'connector' = 'redis',\n" + " 'hostname' = '127.0.0.1',\n" + " 'port' = '6379',\n" + " 'write.mode' = 'string'\n" + ")"; // create and register a TableSink final Schema schema = new Schema() .field("key", DataTypes.STRING()) .field("value", DataTypes.STRING()); flinkEnv.getStreamTableEnvironment() 
.connect( new CustomConnectorDescriptor("redis", 1, true) .property("hostname", "127.0.0.1") .property("port", "6379") .property("write.mode", "string") ) .withFormat(new Protobuf()) .withSchema(schema) .createTemporaryTable("redis_sink_table"); DataStream r = flinkEnv.getStreamExecutionEnvironment().addSource(new UserDefinedSource()); Table sourceTable = flinkEnv.getStreamTableEnvironment().fromDataStream(r, org.apache.flink.table.api.Schema.newBuilder() .columnByExpression("proctime", "PROCTIME()") .build()); flinkEnv.getStreamTableEnvironment() .createTemporaryView("leftTable", sourceTable); String insertSql = "INSERT INTO redis_sink_table\n" + "SELECT o.f0, o.f1\n" + "FROM leftTable AS o\n"; flinkEnv.getStreamTableEnvironment().executeSql(sql); flinkEnv.getStreamTableEnvironment().executeSql(insertSql); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect(Row.of("a", "b", 1L)); Thread.sleep(10L); } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/container/RedisCommandsContainer.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. 
You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package flink.examples.sql._03.source_sink.table.redis.container;

import java.io.Closeable;
import java.io.Serializable;
import java.util.List;

/**
 * The container for all available Redis commands.
 */
public interface RedisCommandsContainer extends Closeable, Serializable {

    /** Verifies connectivity; called once before the container is used. */
    void open() throws Exception;

    /** GET for a single binary key; null when the key does not exist. */
    byte[] get(byte[] key);

    // NOTE(review): generics reconstructed from usage (pipeline GET over byte[] keys,
    // syncAndReturnAll() yields List<Object>) — verify against callers.
    List<Object> multiGet(List<byte[]> key);

    /** HGET of one hash field for a binary key. */
    byte[] hget(byte[] key, byte[] hashField);
}
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/container/RedisCommandsContainerBuilder.java
================================================
package flink.examples.sql._03.source_sink.table.redis.container;

import java.util.Objects;

import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;

import redis.clients.jedis.JedisPool;

/** Builds {@link RedisCommandsContainer}s from Flink jedis configurations. */
public class RedisCommandsContainerBuilder {

    public static RedisCommandsContainer build(FlinkJedisConfigBase flinkJedisConfigBase) {
        if (flinkJedisConfigBase instanceof FlinkJedisPoolConfig) {
            FlinkJedisPoolConfig flinkJedisPoolConfig = (FlinkJedisPoolConfig) flinkJedisConfigBase;
            return RedisCommandsContainerBuilder.build(flinkJedisPoolConfig);
        }
        // Cluster and sentinel modes are not wired up in this example:
        // else if (flinkJedisConfigBase instanceof FlinkJedisClusterConfig) {
        //     FlinkJedisClusterConfig flinkJedisClusterConfig = (FlinkJedisClusterConfig) flinkJedisConfigBase;
        //     return RedisCommandsContainerBuilder.build(flinkJedisClusterConfig);
        // } else if (flinkJedisConfigBase instanceof FlinkJedisSentinelConfig) {
        //     FlinkJedisSentinelConfig flinkJedisSentinelConfig = (FlinkJedisSentinelConfig) flinkJedisConfigBase;
        //     return RedisCommandsContainerBuilder.build(flinkJedisSentinelConfig);
        // }
        else {
            throw new IllegalArgumentException("Jedis configuration not found");
        }
    }

    public static RedisCommandsContainer build(FlinkJedisPoolConfig jedisPoolConfig) {
        Objects.requireNonNull(jedisPoolConfig, "Redis pool config should not be Null");

        GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
        genericObjectPoolConfig.setMaxIdle(jedisPoolConfig.getMaxIdle());
        genericObjectPoolConfig.setMaxTotal(jedisPoolConfig.getMaxTotal());
        genericObjectPoolConfig.setMinIdle(jedisPoolConfig.getMinIdle());

        JedisPool jedisPool = new JedisPool(genericObjectPoolConfig, jedisPoolConfig.getHost(),
                jedisPoolConfig.getPort(), jedisPoolConfig.getConnectionTimeout(), jedisPoolConfig.getPassword(),
                jedisPoolConfig.getDatabase());
        return new RedisContainer(jedisPool);
    }
}
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/container/RedisContainer.java
================================================
package flink.examples.sql._03.source_sink.table.redis.container;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisSentinelPool;
import redis.clients.jedis.Pipeline;

/**
 * {@link RedisCommandsContainer} backed by either a plain {@link JedisPool} or a
 * {@link JedisSentinelPool}; exactly one of the two pools is non-null.
 */
public class RedisContainer implements RedisCommandsContainer, Closeable {

    private static final long serialVersionUID = 1L;

    private transient JedisPool jedisPool;
    private transient JedisSentinelPool jedisSentinelPool;

    private static final Logger LOG = LoggerFactory.getLogger(RedisContainer.class);

    public RedisContainer(JedisPool jedisPool) {
        this.jedisPool = jedisPool;
        this.jedisSentinelPool = null;
    }

    public RedisContainer(JedisSentinelPool sentinelPool) {
        this.jedisPool = null;
        this.jedisSentinelPool = sentinelPool;
    }

    // Borrows one connection from whichever pool is configured.
    private Jedis getInstance() {
        if (jedisSentinelPool != null) {
            return jedisSentinelPool.getResource();
        } else {
            return jedisPool.getResource();
        }
    }

    // Returns a borrowed connection to its pool; close() on a pooled Jedis returns it.
    private void releaseInstance(final Jedis jedis) {
        if (jedis == null) {
            return;
        }
        try {
            jedis.close();
        } catch (Exception e) {
            LOG.error("Failed to close (return) instance to pool", e);
        }
    }

    @Override
    public void open() throws Exception {
        // BUGFIX: the previous version did `getInstance().echo("Test")` and never
        // returned the connection — every open() leaked one pooled Jedis.
        Jedis jedis = null;
        try {
            jedis = getInstance();
            jedis.echo("Test");
        } finally {
            releaseInstance(jedis);
        }
    }

    @Override
    public List<Object> multiGet(List<byte[]> key) {
        Jedis jedis = null;
        try {
            jedis = getInstance();
            // Pipeline all GETs in one round trip.
            Pipeline pipeline = jedis.pipelined();
            key.forEach(pipeline::get);
            return pipeline.syncAndReturnAll();
        } catch (Exception e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("Cannot send Redis message with command GET to key {} error message {}",
                        key, e.getMessage());
            }
            throw e;
        } finally {
            releaseInstance(jedis);
        }
    }

    @Override
    public byte[] get(byte[] key) {
        Jedis jedis = null;
        try {
            jedis = getInstance();
            return jedis.get(key);
        } catch (Exception e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("Cannot send Redis message with command GET to key {} error message {}",
                        key, e.getMessage());
            }
            throw e;
        } finally {
            releaseInstance(jedis);
        }
    }

    @Override
    public byte[] hget(byte[] key, byte[] hashField) {
        Jedis jedis = null;
        try {
            jedis = getInstance();
            return jedis.hget(key, hashField);
        } catch (Exception e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("Cannot send Redis message with command HGET to key {} hashField {} error message {}",
                        key, hashField, e.getMessage());
            }
            throw e;
        } finally {
            releaseInstance(jedis);
        }
    }

    @Override
    public void close() throws IOException {
        if (this.jedisPool != null) {
            this.jedisPool.close();
        }
        if (this.jedisSentinelPool != null) {
            this.jedisSentinelPool.close();
        }
    }
}
================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/demo/RedisDemo.java ================================================ package flink.examples.sql._03.source_sink.table.redis.demo; import java.util.HashMap; import com.google.gson.Gson; import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisPool; /** * redis 安装:https://blog.csdn.net/realize_dream/article/details/106227622 * redis java client:https://www.cnblogs.com/chenyanbin/p/12088796.html */ public class RedisDemo { public static void main(String[] args) { singleConnect(); poolConnect(); } public static void singleConnect() { // jedis单实例连接 Jedis jedis = new Jedis("127.0.0.1", 6379); String result = jedis.get("a"); HashMap h = new HashMap<>(); h.put("name", "namehhh"); h.put("name1", "namehhh111"); h.put("score", 3L); String s = new Gson().toJson(h); jedis.set("a", s); System.out.println(result); jedis.close(); } public static void poolConnect() { //jedis连接池 JedisPool pool = new JedisPool("127.0.0.1", 6379); Jedis jedis = pool.getResource(); String result = jedis.get("a"); System.out.println(result); jedis.close(); pool.close(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/mapper/LookupRedisMapper.java ================================================ package flink.examples.sql._03.source_sink.table.redis.mapper; import java.io.IOException; import org.apache.flink.api.common.serialization.AbstractDeserializationSchema; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.table.data.RowData; import com.google.common.base.Joiner; public class LookupRedisMapper extends AbstractDeserializationSchema implements SerializationSchema { private DeserializationSchema valueDeserializationSchema; public LookupRedisMapper(DeserializationSchema valueDeserializationSchema) { 
this.valueDeserializationSchema = valueDeserializationSchema; } public RedisCommandDescription getCommandDescription() { return new RedisCommandDescription(RedisCommand.GET); } @Override public RowData deserialize(byte[] message) { try { return this.valueDeserializationSchema.deserialize(message); } catch (IOException e) { throw new RuntimeException(e); } } @Override public byte[] serialize(Object[] element) { return Joiner.on(":").join(element).getBytes(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/mapper/RedisCommand.java ================================================ package flink.examples.sql._03.source_sink.table.redis.mapper; import org.apache.flink.streaming.connectors.redis.common.mapper.RedisDataType; public enum RedisCommand { GET(RedisDataType.STRING), HGET(RedisDataType.HASH), ; private RedisDataType redisDataType; RedisCommand(RedisDataType redisDataType) { this.redisDataType = redisDataType; } public RedisDataType getRedisDataType() { return redisDataType; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/mapper/RedisCommandDescription.java ================================================ package flink.examples.sql._03.source_sink.table.redis.mapper; import org.apache.flink.streaming.connectors.redis.common.mapper.RedisDataType; public class RedisCommandDescription { private static final long serialVersionUID = 1L; private RedisCommand redisCommand; private String additionalKey; public RedisCommandDescription(RedisCommand redisCommand, String additionalKey) { this.redisCommand = redisCommand; this.additionalKey = additionalKey; if (redisCommand.getRedisDataType() == RedisDataType.HASH) { if (additionalKey == null) { throw new IllegalArgumentException("Hash should have additional key"); } } } public RedisCommandDescription(RedisCommand redisCommand) { 
this(redisCommand, null); } public RedisCommand getRedisCommand() { return redisCommand; } public String getAdditionalKey() { return additionalKey; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/mapper/SetRedisMapper.java ================================================ package flink.examples.sql._03.source_sink.table.redis.mapper; import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand; import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription; import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper; import org.apache.flink.table.data.RowData; public class SetRedisMapper implements RedisMapper { @Override public RedisCommandDescription getCommandDescription() { return new RedisCommandDescription(RedisCommand.SET); } @Override public String getKeyFromData(RowData data) { return data.getString(0).toString(); } @Override public String getValueFromData(RowData data) { return data.getString(1).toString(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/options/RedisLookupOptions.java ================================================ package flink.examples.sql._03.source_sink.table.redis.options; import java.io.Serializable; public class RedisLookupOptions implements Serializable { private static final long serialVersionUID = 1L; private static final int DEFAULT_MAX_RETRY_TIMES = 3; protected final String hostname; protected final int port; public String getHostname() { return hostname; } public int getPort() { return port; } private final long cacheMaxSize; private final long cacheExpireMs; private final int maxRetryTimes; private final boolean lookupAsync; private final boolean isBatchMode; private final int batchSize; private final int batchMinTriggerDelayMs; public RedisLookupOptions( long cacheMaxSize , long 
cacheExpireMs , int maxRetryTimes , boolean lookupAsync , String hostname , int port , boolean isBatchMode , int batchSize , int batchMinTriggerDelayMs) { this.cacheMaxSize = cacheMaxSize; this.cacheExpireMs = cacheExpireMs; this.maxRetryTimes = maxRetryTimes; this.lookupAsync = lookupAsync; this.hostname = hostname; this.port = port; this.isBatchMode = isBatchMode; this.batchSize = batchSize; this.batchMinTriggerDelayMs = batchMinTriggerDelayMs; } public long getCacheMaxSize() { return cacheMaxSize; } public long getCacheExpireMs() { return cacheExpireMs; } public int getMaxRetryTimes() { return maxRetryTimes; } public boolean getLookupAsync() { return lookupAsync; } public static Builder builder() { return new Builder(); } public boolean isBatchMode() { return isBatchMode; } public int getBatchSize() { return batchSize; } public int getBatchMinTriggerDelayMs() { return batchMinTriggerDelayMs; } /** Builder of {@link RedisLookupOptions}. */ public static class Builder { private long cacheMaxSize = -1L; private long cacheExpireMs = 0L; private int maxRetryTimes = DEFAULT_MAX_RETRY_TIMES; private boolean lookupAsync = false; private boolean isBatchMode = false; public Builder setIsBatchMode(boolean isBatchMode) { this.isBatchMode = isBatchMode; return this; } private int batchSize = 30; public Builder setBatchSize(int batchSize) { this.batchSize = batchSize; return this; } private int batchMinTriggerDelayMs = 1000; public Builder setBatchMinTriggerDelayMs(int batchMinTriggerDelayMs) { this.batchMinTriggerDelayMs = batchMinTriggerDelayMs; return this; } /** optional, lookup cache max size, over this value, the old data will be eliminated. */ public Builder setCacheMaxSize(long cacheMaxSize) { this.cacheMaxSize = cacheMaxSize; return this; } /** optional, lookup cache expire mills, over this time, the old data will expire. 
*/ public Builder setCacheExpireMs(long cacheExpireMs) { this.cacheExpireMs = cacheExpireMs; return this; } /** optional, max retry times for Hbase connector. */ public Builder setMaxRetryTimes(int maxRetryTimes) { this.maxRetryTimes = maxRetryTimes; return this; } /** optional, whether to set async lookup. */ public Builder setLookupAsync(boolean lookupAsync) { this.lookupAsync = lookupAsync; return this; } protected String hostname = "localhost"; protected int port = 6379; /** * optional, lookup cache max size, over this value, the old data will be eliminated. */ public Builder setHostname(String hostname) { this.hostname = hostname; return this; } /** * optional, lookup cache expire mills, over this time, the old data will expire. */ public Builder setPort(int port) { this.port = port; return this; } public RedisLookupOptions build() { return new RedisLookupOptions( cacheMaxSize , cacheExpireMs , maxRetryTimes , lookupAsync , hostname , port , isBatchMode , batchSize , batchMinTriggerDelayMs); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/options/RedisOptions.java ================================================ package flink.examples.sql._03.source_sink.table.redis.options; import static flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions.WRITE_MODE; import static flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions.WRITE_TTL; import static org.apache.flink.table.types.logical.utils.LogicalTypeChecks.hasRoot; import java.time.Duration; import java.util.stream.IntStream; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ConfigOptions; import org.apache.flink.configuration.ReadableConfig; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.table.types.logical.LogicalTypeRoot; import 
org.apache.flink.table.types.logical.utils.LogicalTypeChecks; import org.apache.flink.util.Preconditions; public class RedisOptions { public static final ConfigOption TIMEOUT = ConfigOptions .key("timeout") .intType() .defaultValue(2000) .withDescription("Optional timeout for connect to redis"); public static final ConfigOption MAXIDLE = ConfigOptions .key("maxIdle") .intType() .defaultValue(2) .withDescription("Optional maxIdle for connect to redis"); public static final ConfigOption MINIDLE = ConfigOptions .key("minIdle") .intType() .defaultValue(1) .withDescription("Optional minIdle for connect to redis"); public static final ConfigOption PASSWORD = ConfigOptions .key("password") .stringType() .noDefaultValue() .withDescription("Optional password for connect to redis"); public static final ConfigOption PORT = ConfigOptions .key("port") .intType() .defaultValue(6379) .withDescription("Optional port for connect to redis"); public static final ConfigOption HOSTNAME = ConfigOptions .key("hostname") .stringType() .noDefaultValue() .withDescription("Optional host for connect to redis"); public static final ConfigOption CLUSTERNODES = ConfigOptions .key("cluster-nodes") .stringType() .noDefaultValue() .withDescription("Optional nodes for connect to redis cluster"); public static final ConfigOption DATABASE = ConfigOptions .key("database") .intType() .defaultValue(0) .withDescription("Optional database for connect to redis"); public static final ConfigOption COMMAND = ConfigOptions .key("command") .stringType() .noDefaultValue() .withDescription("Optional command for connect to redis"); public static final ConfigOption REDISMODE = ConfigOptions .key("redis-mode") .stringType() .noDefaultValue() .withDescription("Optional redis-mode for connect to redis"); public static final ConfigOption REDIS_MASTER_NAME = ConfigOptions .key("master.name") .stringType() .noDefaultValue() .withDescription("Optional master.name for connect to redis sentinels"); public static final 
ConfigOption SENTINELS_INFO = ConfigOptions .key("sentinels.info") .stringType() .noDefaultValue() .withDescription("Optional sentinels.info for connect to redis sentinels"); public static final ConfigOption SENTINELS_PASSWORD = ConfigOptions .key("sentinels.password") .stringType() .noDefaultValue() .withDescription("Optional sentinels.password for connect to redis sentinels"); public static final ConfigOption KEY_COLUMN = ConfigOptions .key("key-column") .stringType() .noDefaultValue() .withDescription("Optional key-column for insert to redis"); public static final ConfigOption VALUE_COLUMN = ConfigOptions .key("value-column") .stringType() .noDefaultValue() .withDescription("Optional value_column for insert to redis"); public static final ConfigOption FIELD_COLUMN = ConfigOptions .key("field-column") .stringType() .noDefaultValue() .withDescription("Optional field_column for insert to redis"); public static final ConfigOption PUT_IF_ABSENT = ConfigOptions .key("put-if-absent") .booleanType() .defaultValue(false) .withDescription("Optional put_if_absent for insert to redis"); public static final ConfigOption LOOKUP_ASYNC = ConfigOptions.key("lookup.async") .booleanType() .defaultValue(false) .withDescription("whether to set async lookup."); public static final ConfigOption LOOKUP_CACHE_MAX_ROWS = ConfigOptions.key("lookup.cache.max-rows") .longType() .defaultValue(-1L) .withDescription( "the max number of rows of lookup cache, over this value, the oldest rows will " + "be eliminated. \"cache.max-rows\" and \"cache.ttl\" options must all be " + "specified if any of them is " + "specified. 
Cache is not enabled as default."); public static final ConfigOption LOOKUP_CACHE_TTL = ConfigOptions.key("lookup.cache.ttl") .durationType() .defaultValue(Duration.ofSeconds(0)) .withDescription("the cache time to live."); public static final ConfigOption LOOKUP_MAX_RETRIES = ConfigOptions.key("lookup.max-retries") .intType() .defaultValue(3) .withDescription("the max retry times if lookup database failed."); public static RedisLookupOptions getRedisLookupOptions(ReadableConfig tableOptions) { return (RedisLookupOptions) RedisLookupOptions .builder() .setLookupAsync(tableOptions.get(LOOKUP_ASYNC)) .setMaxRetryTimes(tableOptions.get(LOOKUP_MAX_RETRIES)) .setCacheExpireMs(tableOptions.get(LOOKUP_CACHE_TTL).toMillis()) .setCacheMaxSize(tableOptions.get(LOOKUP_CACHE_MAX_ROWS)) .setHostname(tableOptions.get(HOSTNAME)) .setPort(tableOptions.get(PORT)) .build(); } public static RedisWriteOptions getRedisWriteOptions(ReadableConfig tableOptions) { return (RedisWriteOptions) RedisWriteOptions .builder() .setWriteTtl(tableOptions.get(WRITE_TTL)) .setWriteMode(tableOptions.get(WRITE_MODE)) .setHostname(tableOptions.get(HOSTNAME)) .setPort(tableOptions.get(PORT)) .build(); } /** * Creates an array of indices that determine which physical fields of the table schema to * include in the value format. * *

See {@link #VALUE_FORMAT}, {@link #VALUE_FIELDS_INCLUDE}, and {@link #KEY_FIELDS_PREFIX} * for more information. */ public static int[] createValueFormatProjection( DataType physicalDataType) { final LogicalType physicalType = physicalDataType.getLogicalType(); Preconditions.checkArgument( hasRoot(physicalType, LogicalTypeRoot.ROW), "Row data type expected."); final int physicalFieldCount = LogicalTypeChecks.getFieldCount(physicalType); final IntStream physicalFields = IntStream.range(0, physicalFieldCount); return physicalFields.toArray(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/options/RedisWriteOptions.java ================================================ package flink.examples.sql._03.source_sink.table.redis.options; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ConfigOptions; public class RedisWriteOptions { protected final String hostname; protected final int port; public String getHostname() { return hostname; } public int getPort() { return port; } private int writeTtl; private final String writeMode; private final boolean isBatchMode; private final int batchSize; public static final ConfigOption WRITE_TTL = ConfigOptions .key("write.ttl") .intType() .defaultValue(24 * 3600) .withDescription("Optional ttl for insert to redis"); public static final ConfigOption WRITE_MODE = ConfigOptions .key("write.mode") .stringType() .defaultValue("string") .withDescription("mode for insert to redis"); public static final ConfigOption IS_BATCH_MODE = ConfigOptions .key("is.batch.mode") .booleanType() .defaultValue(false) .withDescription("if is.batch.mode is ture, means it can cache records and hit redis using jedis pipeline."); public static final ConfigOption BATCH_SIZE = ConfigOptions .key("batch.size") .intType() .defaultValue(30) .withDescription("jedis pipeline batch size."); public RedisWriteOptions(int writeTtl, String 
hostname, int port, String writeMode, boolean isBatchMode, int batchSize) { this.writeTtl = writeTtl; this.hostname = hostname; this.port = port; this.writeMode = writeMode; this.isBatchMode = isBatchMode; this.batchSize = batchSize; } public int getWriteTtl() { return writeTtl; } public static Builder builder() { return new Builder(); } public String getWriteMode() { return writeMode; } public boolean isBatchMode() { return isBatchMode; } public int getBatchSize() { return batchSize; } /** Builder of {@link RedisWriteOptions}. */ public static class Builder { private int writeTtl = 24 * 3600; /** optional, max retry times for Redis connector. */ public Builder setWriteTtl(int writeTtl) { this.writeTtl = writeTtl; return this; } protected String hostname = "localhost"; protected int port = 6379; private String writeMode = "string"; private boolean isBatchMode = false; private int batchSize = 30; /** * optional, lookup cache max size, over this value, the old data will be eliminated. */ public Builder setHostname(String hostname) { this.hostname = hostname; return this; } /** * optional, lookup cache expire mills, over this time, the old data will expire. 
*/ public Builder setPort(int port) { this.port = port; return this; } public Builder setWriteMode(String writeMode) { this.writeMode = writeMode; return this; } public RedisWriteOptions build() { return new RedisWriteOptions(writeTtl, hostname, port, writeMode, isBatchMode, batchSize); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v1/RedisDynamicTableFactory.java ================================================ package flink.examples.sql._03.source_sink.table.redis.v1; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.HOSTNAME; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.PORT; import java.util.HashSet; import java.util.Set; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ReadableConfig; import org.apache.flink.table.api.TableSchema; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.sink.DynamicTableSink; import org.apache.flink.table.connector.source.DynamicTableSource; import org.apache.flink.table.data.RowData; import org.apache.flink.table.factories.DeserializationFormatFactory; import org.apache.flink.table.factories.DynamicTableSinkFactory; import org.apache.flink.table.factories.DynamicTableSourceFactory; import org.apache.flink.table.factories.FactoryUtil; import flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions; import flink.examples.sql._03.source_sink.table.redis.options.RedisOptions; import flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions; import flink.examples.sql._03.source_sink.table.redis.v1.source.RedisDynamicTableSource; //import flink.examples.sql._03.source_sink.table.redis.v1.sink.RedisDynamicTableSink; public class RedisDynamicTableFactory implements 
DynamicTableSourceFactory, DynamicTableSinkFactory {

    /**
     * Builds the (currently disabled) sink. The v1 sink implementation is commented out, so
     * after validating the options this still returns {@code null} — using this factory as a
     * sink will NPE in the planner. See the v2 package for a working sink.
     */
    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        // either implement your custom validation logic here ...
        // or use the provided helper utility
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        // discover a suitable decoding format
        final DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
                helper.discoverDecodingFormat(
                        DeserializationFormatFactory.class,
                        FactoryUtil.FORMAT);
        // validate all options
        helper.validate();
        // get the validated options
        final ReadableConfig options = helper.getOptions();
        final RedisWriteOptions redisWriteOptions = RedisOptions.getRedisWriteOptions(options);
        TableSchema schema = context.getCatalogTable().getSchema();
        // return new RedisDynamicTableSink(
        //         schema.toPhysicalRowDataType()
        //         , decodingFormat
        //         , redisWriteOptions);
        return null;
    }

    /** Connector identifier used in DDL: 'connector' = 'redis'. */
    @Override
    public String factoryIdentifier() {
        return "redis";
    }

    /** hostname, port and format are mandatory for this connector. */
    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        final Set<ConfigOption<?>> options = new HashSet<>();
        options.add(HOSTNAME);
        options.add(PORT);
        options.add(FactoryUtil.FORMAT); // use pre-defined option for format
        return options;
    }

    /** No optional options are exposed by the v1 factory. */
    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        final Set<ConfigOption<?>> options = new HashSet<>();
        // options.add(COMMAND);
        // options.add(KEY_COLUMN);
        // options.add(VALUE_COLUMN);
        // options.add(FIELD_COLUMN);
        // options.add(TTL);
        return options;
    }

    /** Builds the lookup (dimension) table source backed by Redis GET. */
    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        // either implement your custom validation logic here ...
        // or use the provided helper utility
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        // discover a suitable decoding format
        final DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
                helper.discoverDecodingFormat(
                        DeserializationFormatFactory.class,
                        FactoryUtil.FORMAT);
        // validate all options
        helper.validate();
        // get the validated options
        final ReadableConfig options = helper.getOptions();
        final RedisLookupOptions redisLookupOptions = RedisOptions.getRedisLookupOptions(options);
        TableSchema schema = context.getCatalogTable().getSchema();
        return new RedisDynamicTableSource(
                schema.toPhysicalRowDataType()
                , decodingFormat
                , redisLookupOptions);
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v1/sink/RedisDynamicTableSink.java ================================================
package flink.examples.sql._03.source_sink.table.redis.v1.sink;//package flink.examples.sql._03.source_sink.table.redis.v1.sink;
//
//import javax.annotation.Nullable;
//
//import org.apache.flink.api.common.serialization.DeserializationSchema;
//import org.apache.flink.streaming.connectors.redis.RedisSink;
//import org.apache.flink.table.connector.ChangelogMode;
//import org.apache.flink.table.connector.format.DecodingFormat;
//import org.apache.flink.table.connector.sink.DynamicTableSink;
//import org.apache.flink.table.connector.sink.SinkFunctionProvider;
//import org.apache.flink.table.data.RowData;
//import org.apache.flink.table.types.DataType;
//import org.apache.flink.util.Preconditions;
//
//import flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions;
//
//public class RedisDynamicTableSink implements DynamicTableSink {
//
//    /**
//     * Data type to configure the formats.
//     */
//    protected final DataType physicalDataType;
//
//    /**
//     * Optional format for decoding keys from Kafka.
// */ // protected final @Nullable // DecodingFormat> decodingFormat; // // protected final RedisWriteOptions redisWriteOptions; // // public RedisDynamicTableSink(DataType physicalDataType // , DecodingFormat> decodingFormat // , RedisWriteOptions redisWriteOptions) { // // // Format attributes // this.physicalDataType = // Preconditions.checkNotNull( // physicalDataType, "Physical data type must not be null."); // this.decodingFormat = decodingFormat; // this.redisWriteOptions = redisWriteOptions; // } // // // @Override // public ChangelogMode getChangelogMode(ChangelogMode requestedMode) { // return null; // } // // @Override // public SinkRuntimeProvider getSinkRuntimeProvider(Context context) { // return SinkFunctionProvider.of(new RedisSink(flinkJedisConfigBase, redisMapper)); // } // // @Override // public DynamicTableSink copy() { // return new RedisDynamicTableSink(tableSchema, config); // } // // @Override // public String asSummaryString() { // return "REDIS"; // } //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v1/source/RedisDynamicTableSource.java ================================================ package flink.examples.sql._03.source_sink.table.redis.v1.source; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.createValueFormatProjection; import javax.annotation.Nullable; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.source.DynamicTableSource; import org.apache.flink.table.connector.source.LookupTableSource; import org.apache.flink.table.connector.source.TableFunctionProvider; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.utils.DataTypeUtils; import org.apache.flink.util.Preconditions; import 
flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions; public class RedisDynamicTableSource implements LookupTableSource { /** * Data type to configure the formats. */ protected final DataType physicalDataType; /** * Optional format for decoding keys from Kafka. */ protected final @Nullable DecodingFormat> decodingFormat; protected final RedisLookupOptions redisLookupOptions; public RedisDynamicTableSource( DataType physicalDataType , DecodingFormat> decodingFormat , RedisLookupOptions redisLookupOptions) { // Format attributes this.physicalDataType = Preconditions.checkNotNull( physicalDataType, "Physical data type must not be null."); this.decodingFormat = decodingFormat; this.redisLookupOptions = redisLookupOptions; } @Override public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) { return TableFunctionProvider.of(new RedisRowDataLookupFunction( this.redisLookupOptions , this.createDeserialization(context, this.decodingFormat, createValueFormatProjection(this.physicalDataType)))); } private @Nullable DeserializationSchema createDeserialization( Context context, @Nullable DecodingFormat> format, int[] projection) { if (format == null) { return null; } DataType physicalFormatDataType = DataTypeUtils.projectRow(this.physicalDataType, projection); return format.createRuntimeDecoder(context, physicalFormatDataType); } @Override public DynamicTableSource copy() { return null; } @Override public String asSummaryString() { return null; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v1/source/RedisRowDataLookupFunction.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
package flink.examples.sql._03.source_sink.table.redis.v1.source;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.shaded.guava18.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.TableFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Joiner;

import flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions;
import redis.clients.jedis.Jedis;

/**
 * The RedisRowDataLookupFunction is a standard user-defined table function, it can be used in
 * tableAPI and also useful for temporal table join plan in SQL. It looks up the result as {@link
 * RowData}.
 */
@Internal
public class RedisRowDataLookupFunction extends TableFunction<RowData> {

    private static final Logger LOG = LoggerFactory.getLogger(RedisRowDataLookupFunction.class);

    private static final long serialVersionUID = 1L;

    private transient Jedis jedis;

    private final String hostname;
    private final int port;

    // Cache is enabled only when both max size and TTL are positive.
    private final long cacheMaxSize;
    private final long cacheExpireMs;
    private final int maxRetryTimes;
    private transient Cache<String, RowData> cache;

    // Joins the lookup-key fields with ':' to build the redis key.
    private final SerializationSchema<Object[]> keySerializationSchema;
    private final DeserializationSchema<RowData> valueDeserializationSchema;

    // Strategy chosen in open(): cached or uncached lookup.
    private transient Consumer<Object[]> evaler;

    public RedisRowDataLookupFunction(
            RedisLookupOptions lookupOptions
            , DeserializationSchema<RowData> valueDeserializationSchema) {
        this.hostname = lookupOptions.getHostname();
        this.port = lookupOptions.getPort();
        this.cacheMaxSize = lookupOptions.getCacheMaxSize();
        this.cacheExpireMs = lookupOptions.getCacheExpireMs();
        this.maxRetryTimes = lookupOptions.getMaxRetryTimes();
        this.valueDeserializationSchema = valueDeserializationSchema;
        // Use an explicit charset instead of the platform default.
        this.keySerializationSchema = elements ->
                Joiner.on(":").join(elements).getBytes(StandardCharsets.UTF_8);
    }

    /**
     * The invoke entry point of lookup function.
     *
     * @param objects the lookup key. Currently only support single rowkey.
     */
    public void eval(Object... objects) throws IOException {
        for (int retry = 0; retry <= maxRetryTimes; retry++) {
            try {
                // fetch result
                this.evaler.accept(objects);
                break;
            } catch (Exception e) {
                // BUG FIX: messages said "HBase" although this function queries Redis.
                LOG.error(String.format("Redis lookup error, retry times = %d", retry), e);
                if (retry >= maxRetryTimes) {
                    throw new RuntimeException("Execution of Redis lookup failed.", e);
                }
                try {
                    // linear back-off (first retry is immediate).
                    Thread.sleep(1000 * retry);
                } catch (InterruptedException e1) {
                    throw new RuntimeException(e1);
                }
            }
        }
    }

    @Override
    public void open(FunctionContext context) {
        LOG.info("start open ...");
        this.jedis = new Jedis(this.hostname, this.port);
        this.cache = cacheMaxSize <= 0 || cacheExpireMs <= 0
                ? null
                : CacheBuilder.newBuilder()
                        .recordStats()
                        .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)
                        .maximumSize(cacheMaxSize)
                        .build();
        if (cache != null) {
            context.getMetricGroup()
                    .gauge("lookupCacheHitRate", (Gauge<Double>) () -> cache.stats().hitRate());
            this.evaler = in -> {
                byte[] key = this.keySerializationSchema.serialize(in);
                // BUG FIX: the cache was read with the Object[] lookup row (cache.getIfPresent(in))
                // but written with the byte[] redis key (cache.put(key, ...)), so a hit was
                // impossible — arrays also compare by identity. Use one stable String key for
                // both read and write.
                String cacheKey = new String(key, StandardCharsets.UTF_8);
                RowData cacheRowData = cache.getIfPresent(cacheKey);
                if (cacheRowData != null) {
                    collect(cacheRowData);
                } else {
                    // fetch result
                    byte[] result = this.jedis.get(key);
                    if (null != result && result.length > 0) {
                        RowData rowData = null;
                        try {
                            rowData = this.valueDeserializationSchema.deserialize(result);
                        } catch (IOException e) {
                            throw new RuntimeException(e);
                        }
                        // parse and collect
                        collect(rowData);
                        cache.put(cacheKey, rowData);
                    }
                }
            };
        } else {
            this.evaler = in -> {
                // fetch result
                byte[] key = this.keySerializationSchema.serialize(in);
                byte[] result = this.jedis.get(key);
                if (null != result && result.length > 0) {
                    RowData rowData = null;
                    try {
                        rowData = this.valueDeserializationSchema.deserialize(result);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                    // parse and collect
                    collect(rowData);
                }
            };
        }
        LOG.info("end open.");
    }

    @Override
    public void close() {
        LOG.info("start close ...");
        if (null != jedis) {
            this.jedis.close();
            this.jedis = null;
        }
        LOG.info("end close.");
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v2/RedisDynamicTableFactory.java ================================================ package flink.examples.sql._03.source_sink.table.redis.v2; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.HOSTNAME; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.LOOKUP_CACHE_MAX_ROWS; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.LOOKUP_CACHE_TTL; import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.LOOKUP_MAX_RETRIES; import
static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.PORT;
import static flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions.BATCH_SIZE;
import static flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions.IS_BATCH_MODE;
import static flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions.WRITE_MODE;

import java.util.HashSet;
import java.util.Set;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DeserializationFormatFactory;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;

import flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions;
import flink.examples.sql._03.source_sink.table.redis.options.RedisOptions;
import flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions;
import flink.examples.sql._03.source_sink.table.redis.v2.sink.RedisDynamicTableSink;
import flink.examples.sql._03.source_sink.table.redis.v2.source.RedisDynamicTableSource;

/**
 * v2 factory for the example Redis connector; provides both a lookup source and a working sink.
 */
public class RedisDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {

    /** Connector identifier used in DDL: 'connector' = 'redis'. */
    @Override
    public String factoryIdentifier() {
        return "redis";
    }

    /** Only hostname and port are mandatory; format and tuning options are optional. */
    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        final Set<ConfigOption<?>> options = new HashSet<>();
        options.add(HOSTNAME);
        options.add(PORT);
        return options;
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        final Set<ConfigOption<?>> options = new HashSet<>();
        options.add(FactoryUtil.FORMAT); // use pre-defined option for format
        options.add(LOOKUP_CACHE_MAX_ROWS);
        options.add(LOOKUP_CACHE_TTL);
        options.add(LOOKUP_MAX_RETRIES);
        options.add(WRITE_MODE);
        options.add(IS_BATCH_MODE);
        options.add(BATCH_SIZE);
        return options;
    }

    /** Builds the lookup (dimension) table source; batch mode is toggled via job config. */
    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        // either implement your custom validation logic here ...
        // or use the provided helper utility
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        // discover a suitable decoding format
        final DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
                helper.discoverDecodingFormat(
                        DeserializationFormatFactory.class,
                        FactoryUtil.FORMAT);
        // validate all options
        helper.validate();
        // get the validated options
        final ReadableConfig options = helper.getOptions();
        final RedisLookupOptions redisLookupOptions = RedisOptions.getRedisLookupOptions(options);
        TableSchema schema = context.getCatalogTable().getSchema();
        // Job-level flag (not a per-table option) that enables batched dim lookups.
        Configuration c = (Configuration) context.getConfiguration();
        boolean isDimBatchMode = c.getBoolean("is.dim.batch.mode", false);
        return new RedisDynamicTableSource(
                schema.toPhysicalRowDataType()
                , decodingFormat
                , redisLookupOptions
                , isDimBatchMode);
    }

    /** Builds the sink; no encoding format is discovered — the sink maps rows itself. */
    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        // either implement your custom validation logic here ...
        // or use the provided helper utility
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        // discover a suitable decoding format
        // final EncodingFormat<SerializationSchema<RowData>> encodingFormat = helper.discoverEncodingFormat(
        //         SerializationFormatFactory.class,
        //         FactoryUtil.FORMAT);
        // validate all options
        helper.validate();
        // get the validated options
        final ReadableConfig options = helper.getOptions();
        final RedisWriteOptions redisWriteOptions = RedisOptions.getRedisWriteOptions(options);
        TableSchema schema = context.getCatalogTable().getSchema();
        return new RedisDynamicTableSink(schema.toPhysicalRowDataType()
                , redisWriteOptions);
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v2/sink/RedisDynamicTableSink.java ================================================ package flink.examples.sql._03.source_sink.table.redis.v2.sink; import javax.annotation.Nullable; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.streaming.connectors.redis.RedisSink; import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase; import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig; import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper; import org.apache.flink.table.connector.ChangelogMode; import org.apache.flink.table.connector.format.EncodingFormat; import org.apache.flink.table.connector.sink.DynamicTableSink; import org.apache.flink.table.connector.sink.SinkFunctionProvider; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.utils.DataTypeUtils; import org.apache.flink.types.RowKind; import org.apache.flink.util.Preconditions; import flink.examples.sql._03.source_sink.table.redis.mapper.SetRedisMapper; import
flink.examples.sql._03.source_sink.table.redis.options.RedisWriteOptions;

/**
 * Redis sink for the example "redis" connector (UPSERT-style: drops UPDATE_BEFORE).
 *
 * https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/table/sourcessinks/
 *
 * https://www.alibabacloud.com/help/zh/faq-detail/118038.htm?spm=a2c63.q38357.a3.16.48fa711fo1gVUd
 */
public class RedisDynamicTableSink implements DynamicTableSink {

    /**
     * Data type to configure the formats.
     */
    protected final DataType physicalDataType;

    // connection + write-mode options resolved by the factory
    protected final RedisWriteOptions redisWriteOptions;

    public RedisDynamicTableSink(
            DataType physicalDataType
            , RedisWriteOptions redisWriteOptions) {
        // Format attributes
        this.physicalDataType = Preconditions.checkNotNull(
                physicalDataType, "Physical data type must not be null.");
        this.redisWriteOptions = redisWriteOptions;
    }

    // Builds a runtime encoder over the projected physical row type; null when no format is set.
    // (Currently unused by getSinkRuntimeProvider — kept for parity with the source class.)
    private @Nullable SerializationSchema<RowData> createSerialization(
            Context context,
            @Nullable EncodingFormat<SerializationSchema<RowData>> format,
            int[] projection) {
        if (format == null) {
            return null;
        }
        DataType physicalFormatDataType = DataTypeUtils.projectRow(this.physicalDataType, projection);
        return format.createRuntimeEncoder(context, physicalFormatDataType);
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        // UPSERT mode: accept every requested kind except UPDATE_BEFORE
        ChangelogMode.Builder builder = ChangelogMode.newBuilder();
        for (RowKind kind : requestedMode.getContainedKinds()) {
            if (kind != RowKind.UPDATE_BEFORE) {
                builder.addContainedKind(kind);
            }
        }
        return builder.build();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        FlinkJedisConfigBase flinkJedisConfigBase = new FlinkJedisPoolConfig.Builder()
                .setHost(this.redisWriteOptions.getHostname())
                .setPort(this.redisWriteOptions.getPort())
                .build();

        RedisMapper redisMapper = null;

        // only the "string" (SET) write mode ships with this example
        switch (this.redisWriteOptions.getWriteMode()) {
            case "string":
                redisMapper = new SetRedisMapper();
                break;
            default:
                throw new RuntimeException("其他类型 write mode 请自定义实现");
        }

        return SinkFunctionProvider.of(new RedisSink<>(
                flinkJedisConfigBase
                , redisMapper));
    }

    @Override
    public DynamicTableSink copy() {
        // BUGFIX: previously returned null; the planner requires copy() to return an
        // equivalent, usable instance.
        return new RedisDynamicTableSink(physicalDataType, redisWriteOptions);
    }

    @Override
    public String asSummaryString() {
        return "redis";
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v2/source/RedisDynamicTableSource.java ================================================

package flink.examples.sql._03.source_sink.table.redis.v2.source;

import static flink.examples.sql._03.source_sink.table.redis.options.RedisOptions.createValueFormatProjection;

import javax.annotation.Nullable;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.TableFunctionProvider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.DataTypeUtils;
import org.apache.flink.util.Preconditions;

import flink.examples.sql._03.source_sink.table.redis.mapper.LookupRedisMapper;
import flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions;

/**
 * Redis lookup (dim) table source. Chooses between the batched (multiGet) and the per-key
 * lookup function based on {@code isDimBatchMode}.
 */
public class RedisDynamicTableSource implements LookupTableSource {

    /**
     * Data type to configure the formats.
     */
    protected final DataType physicalDataType;

    /**
     * Optional format for decoding keys from Kafka.
     */
    protected final @Nullable DecodingFormat<DeserializationSchema<RowData>> decodingFormat;

    protected final RedisLookupOptions redisLookupOptions;

    // true -> RedisRowDataBatchLookupFunction, false -> RedisRowDataLookupFunction
    private final boolean isDimBatchMode;

    public RedisDynamicTableSource(
            DataType physicalDataType
            , DecodingFormat<DeserializationSchema<RowData>> decodingFormat
            , RedisLookupOptions redisLookupOptions
            , boolean isDimBatchMode) {
        // Format attributes
        this.physicalDataType = Preconditions.checkNotNull(
                physicalDataType, "Physical data type must not be null.");
        this.decodingFormat = decodingFormat;
        this.redisLookupOptions = redisLookupOptions;
        this.isDimBatchMode = isDimBatchMode;
    }

    @Override
    public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
        FlinkJedisConfigBase flinkJedisConfigBase = new FlinkJedisPoolConfig.Builder()
                .setHost(this.redisLookupOptions.getHostname())
                .setPort(this.redisLookupOptions.getPort())
                .build();

        LookupRedisMapper lookupRedisMapper = new LookupRedisMapper(
                this.createDeserialization(context, this.decodingFormat,
                        createValueFormatProjection(this.physicalDataType)));

        if (isDimBatchMode) {
            return TableFunctionProvider.of(new RedisRowDataBatchLookupFunction(
                    flinkJedisConfigBase
                    , lookupRedisMapper
                    , this.redisLookupOptions));
        } else {
            return TableFunctionProvider.of(new RedisRowDataLookupFunction(
                    flinkJedisConfigBase
                    , lookupRedisMapper
                    , this.redisLookupOptions));
        }
    }

    // Builds the runtime decoder over the projected physical row type; null when no format is set.
    private @Nullable DeserializationSchema<RowData> createDeserialization(
            Context context,
            @Nullable DecodingFormat<DeserializationSchema<RowData>> format,
            int[] projection) {
        if (format == null) {
            return null;
        }
        DataType physicalFormatDataType = DataTypeUtils.projectRow(this.physicalDataType, projection);
        return format.createRuntimeDecoder(context, physicalFormatDataType);
    }

    @Override
    public DynamicTableSource copy() {
        // BUGFIX: previously returned null; the planner requires a usable copy.
        return new RedisDynamicTableSource(
                physicalDataType, decodingFormat, redisLookupOptions, isDimBatchMode);
    }

    @Override
    public String asSummaryString() {
        // BUGFIX: previously returned null; must return a human-readable summary for plans/logs.
        return "Redis Table Source";
    }
}

================================================ FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v2/source/RedisRowDataBatchLookupFunction.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package flink.examples.sql._03.source_sink.table.redis.v2.source; import java.io.IOException; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.stream.Collectors; import org.apache.flink.annotation.Internal; import org.apache.flink.metrics.Gauge; import org.apache.flink.shaded.guava18.com.google.common.cache.Cache; import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder; import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase; import org.apache.flink.table.data.RowData; import org.apache.flink.table.data.binary.BinaryStringData; import org.apache.flink.table.functions.FunctionContext; import org.apache.flink.table.functions.TableFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import flink.examples.sql._03.source_sink.table.redis.container.RedisCommandsContainer; import flink.examples.sql._03.source_sink.table.redis.container.RedisCommandsContainerBuilder; import 
flink.examples.sql._03.source_sink.table.redis.mapper.LookupRedisMapper; import flink.examples.sql._03.source_sink.table.redis.mapper.RedisCommand; import flink.examples.sql._03.source_sink.table.redis.mapper.RedisCommandDescription; import flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions; /** * The RedisRowDataLookupFunction is a standard user-defined table function, it can be used in * tableAPI and also useful for temporal table join plan in SQL. It looks up the result as {@link * RowData}. */ @Internal public class RedisRowDataBatchLookupFunction extends TableFunction> { private static final Logger LOG = LoggerFactory.getLogger( RedisRowDataBatchLookupFunction.class); private static final long serialVersionUID = 1L; private String additionalKey; private LookupRedisMapper lookupRedisMapper; private RedisCommand redisCommand; protected final RedisLookupOptions redisLookupOptions; private FlinkJedisConfigBase flinkJedisConfigBase; private RedisCommandsContainer redisCommandsContainer; private final long cacheMaxSize; private final long cacheExpireMs; private final int maxRetryTimes; private final boolean isBatchMode; private final int batchSize; private final int batchMinTriggerDelayMs; private transient Cache cache; private transient Consumer evaler; private static final byte[] DEFAULT_JSON_BYTES = "{}".getBytes(); public RedisRowDataBatchLookupFunction( FlinkJedisConfigBase flinkJedisConfigBase , LookupRedisMapper lookupRedisMapper, RedisLookupOptions redisLookupOptions) { this.flinkJedisConfigBase = flinkJedisConfigBase; this.lookupRedisMapper = lookupRedisMapper; this.redisLookupOptions = redisLookupOptions; RedisCommandDescription redisCommandDescription = lookupRedisMapper.getCommandDescription(); this.redisCommand = redisCommandDescription.getRedisCommand(); this.additionalKey = redisCommandDescription.getAdditionalKey(); this.cacheMaxSize = this.redisLookupOptions.getCacheMaxSize(); this.cacheExpireMs = 
this.redisLookupOptions.getCacheExpireMs(); this.maxRetryTimes = this.redisLookupOptions.getMaxRetryTimes(); this.isBatchMode = this.redisLookupOptions.isBatchMode(); this.batchSize = this.redisLookupOptions.getBatchSize(); this.batchMinTriggerDelayMs = this.redisLookupOptions.getBatchMinTriggerDelayMs(); } /** * The invoke entry point of lookup function. * * @param objects the lookup key. Currently only support single rowkey. */ public void eval(Object... objects) throws IOException { for (int retry = 0; retry <= maxRetryTimes; retry++) { try { // fetch result this.evaler.accept(objects); break; } catch (Exception e) { LOG.error(String.format("Redis lookup error, retry times = %d", retry), e); if (retry >= maxRetryTimes) { throw new RuntimeException("Execution of Redis lookup failed.", e); } try { Thread.sleep(1000 * retry); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } @Override public void open(FunctionContext context) { LOG.info("start open ..."); try { this.redisCommandsContainer = RedisCommandsContainerBuilder .build(this.flinkJedisConfigBase); this.redisCommandsContainer.open(); } catch (Exception e) { LOG.error("Redis has not been properly initialized: ", e); throw new RuntimeException(e); } this.cache = cacheMaxSize <= 0 || cacheExpireMs <= 0 ? 
null : CacheBuilder.newBuilder() .recordStats() .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS) .maximumSize(cacheMaxSize) .build(); if (cache != null) { context.getMetricGroup() .gauge("lookupCacheHitRate", (Gauge) () -> cache.stats().hitRate()); this.evaler = in -> { List inner = (List) in[0]; List keys = inner .stream() .map(o -> { if (o instanceof BinaryStringData) { return ((BinaryStringData) o).getJavaObject().getBytes(); } else { return String.valueOf(o).getBytes(); } }) .collect(Collectors.toList()); List value = null; switch (redisCommand) { case GET: value = this.redisCommandsContainer.multiGet(keys); break; default: throw new IllegalArgumentException("Cannot process such data type: " + redisCommand); } List result = value .stream() .map(o -> { if (null == o) { return this.lookupRedisMapper.deserialize(DEFAULT_JSON_BYTES); } else { return this.lookupRedisMapper.deserialize((byte[]) o); } }) .collect(Collectors.toList()); collect(result); }; // this.evaler = in -> { // RowData cacheRowData = cache.getIfPresent(in); // if (cacheRowData != null) { //// collect(cacheRowData); // } else { // // fetch result // byte[] key = lookupRedisMapper.serialize(in); // // byte[] value = null; // // switch (redisCommand) { // case GET: // value = this.redisCommandsContainer.get(key); // break; // case HGET: // value = this.redisCommandsContainer.hget(key, this.additionalKey.getBytes()); // break; // default: // throw new IllegalArgumentException("Cannot process such data type: " + // redisCommand); // } // // RowData rowData = this.lookupRedisMapper.deserialize(value); // // collect(rowData); // // if (null != rowData) { // cache.put(key, rowData); // } // } // }; } else { this.evaler = in -> { List inner = (List) in[0]; List keys = inner .stream() .map(lookupRedisMapper::serialize) .collect(Collectors.toList()); List value = null; switch (redisCommand) { case GET: value = this.redisCommandsContainer.multiGet(keys); break; default: throw new 
IllegalArgumentException("Cannot process such data type: " + redisCommand); } List result = value .stream() .map(o -> this.lookupRedisMapper.deserialize((byte[]) o)) .collect(Collectors.toList()); collect(result); }; } LOG.info("end open."); } @Override public void close() { LOG.info("start close ..."); if (redisCommandsContainer != null) { try { redisCommandsContainer.close(); } catch (IOException e) { throw new RuntimeException(e); } } LOG.info("end close."); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/redis/v2/source/RedisRowDataLookupFunction.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package flink.examples.sql._03.source_sink.table.redis.v2.source; import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import org.apache.flink.annotation.Internal; import org.apache.flink.metrics.Gauge; import org.apache.flink.shaded.guava18.com.google.common.cache.Cache; import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder; import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase; import org.apache.flink.table.data.RowData; import org.apache.flink.table.functions.FunctionContext; import org.apache.flink.table.functions.TableFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import flink.examples.sql._03.source_sink.table.redis.container.RedisCommandsContainer; import flink.examples.sql._03.source_sink.table.redis.container.RedisCommandsContainerBuilder; import flink.examples.sql._03.source_sink.table.redis.mapper.LookupRedisMapper; import flink.examples.sql._03.source_sink.table.redis.mapper.RedisCommand; import flink.examples.sql._03.source_sink.table.redis.mapper.RedisCommandDescription; import flink.examples.sql._03.source_sink.table.redis.options.RedisLookupOptions; /** * The RedisRowDataLookupFunction is a standard user-defined table function, it can be used in * tableAPI and also useful for temporal table join plan in SQL. It looks up the result as {@link * RowData}. 
*/ @Internal public class RedisRowDataLookupFunction extends TableFunction { private static final Logger LOG = LoggerFactory.getLogger( RedisRowDataLookupFunction.class); private static final long serialVersionUID = 1L; private String additionalKey; private LookupRedisMapper lookupRedisMapper; private RedisCommand redisCommand; protected final RedisLookupOptions redisLookupOptions; private FlinkJedisConfigBase flinkJedisConfigBase; private RedisCommandsContainer redisCommandsContainer; private final long cacheMaxSize; private final long cacheExpireMs; private final int maxRetryTimes; private final boolean isBatchMode; private final int batchSize; private final int batchMinTriggerDelayMs; private transient Cache cache; private transient Consumer evaler; public RedisRowDataLookupFunction( FlinkJedisConfigBase flinkJedisConfigBase , LookupRedisMapper lookupRedisMapper, RedisLookupOptions redisLookupOptions) { this.flinkJedisConfigBase = flinkJedisConfigBase; this.lookupRedisMapper = lookupRedisMapper; this.redisLookupOptions = redisLookupOptions; RedisCommandDescription redisCommandDescription = lookupRedisMapper.getCommandDescription(); this.redisCommand = redisCommandDescription.getRedisCommand(); this.additionalKey = redisCommandDescription.getAdditionalKey(); this.cacheMaxSize = this.redisLookupOptions.getCacheMaxSize(); this.cacheExpireMs = this.redisLookupOptions.getCacheExpireMs(); this.maxRetryTimes = this.redisLookupOptions.getMaxRetryTimes(); this.isBatchMode = this.redisLookupOptions.isBatchMode(); this.batchSize = this.redisLookupOptions.getBatchSize(); this.batchMinTriggerDelayMs = this.redisLookupOptions.getBatchMinTriggerDelayMs(); } /** * The invoke entry point of lookup function. * * @param objects the lookup key. Currently only support single rowkey. */ public void eval(Object... 
objects) throws IOException { for (int retry = 0; retry <= maxRetryTimes; retry++) { try { // fetch result this.evaler.accept(objects); break; } catch (Exception e) { LOG.error(String.format("HBase lookup error, retry times = %d", retry), e); if (retry >= maxRetryTimes) { throw new RuntimeException("Execution of Redis lookup failed.", e); } try { Thread.sleep(1000 * retry); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } @Override public void open(FunctionContext context) { LOG.info("start open ..."); try { this.redisCommandsContainer = RedisCommandsContainerBuilder .build(this.flinkJedisConfigBase); this.redisCommandsContainer.open(); } catch (Exception e) { LOG.error("Redis has not been properly initialized: ", e); throw new RuntimeException(e); } this.cache = cacheMaxSize <= 0 || cacheExpireMs <= 0 ? null : CacheBuilder.newBuilder() .recordStats() .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS) .maximumSize(cacheMaxSize) .build(); if (cache != null) { context.getMetricGroup() .gauge("lookupCacheHitRate", (Gauge) () -> cache.stats().hitRate()); this.evaler = in -> { RowData cacheRowData = cache.getIfPresent(in); if (cacheRowData != null) { // collect(cacheRowData); } else { // fetch result byte[] key = lookupRedisMapper.serialize(in); byte[] value = null; switch (redisCommand) { case GET: value = this.redisCommandsContainer.get(key); break; case HGET: value = this.redisCommandsContainer.hget(key, this.additionalKey.getBytes()); break; default: throw new IllegalArgumentException("Cannot process such data type: " + redisCommand); } RowData rowData = this.lookupRedisMapper.deserialize(value); collect(rowData); if (null != rowData) { cache.put(key, rowData); } } }; } else { this.evaler = in -> { // fetch result byte[] key = lookupRedisMapper.serialize(in); byte[] value = null; switch (redisCommand) { case GET: value = this.redisCommandsContainer.get(key); break; case HGET: value = this.redisCommandsContainer.hget(key, 
this.additionalKey.getBytes()); break; default: throw new IllegalArgumentException("Cannot process such data type: " + redisCommand); } RowData rowData = this.lookupRedisMapper.deserialize(value); collect(rowData); }; } LOG.info("end open."); } @Override public void close() { LOG.info("start close ..."); if (redisCommandsContainer != null) { try { redisCommandsContainer.close(); } catch (IOException e) { throw new RuntimeException(e); } } LOG.info("end close."); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/socket/SocketDynamicTableFactory.java ================================================ package flink.examples.sql._03.source_sink.table.socket; import java.util.HashSet; import java.util.Set; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ConfigOptions; import org.apache.flink.configuration.ReadableConfig; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.source.DynamicTableSource; import org.apache.flink.table.data.RowData; import org.apache.flink.table.factories.DeserializationFormatFactory; import org.apache.flink.table.factories.DynamicTableSourceFactory; import org.apache.flink.table.factories.FactoryUtil; import org.apache.flink.table.types.DataType; public class SocketDynamicTableFactory implements DynamicTableSourceFactory { // define all options statically public static final ConfigOption HOSTNAME = ConfigOptions.key("hostname") .stringType() .noDefaultValue(); public static final ConfigOption PORT = ConfigOptions.key("port") .intType() .noDefaultValue(); public static final ConfigOption BYTE_DELIMITER = ConfigOptions.key("byte-delimiter") .intType() .defaultValue(10); // corresponds to '\n' @Override public String factoryIdentifier() { return "socket"; // used for matching to `connector = '...'` } 
@Override public Set> requiredOptions() { final Set> options = new HashSet<>(); options.add(HOSTNAME); options.add(PORT); options.add(FactoryUtil.FORMAT); // use pre-defined option for format return options; } @Override public Set> optionalOptions() { final Set> options = new HashSet<>(); options.add(BYTE_DELIMITER); return options; } @Override public DynamicTableSource createDynamicTableSource(Context context) { // either implement your custom validation logic here ... // or use the provided helper utility final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); // discover a suitable decoding format final DecodingFormat> decodingFormat = helper.discoverDecodingFormat( DeserializationFormatFactory.class, FactoryUtil.FORMAT); // validate all options helper.validate(); // get the validated options final ReadableConfig options = helper.getOptions(); final String hostname = options.get(HOSTNAME); final int port = options.get(PORT); final byte byteDelimiter = (byte) (int) options.get(BYTE_DELIMITER); // derive the produced data type (excluding computed columns) from the catalog table final DataType producedDataType = context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType(); // create and return dynamic table source return new SocketDynamicTableSource(hostname, port, byteDelimiter, decodingFormat, producedDataType); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/socket/SocketDynamicTableSource.java ================================================ package flink.examples.sql._03.source_sink.table.socket; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.connector.ChangelogMode; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.source.DynamicTableSource; import 
org.apache.flink.table.connector.source.ScanTableSource; import org.apache.flink.table.connector.source.SourceFunctionProvider; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.DataType; public class SocketDynamicTableSource implements ScanTableSource { private final String hostname; private final int port; private final byte byteDelimiter; private final DecodingFormat> decodingFormat; private final DataType producedDataType; public SocketDynamicTableSource( String hostname, int port, byte byteDelimiter, DecodingFormat> decodingFormat, DataType producedDataType) { this.hostname = hostname; this.port = port; this.byteDelimiter = byteDelimiter; this.decodingFormat = decodingFormat; this.producedDataType = producedDataType; } @Override public ChangelogMode getChangelogMode() { // in our example the format decides about the changelog mode // but it could also be the source itself return decodingFormat.getChangelogMode(); } @Override public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) { // create runtime classes that are shipped to the cluster final DeserializationSchema deserializer = decodingFormat.createRuntimeDecoder( runtimeProviderContext, producedDataType); final SourceFunction sourceFunction = new SocketSourceFunction( hostname, port, byteDelimiter, deserializer); return SourceFunctionProvider.of(sourceFunction, false); } @Override public DynamicTableSource copy() { return new SocketDynamicTableSource(hostname, port, byteDelimiter, decodingFormat, producedDataType); } @Override public String asSummaryString() { return "Socket Table Source"; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/socket/SocketSourceFunction.java ================================================ package flink.examples.sql._03.source_sink.table.socket; import java.io.InputStream; import java.net.InetSocketAddress; import java.net.Socket; 
import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.source.RichSourceFunction; import org.apache.flink.table.data.RowData; public class SocketSourceFunction extends RichSourceFunction implements ResultTypeQueryable { private final String hostname; private final int port; private final byte byteDelimiter; private final DeserializationSchema deserializer; private volatile boolean isRunning = true; private Socket currentSocket; public SocketSourceFunction(String hostname, int port, byte byteDelimiter, DeserializationSchema deserializer) { this.hostname = hostname; this.port = port; this.byteDelimiter = byteDelimiter; this.deserializer = deserializer; } @Override public TypeInformation getProducedType() { return deserializer.getProducedType(); } @Override public void open(Configuration parameters) throws Exception { deserializer.open(null); this.currentSocket = new Socket(); this.currentSocket.connect(new InetSocketAddress(hostname, port), 0); } @Override public void run(SourceContext ctx) throws Exception { InputStream stream = this.currentSocket.getInputStream(); while (isRunning) { // open and consume from socket byte[] b = new byte[46]; stream.read(b, 0, 46); RowData rowData = deserializer.deserialize(b); ctx.collect(rowData); Thread.sleep(1000); } } @Override public void cancel() { isRunning = false; try { currentSocket.close(); } catch (Throwable t) { // ignore } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/user_defined/UserDefinedDynamicTableFactory.java ================================================ package flink.examples.sql._03.source_sink.table.user_defined; import java.util.HashSet; import java.util.Set; import 
org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DeserializationFormatFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.types.DataType;

/**
 * Factory for the "user_defined" connector: loads the source-function implementation named by
 * the {@code class.name} option at runtime.
 */
public class UserDefinedDynamicTableFactory implements DynamicTableSourceFactory {

    // define all options statically
    public static final ConfigOption<String> CLASS_NAME = ConfigOptions.key("class.name")
            .stringType()
            .noDefaultValue();

    @Override
    public String factoryIdentifier() {
        return "user_defined"; // used for matching to `connector = '...'`
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        final Set<ConfigOption<?>> options = new HashSet<>();
        options.add(CLASS_NAME);
        options.add(FactoryUtil.FORMAT); // use pre-defined option for format
        return options;
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        final Set<ConfigOption<?>> options = new HashSet<>();
        return options;
    }

    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        // either implement your custom validation logic here ...
        // or use the provided helper utility
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);

        // discover a suitable decoding format
        final DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
                helper.discoverDecodingFormat(
                        DeserializationFormatFactory.class,
                        FactoryUtil.FORMAT);

        // validate all options
        helper.validate();

        // get the validated options
        final ReadableConfig options = helper.getOptions();
        final String className = options.get(CLASS_NAME);

        // derive the produced data type (excluding computed columns) from the catalog table
        final DataType producedDataType =
                context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType();

        // create and return dynamic table source
        return new UserDefinedDynamicTableSource(className, decodingFormat, producedDataType);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/user_defined/UserDefinedDynamicTableSource.java ================================================

package flink.examples.sql._03.source_sink.table.user_defined;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.SourceFunctionProvider;
import org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown;
import
org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown; import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown; import org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata; import org.apache.flink.table.connector.source.abilities.SupportsSourceWatermark; import org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown; import org.apache.flink.table.data.RowData; import org.apache.flink.table.expressions.ResolvedExpression; import org.apache.flink.table.types.DataType; import com.google.common.collect.Lists; import lombok.SneakyThrows; public class UserDefinedDynamicTableSource implements ScanTableSource , SupportsFilterPushDown // 过滤条件下推 , SupportsLimitPushDown // limit 条件下推 , SupportsPartitionPushDown // , SupportsProjectionPushDown // select 下推 , SupportsReadingMetadata // 元数据 , SupportsWatermarkPushDown , SupportsSourceWatermark { private final String className; private final DecodingFormat> decodingFormat; private final DataType producedDataType; public UserDefinedDynamicTableSource( String className, DecodingFormat> decodingFormat, DataType producedDataType) { this.className = className; this.decodingFormat = decodingFormat; this.producedDataType = producedDataType; } @Override public ChangelogMode getChangelogMode() { // in our example the format decides about the changelog mode // but it could also be the source itself return decodingFormat.getChangelogMode(); } @SneakyThrows @Override public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) { // create runtime classes that are shipped to the cluster final DeserializationSchema deserializer = decodingFormat.createRuntimeDecoder( runtimeProviderContext, producedDataType); Map readableMetadata = decodingFormat.listReadableMetadata(); Class clazz = this.getClass().getClassLoader().loadClass(className); RichSourceFunction r = (RichSourceFunction) 
clazz.getConstructors()[0].newInstance(deserializer); return SourceFunctionProvider.of(r, false); } @Override public DynamicTableSource copy() { return new UserDefinedDynamicTableSource(className, decodingFormat, producedDataType); } @Override public String asSummaryString() { return "Socket Table Source"; } @Override public Result applyFilters(List filters) { return Result.of(Lists.newLinkedList(), filters); } @Override public void applyLimit(long limit) { System.out.println(1); } @Override public Optional>> listPartitions() { return Optional.empty(); } @Override public void applyPartitions(List> remainingPartitions) { System.out.println(1); } @Override public boolean supportsNestedProjection() { return false; } @Override public void applyProjection(int[][] projectedFields) { System.out.println(1); } @Override public Map listReadableMetadata() { return new HashMap() {{ put("flink_read_timestamp", DataTypes.BIGINT()); }}; } @Override public void applyReadableMetadata(List metadataKeys, DataType producedDataType) { System.out.println(1); } @Override public void applyWatermark(WatermarkStrategy watermarkStrategy) { System.out.println(1); } @Override public void applySourceWatermark() { System.out.println(1); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_03/source_sink/table/user_defined/UserDefinedSource.java ================================================ package flink.examples.sql._03.source_sink.table.user_defined; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.streaming.api.functions.source.RichSourceFunction; import org.apache.flink.table.data.RowData; import com.google.common.collect.ImmutableMap; import flink.examples.JacksonUtils; public class UserDefinedSource extends RichSourceFunction { private DeserializationSchema dser; private volatile boolean isCancel; public UserDefinedSource(DeserializationSchema dser) { this.dser = dser; } @Override 
public void run(SourceContext<RowData> ctx) throws Exception {
    // emit one fixed demo record per second until cancel() flips the flag:
    // serialize the map to JSON bytes, then decode it with the provided schema
    while (!this.isCancel) {
        ctx.collect(this.dser.deserialize(
                JacksonUtils.bean2Json(ImmutableMap.of("user_id", 1111L, "name", "antigeneral")).getBytes()
        ));
        Thread.sleep(1000);
    }
}

@Override
public void cancel() {
    this.isCancel = true;
}
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_04/type/BlinkPlannerTest.java
================================================
package flink.examples.sql._04.type;

import java.util.Arrays;

import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

import flink.examples.sql._01.countdistincterror.udf.Mod_UDF;
import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF;

/**
 * Daily tumble-window count on an event-time attribute using the Blink planner.
 */
public class BlinkPlannerTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(10);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // FIX(review): generic type parameters restored (stripped by a text extraction)
        DataStream<Tuple3<String, Long, Long>> tuple3DataStream = env.fromCollection(Arrays.asList(
                Tuple3.of("2", 1L, 1627254000000L),
                Tuple3.of("2", 1L, 1627218000000L + 5000L),
                Tuple3.of("2", 101L, 1627218000000L + 6000L),
                Tuple3.of("2", 201L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L)))
                .assignTimestampsAndWatermarks(
                        // watermark = max seen timestamp (zero allowed out-of-orderness)
                        new BoundedOutOfOrdernessTimestampExtractor<Tuple3<String, Long, Long>>(Time.seconds(0L)) {
                            @Override
                            public long extractTimestamp(Tuple3<String, Long, Long> element) {
                                return element.f2;
                            }
                        });

        tEnv.registerFunction("mod", new Mod_UDF());
        tEnv.registerFunction("status_mapper", new StatusMapper_UDF());

        tEnv.createTemporaryView("source_db.source_table", tuple3DataStream,
                "status, id, timestamp, rowtime.rowtime");

        String sql = "SELECT\n"
                + " count(1),\n"
                + " cast(tumble_start(rowtime, INTERVAL '1' DAY) as string)\n"
                + "FROM\n"
                + " source_db.source_table\n"
                + "GROUP BY\n"
                + " tumble(rowtime, INTERVAL '1' DAY)";

        Table result = tEnv.sqlQuery(sql);

        tEnv.toAppendStream(result, Row.class).print();

        env.execute();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_04/type/JavaEnvTest.java
================================================
package flink.examples.sql._04.type;
//package flink.examples.sql._04.type;
//
//
//import java.util.Arrays;
//
//import org.apache.flink.api.java.tuple.Tuple3;
//import org.apache.flink.streaming.api.datastream.DataStream;
//import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
//import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
//import org.apache.flink.streaming.api.windowing.time.Time;
//import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
//import org.apache.flink.types.Row;
//
//public class JavaEnvTest {
//
//    public static void main(String[] args) throws Exception {
//
//
//        StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
//        sEnv.setParallelism(1);
//
//        // create a TableEnvironment for streaming queries
//        StreamTableEnvironment sTableEnv = StreamTableEnvironment.create(sEnv);
//
//
sTableEnv.registerFunction("table1", new TableFunc0()); // // DataStream> tuple3DataStream = // sEnv.fromCollection(Arrays.asList( // Tuple3.of("2", 1L, 1627254000000L), // Tuple3.of("2", 1L, 1627218000000L + 5000L), // Tuple3.of("2", 101L, 1627218000000L + 6000L), // Tuple3.of("2", 201L, 1627218000000L + 7000L), // Tuple3.of("2", 301L, 1627218000000L + 7000L), // Tuple3.of("2", 301L, 1627218000000L + 7000L), // Tuple3.of("2", 301L, 1627218000000L + 7000L), // Tuple3.of("2", 301L, 1627218000000L + 7000L), // Tuple3.of("2", 301L, 1627218000000L + 7000L), // Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L))) // .assignTimestampsAndWatermarks( // new BoundedOutOfOrdernessTimestampExtractor>(Time.seconds(0L)) { // @Override // public long extractTimestamp(Tuple3 element) { // return element.f2; // } // }); // // sTableEnv.createTemporaryView("source_db.source_table", tuple3DataStream, // "status, id, timestamp, rowtime.rowtime"); // // String sql = "select * \n" // + "from source_db.source_table as a\n" // + "LEFT JOIN LATERAL TABLE(table1(a.status)) AS DIM(status_new) ON TRUE"; // // sTableEnv.toAppendStream(sTableEnv.sqlQuery(sql), Row.class).print(); // // sEnv.execute(); // // } // //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_04/type/OldPlannerTest.java ================================================ package flink.examples.sql._04.type; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import 
org.apache.flink.types.Row;

import flink.examples.sql._01.countdistincterror.udf.Mod_UDF;
import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF;

/**
 * Same daily tumble-window count as BlinkPlannerTest, but running on the legacy (old) planner.
 */
public class OldPlannerTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(10);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useOldPlanner()
                .inStreamingMode()
                .build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // FIX(review): generic type parameters restored (stripped by a text extraction)
        DataStream<Tuple3<String, Long, Long>> tuple3DataStream = env.fromCollection(Arrays.asList(
                Tuple3.of("2", 1L, 1627254000000L),
                Tuple3.of("2", 1L, 1627218000000L + 5000L),
                Tuple3.of("2", 101L, 1627218000000L + 6000L),
                Tuple3.of("2", 201L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 7000L),
                Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L)))
                .assignTimestampsAndWatermarks(
                        // watermark = max seen timestamp (zero allowed out-of-orderness)
                        new BoundedOutOfOrdernessTimestampExtractor<Tuple3<String, Long, Long>>(Time.seconds(0L)) {
                            @Override
                            public long extractTimestamp(Tuple3<String, Long, Long> element) {
                                return element.f2;
                            }
                        });

        tEnv.registerFunction("mod", new Mod_UDF());
        tEnv.registerFunction("status_mapper", new StatusMapper_UDF());

        tEnv.createTemporaryView("source_db.source_table", tuple3DataStream,
                "status, id, timestamp, rowtime.rowtime");

        String sql = "SELECT\n"
                + " count(1),\n"
                + " cast(tumble_start(rowtime, INTERVAL '1' DAY) as string)\n"
                + "FROM\n"
                + " source_db.source_table\n"
                + "GROUP BY\n"
                + " tumble(rowtime, INTERVAL '1' DAY)";

        Table result = tEnv.sqlQuery(sql);

        tEnv.toAppendStream(result, Row.class).print();

        env.execute();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/ProtobufFormatTest.java
================================================
package
flink.examples.sql._05.format.formats;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * nc -lk 9999
 */
public class ProtobufFormatTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        env.setParallelism(1);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();
        env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // FIX(review): element types of ARRAY/MAP restored in the DDL (they were stripped by a
        // text extraction; bare `ARRAY`/`MAP` is not valid Flink SQL), and the format class
        // corrected from the non-existent `_04` package to `_05` (matches SocketWriteTest's
        // import of flink.examples.sql._05.format.formats.protobuf.Test).
        String sourceTableSql = "CREATE TABLE protobuf_source ("
                + " name STRING\n"
                + " , names ARRAY<STRING>\n"
                + " , si_map MAP<STRING, INT>\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'socket',\n"
                + " 'hostname' = 'localhost',\n"
                + " 'port' = '9999',\n"
                + " 'format' = 'protobuf',\n"
                + " 'protobuf.class-name' = 'flink.examples.sql._05.format.formats.protobuf.Test'\n"
                + ")";

        String sinkTableSql = "CREATE TABLE print_sink (\n"
                + " name STRING\n"
                + " , names ARRAY<STRING>\n"
                + " , si_map MAP<STRING, INT>\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        String selectSql = "INSERT INTO print_sink\n"
                + "SELECT *\n"
                + "FROM protobuf_source\n";

        tEnv.executeSql(sourceTableSql);
        tEnv.executeSql(sinkTableSql);
        // FIX(review): block on the INSERT job instead of calling env.execute() — the pipeline
        // is defined purely via the Table API, so the StreamExecutionEnvironment has no
        // operators and env.execute() would fail; await() keeps the local JVM alive while the
        // asynchronously submitted job runs.
        tEnv.executeSql(selectSql).await();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/SocketWriteTest.java
================================================
package flink.examples.sql._05.format.formats;

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.Map;

import com.google.common.collect.ImmutableMap;

import
flink.examples.JacksonUtils; import flink.examples.sql._05.format.formats.protobuf.Test; public class SocketWriteTest { public static void main(String[] args) throws IOException, InterruptedException { ServerSocket serversocket = new ServerSocket(9999); final Socket socket = serversocket.accept(); int i = 0; while (true) { Map map = ImmutableMap.of("key1", 1, "地图", i); Test test = Test.newBuilder() .setName("姓名" + i) .addNames("姓名列表" + i) .putAllSiMap(map) .build(); System.out.println(JacksonUtils.bean2Json(test)); byte[] b = test.toByteArray(); socket.getOutputStream().write(b); socket.getOutputStream().flush(); i++; if (i == 10) { break; } Thread.sleep(500); } socket.close(); serversocket.close(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/csv/ChangelogCsvDeserializer.java ================================================ package flink.examples.sql._05.format.formats.csv; import java.util.List; import java.util.regex.Pattern; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.table.connector.RuntimeConverter.Context; import org.apache.flink.table.connector.source.DynamicTableSource.DataStructureConverter; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.table.types.logical.LogicalTypeRoot; import org.apache.flink.types.Row; import org.apache.flink.types.RowKind; public class ChangelogCsvDeserializer implements DeserializationSchema { private final List parsingTypes; private final DataStructureConverter converter; private final TypeInformation producedTypeInfo; private final String columnDelimiter; public ChangelogCsvDeserializer( List parsingTypes, DataStructureConverter converter, TypeInformation producedTypeInfo, String columnDelimiter) { this.parsingTypes = parsingTypes; this.converter = 
converter; this.producedTypeInfo = producedTypeInfo; this.columnDelimiter = columnDelimiter; } @Override public TypeInformation getProducedType() { // return the type information required by Flink's core interfaces return producedTypeInfo; } @Override public void open(InitializationContext context) { // converters must be open converter.open(Context.create(ChangelogCsvDeserializer.class.getClassLoader())); } @Override public RowData deserialize(byte[] message) { // parse the columns including a changelog flag final String[] columns = new String(message).split(Pattern.quote(columnDelimiter)); final RowKind kind = RowKind.valueOf(columns[0]); final Row row = new Row(kind, parsingTypes.size()); for (int i = 0; i < parsingTypes.size(); i++) { row.setField(i, parse(parsingTypes.get(i).getTypeRoot(), columns[i + 1])); } // convert to internal data structure return (RowData) converter.toInternal(row); } private static Object parse(LogicalTypeRoot root, String value) { switch (root) { case INTEGER: return Integer.parseInt(value); case VARCHAR: return value; default: throw new IllegalArgumentException(); } } @Override public boolean isEndOfStream(RowData nextElement) { return false; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/csv/ChangelogCsvFormat.java ================================================ package flink.examples.sql._05.format.formats.csv; import java.util.List; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.table.connector.ChangelogMode; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.source.DynamicTableSource; import org.apache.flink.table.connector.source.DynamicTableSource.DataStructureConverter; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.DataType; import 
org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.types.RowKind;

/**
 * Decoding format for the demo changelog CSV: builds a {@link ChangelogCsvDeserializer}
 * and declares that the format can emit INSERT and DELETE rows.
 *
 * FIX(review): generic type parameters restored (stripped by a text extraction);
 * signatures now match {@code DecodingFormat<DeserializationSchema<RowData>>}.
 */
public class ChangelogCsvFormat implements DecodingFormat<DeserializationSchema<RowData>> {

    private final String columnDelimiter;

    public ChangelogCsvFormat(String columnDelimiter) {
        this.columnDelimiter = columnDelimiter;
    }

    @Override
    @SuppressWarnings("unchecked")
    public DeserializationSchema<RowData> createRuntimeDecoder(
            DynamicTableSource.Context context,
            DataType producedDataType) {
        // create type information for the DeserializationSchema
        final TypeInformation<RowData> producedTypeInfo = context.createTypeInformation(
                producedDataType);

        // most of the code in DeserializationSchema will not work on internal data structures
        // create a converter for conversion at the end
        final DataStructureConverter converter = context.createDataStructureConverter(producedDataType);

        // use logical types during runtime for parsing
        final List<LogicalType> parsingTypes = producedDataType.getLogicalType().getChildren();

        // create runtime class
        return new ChangelogCsvDeserializer(parsingTypes, converter, producedTypeInfo, columnDelimiter);
    }

    @Override
    public ChangelogMode getChangelogMode() {
        // define that this format can produce INSERT and DELETE rows
        return ChangelogMode.newBuilder()
                .addContainedKind(RowKind.INSERT)
                .addContainedKind(RowKind.DELETE)
                .build();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/csv/ChangelogCsvFormatFactory.java
================================================
package flink.examples.sql._05.format.formats.csv;

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.data.RowData;
import
org.apache.flink.table.factories.DeserializationFormatFactory; import org.apache.flink.table.factories.DynamicTableFactory; import org.apache.flink.table.factories.FactoryUtil; public class ChangelogCsvFormatFactory implements DeserializationFormatFactory { // define all options statically public static final ConfigOption COLUMN_DELIMITER = ConfigOptions.key("column-delimiter") .stringType() .defaultValue("|"); @Override public String factoryIdentifier() { return "changelog-csv"; } @Override public Set> requiredOptions() { return Collections.emptySet(); } @Override public Set> optionalOptions() { final Set> options = new HashSet<>(); options.add(COLUMN_DELIMITER); return options; } @Override public DecodingFormat> createDecodingFormat( DynamicTableFactory.Context context, ReadableConfig formatOptions) { // either implement your custom validation logic here ... // or use the provided helper method FactoryUtil.validateFactoryOptions(this, formatOptions); // get the validated options final String columnDelimiter = formatOptions.get(COLUMN_DELIMITER); // create and return the format return new ChangelogCsvFormat(columnDelimiter); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/descriptors/Protobuf.java ================================================ package flink.examples.sql._05.format.formats.protobuf.descriptors; import java.util.Map; import org.apache.flink.annotation.PublicEvolving; import org.apache.flink.table.descriptors.DescriptorProperties; import org.apache.flink.table.descriptors.FormatDescriptor; import org.apache.flink.util.Preconditions; import com.google.protobuf.Message; /** * Format descriptor for Apache Protobuf messages. 
*/ @PublicEvolving public class Protobuf extends FormatDescriptor { private Class messageClass; private String protobufDescriptorHttpGetUrl; public Protobuf() { super(ProtobufValidator.FORMAT_TYPE_VALUE, 1); } /** * Sets the class of the Protobuf message. * * @param messageClass class of the Protobuf message. */ public Protobuf messageClass(Class messageClass) { Preconditions.checkNotNull(messageClass); this.messageClass = messageClass; return this; } /** * Sets the Protobuf for protobuf messages. * * @param protobufDescriptorHttpGetUrl protobuf descriptor http get url */ public Protobuf protobufDescriptorHttpGetUrl(String protobufDescriptorHttpGetUrl) { Preconditions.checkNotNull(protobufDescriptorHttpGetUrl); this.protobufDescriptorHttpGetUrl = protobufDescriptorHttpGetUrl; return this; } @Override protected Map toFormatProperties() { final DescriptorProperties properties = new DescriptorProperties(); if (null != messageClass) { properties.putClass(ProtobufValidator.FORMAT_MESSAGE_CLASS, messageClass); } if (null != protobufDescriptorHttpGetUrl) { properties.putString(ProtobufValidator.FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL, protobufDescriptorHttpGetUrl); } return properties.asMap(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/descriptors/ProtobufValidator.java ================================================ package flink.examples.sql._05.format.formats.protobuf.descriptors; import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.descriptors.DescriptorProperties; import org.apache.flink.table.descriptors.FormatDescriptorValidator; /** * Validator for {@link Protobuf}. 
*/
public class ProtobufValidator extends FormatDescriptorValidator {

    public static final String FORMAT_TYPE_VALUE = "protobuf";
    public static final String FORMAT_MESSAGE_CLASS = "format.message-class";
    public static final String FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL = "format.protobuf-descriptor-http-get-url";

    @Override
    public void validate(DescriptorProperties properties) {
        super.validate(properties);
        // exactly one of {message class, descriptor URL} must be configured
        final boolean hasMessageClass = properties.containsKey(FORMAT_MESSAGE_CLASS);
        final boolean hasProtobufDescriptorHttpGetUrl =
                properties.containsKey(FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL);
        if (hasMessageClass && hasProtobufDescriptorHttpGetUrl) {
            throw new ValidationException(
                    "A definition of both a Protobuf message class and Protobuf get descriptor http url is not allowed.");
        } else if (hasMessageClass) {
            // value must be a non-empty string
            properties.validateString(FORMAT_MESSAGE_CLASS, false, 1);
        } else if (hasProtobufDescriptorHttpGetUrl) {
            properties.validateString(FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL, false, 1);
        } else {
            // FIX(review): grammar in the error message ("an Protobuf" -> "a Protobuf")
            throw new ValidationException(
                    "A definition of a Protobuf message class or Protobuf get descriptor http url is required.");
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufDeserializationSchema.java
================================================
package flink.examples.sql._05.format.formats.protobuf.row;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import org.apache.flink.api.common.serialization.AbstractDeserializationSchema;
import org.apache.flink.util.Preconditions;

import com.google.protobuf.Descriptors;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.GeneratedMessageV3;
import com.google.protobuf.Message;

import flink.examples.sql._05.format.formats.utils.MoreSuppliers;

/**
 * Deserialization schema that deserializes from Protobuf binary format.
 */
public class ProtobufDeserializationSchema<T extends Message> extends AbstractDeserializationSchema<T> {

    private static final long serialVersionUID = 2098447220136965L;

    /** Class to deserialize to. */
    private Class<T> messageClazz;

    /** DescriptorBytes in case of Message for serialization purpose. */
    private byte[] descriptorBytes;

    /** Descriptor generated from DescriptorBytes. */
    private transient Descriptors.Descriptor descriptor;

    /** Default instance for this T message. */
    private transient T defaultInstance;

    /**
     * Creates {@link ProtobufDeserializationSchema} that produces {@link Message} using provided schema.
     *
     * @param descriptorBytes of produced messages
     * @return deserialized message in form of {@link Message}
     */
    public static ProtobufDeserializationSchema<Message> forGenericMessage(byte[] descriptorBytes) {
        return new ProtobufDeserializationSchema<>(Message.class, descriptorBytes);
    }

    /**
     * Creates {@link ProtobufDeserializationSchema} that produces classes that were generated from protobuf schema.
     *
     * @param messageClazz class of message to be produced
     * @return deserialized message
     */
    public static <T extends GeneratedMessageV3> ProtobufDeserializationSchema<T> forSpecificMessage(Class<T> messageClazz) {
        return new ProtobufDeserializationSchema<>(messageClazz, null);
    }

    /**
     * Creates a Protobuf deserialization schema.
     *
     * NOTE(review): generic parameters on this class were stripped by a text extraction and
     * have been restored from the surviving uses of {@code T} — confirm against the original.
     *
     * @param messageClazz class to which deserialize.
     * @param descriptorBytes descriptor to which deserialize.
     */
    @SuppressWarnings("unchecked")
    ProtobufDeserializationSchema(Class<T> messageClazz, byte[] descriptorBytes) {
        Preconditions.checkNotNull(messageClazz, "Protobuf message class must not be null.");
        this.messageClazz = messageClazz;
        this.descriptorBytes = descriptorBytes;
        if (null != this.descriptorBytes) {
            // generic mode: build a DynamicMessage default instance from the descriptor
            this.descriptor = ProtobufUtils.getDescriptor(descriptorBytes);
            this.defaultInstance = (T) DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType();
        } else {
            // specific mode: derive descriptor and default instance from the generated class
            this.descriptor = ProtobufUtils.getDescriptor(messageClazz);
            this.defaultInstance = ProtobufUtils.getDefaultInstance(messageClazz);
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public T deserialize(byte[] bytes) throws IOException {
        // BUGFIX: the previous version returned the Builder itself (missing .build()),
        // handing callers a Message.Builder cast to T instead of the parsed message.
        return (T) this.defaultInstance.newBuilderForType().mergeFrom(bytes).build();
    }

    @SuppressWarnings("unchecked")
    private void readObject(ObjectInputStream inputStream) throws ClassNotFoundException, IOException {
        this.messageClazz = (Class<T>) inputStream.readObject();
        this.descriptorBytes = MoreSuppliers.throwing(() -> ProtobufUtils.getBytes(inputStream));
        if (null != this.descriptorBytes) {
            this.descriptor = ProtobufUtils.getDescriptor(descriptorBytes);
            this.defaultInstance = (T) DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType();
        } else {
            this.descriptor = ProtobufUtils.getDescriptor(messageClazz);
            this.defaultInstance = ProtobufUtils.getDefaultInstance(messageClazz);
        }
    }

    private void writeObject(ObjectOutputStream outputStream) throws IOException {
        outputStream.writeObject(this.messageClazz);
        // BUGFIX: descriptorBytes is null for schemas created via forSpecificMessage(...),
        // so the previous unconditional write(this.descriptorBytes) threw a NullPointerException.
        // NOTE(review): assumes ProtobufUtils.getBytes(...) in readObject yields null when no
        // descriptor bytes follow in the stream — confirm the round-trip with that helper.
        if (null != this.descriptorBytes) {
            outputStream.write(this.descriptorBytes);
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufRowDeserializationSchema.java
================================================
package flink.examples.sql._05.format.formats.protobuf.row;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import
java.io.Serializable; import java.math.BigDecimal; import java.math.BigInteger; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.TimeZone; import java.util.stream.Collectors; import org.apache.flink.api.common.serialization.AbstractDeserializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.common.typeinfo.Types; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.api.java.typeutils.MapTypeInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.types.Row; import org.apache.flink.util.Preconditions; import org.joda.time.DateTime; import org.joda.time.DateTimeFieldType; import org.joda.time.LocalDate; import org.joda.time.LocalTime; import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.FieldDescriptor; import com.google.protobuf.DynamicMessage; import com.google.protobuf.GeneratedMessageV3; import com.google.protobuf.MapEntry; import com.google.protobuf.Message; import flink.examples.sql._05.format.formats.protobuf.row.typeutils.ProtobufSchemaConverter; public class ProtobufRowDeserializationSchema extends AbstractDeserializationSchema { /** * Used for time conversions into SQL types. */ private static final TimeZone LOCAL_TZ = TimeZone.getDefault(); /** * Protobuf message class for deserialization. Might be null if message class is not available. */ private Class messageClazz; /** * Protobuf serialization descriptorBytes */ private byte[] descriptorBytes; /** * Protobuf serialization descriptor. */ private transient Descriptors.Descriptor descriptor; /** * Type information describing the result type. 
*/ private transient RowTypeInfo typeInfo; /** * Protobuf defaultInstance for descriptor */ private transient Message defaultInstance; private transient DeserializationRuntimeConverter deserializationRuntimeConverter; @FunctionalInterface interface DeserializationRuntimeConverter extends Serializable { Object convert(Object object); } /** * Creates a Protobuf deserialization descriptor for the given message class. Having the * concrete Protobuf message class might improve performance. * * @param messageClazz Protobuf message class used to deserialize Protobuf's message to Flink's row */ public ProtobufRowDeserializationSchema(Class messageClazz) { Preconditions.checkNotNull(messageClazz, "Protobuf message class must not be null."); this.messageClazz = messageClazz; this.descriptorBytes = null; this.descriptor = ProtobufUtils.getDescriptor(messageClazz); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(messageClazz); this.defaultInstance = ProtobufUtils.getDefaultInstance(messageClazz); this.deserializationRuntimeConverter = this.createRowConverter(this.descriptor, this.typeInfo); } /** * Creates a Protobuf deserialization descriptor for the given Protobuf descriptorBytes. 
* * @param descriptorBytes Protobuf descriptorBytes to deserialize Protobuf's message to Flink's row */ public ProtobufRowDeserializationSchema(byte[] descriptorBytes) { Preconditions.checkNotNull(descriptorBytes, "Protobuf descriptorBytes must not be null."); this.messageClazz = null; this.descriptorBytes = descriptorBytes; this.descriptor = ProtobufUtils.getDescriptor(descriptorBytes); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.descriptor); this.defaultInstance = DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType(); this.deserializationRuntimeConverter = createRowConverter(this.descriptor, this.typeInfo); } @Override public Row deserialize(byte[] bytes) throws IOException { try { Message message = this.defaultInstance .newBuilderForType() .mergeFrom(bytes) .build(); return (Row) this.deserializationRuntimeConverter.convert(message); } catch (Exception e) { throw new IOException("Failed to deserialize Protobuf message.", e); } } @Override public TypeInformation getProducedType() { return this.typeInfo; } // -------------------------------------------------------------------------------------------- private DeserializationRuntimeConverter createRowConverter( Descriptors.Descriptor descriptor, RowTypeInfo rowTypeInfo) { final FieldDescriptor[] fieldDescriptors = descriptor.getFields().toArray(new FieldDescriptor[0]); final TypeInformation[] fieldTypeInfos = rowTypeInfo.getFieldTypes(); final int length = fieldDescriptors.length; final DeserializationRuntimeConverter[] deserializationRuntimeConverters = new DeserializationRuntimeConverter[length]; for (int i = 0; i < length; i++) { final FieldDescriptor fieldDescriptor = fieldDescriptors[i]; final TypeInformation fieldTypeInfo = fieldTypeInfos[i]; deserializationRuntimeConverters[i] = createConverter(fieldDescriptor, fieldTypeInfo); } return (Object o) -> { Message message = (Message) o; final Row row = new Row(length); for (int i = 0; i < length; i++) { Object 
fieldO = message.getField(fieldDescriptors[i]); row.setField(i, deserializationRuntimeConverters[i].convert(fieldO)); } return row; }; } @SuppressWarnings("unchecked") private DeserializationRuntimeConverter createConverter(Descriptors.GenericDescriptor genericDescriptor, TypeInformation info) { // we perform the conversion based on descriptor information but enriched with pre-computed // type information where useful (i.e., for list) if (genericDescriptor instanceof Descriptors.Descriptor) { return createRowConverter((Descriptors.Descriptor) genericDescriptor, (RowTypeInfo) info); } else if (genericDescriptor instanceof FieldDescriptor) { FieldDescriptor fieldDescriptor = ((FieldDescriptor) genericDescriptor); // field switch (fieldDescriptor.getType()) { case INT32: case FIXED32: case UINT32: case SFIXED32: case SINT32: case INT64: case UINT64: case FIXED64: case SFIXED64: case SINT64: case DOUBLE: case FLOAT: case BOOL: case STRING: if (info instanceof ListTypeInfo) { // list TypeInformation elementTypeInfo = ((ListTypeInfo) info).getElementTypeInfo(); return this.createListConverter(elementTypeInfo); } else { return this.createObjectConverter(info); } case ENUM: if (info instanceof ListTypeInfo) { // list return (Object o) -> ((List) o) .stream() .map(Object::toString) .collect(Collectors.toList()); } else { return Object::toString; } case GROUP: case MESSAGE: if (info instanceof ListTypeInfo) { // list TypeInformation elementTypeInfo = ((ListTypeInfo) info).getElementTypeInfo(); Descriptors.Descriptor elementDescriptor = fieldDescriptor.getMessageType(); DeserializationRuntimeConverter elementConverter = this.createConverter(elementDescriptor, elementTypeInfo); return (Object o) -> ((List) o) .stream() .map(elementConverter::convert) .collect(Collectors.toList()); } else if (info instanceof MapTypeInfo) { // map final MapTypeInfo mapTypeInfo = (MapTypeInfo) info; boolean isDynamicMessage = false; if (this.messageClazz == null) { isDynamicMessage = true; } // 
todo map's key only support string final DeserializationRuntimeConverter keyConverter = Object::toString; final FieldDescriptor keyFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(0); final FieldDescriptor valueFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(1); final TypeInformation valueTypeInfo = mapTypeInfo.getValueTypeInfo(); final DeserializationRuntimeConverter valueConverter = createConverter(valueFieldDescriptor, valueTypeInfo); if (isDynamicMessage) { return (Object o) -> { final List dynamicMessages = (List) o; final Map convertedMap = new HashMap<>(dynamicMessages.size()); dynamicMessages.forEach((DynamicMessage dynamicMessage) -> { convertedMap.put( (String) keyConverter.convert(dynamicMessage.getField(keyFieldDescriptor)) , valueConverter.convert(dynamicMessage.getField(valueFieldDescriptor))); }); return convertedMap; }; } else { return (Object o) -> { final List mapEntryList = (List) o; final Map convertedMap = new HashMap<>(mapEntryList.size()); mapEntryList.forEach((MapEntry message) -> { convertedMap.put( (String) keyConverter.convert(message.getKey()) , valueConverter.convert(message.getValue())); }); return convertedMap; }; } } else if (info instanceof RowTypeInfo) { // row return createRowConverter(((FieldDescriptor) genericDescriptor).getMessageType(), (RowTypeInfo) info); } throw new IllegalStateException("Message expected but was: "); case BYTES: return (Object o) -> { final byte[] bytes = ((ByteString) o).toByteArray(); if (Types.BIG_DEC == info) { return convertToDecimal(bytes); } return bytes; }; } } throw new IllegalArgumentException("Unsupported Protobuf type '" + genericDescriptor.getName() + "'."); } @SuppressWarnings("unchecked") private DeserializationRuntimeConverter createListConverter(TypeInformation info) { DeserializationRuntimeConverter elementConverter; if (Types.SQL_DATE == info) { elementConverter = this::convertToDate; } else if (Types.SQL_TIME == info) { elementConverter = 
this::convertToTime; } else { elementConverter = (Object fieldO) -> (fieldO); } return (Object o) -> ((List) o) .stream() .map(elementConverter::convert) .collect(Collectors.toList()); } private DeserializationRuntimeConverter createObjectConverter(TypeInformation info) { if (Types.SQL_DATE == info) { return this::convertToDate; } else if (Types.SQL_TIME == info) { return this::convertToTime; } else { return (Object o) -> o; } } // -------------------------------------------------------------------------------------------- private BigDecimal convertToDecimal(byte[] bytes) { return new BigDecimal(new BigInteger(bytes)); } private Date convertToDate(Object object) { final long millis; if (object instanceof Integer) { final Integer value = (Integer) object; // adopted from Apache Calcite final long t = (long) value * 86400000L; millis = t - (long) LOCAL_TZ.getOffset(t); } else { // use 'provided' Joda time final LocalDate value = (LocalDate) object; millis = value.toDate().getTime(); } return new Date(millis); } private Time convertToTime(Object object) { final long millis; if (object instanceof Integer) { millis = (Integer) object; } else { // use 'provided' Joda time final LocalTime value = (LocalTime) object; millis = value.get(DateTimeFieldType.millisOfDay()); } return new Time(millis - LOCAL_TZ.getOffset(millis)); } private Timestamp convertToTimestamp(Object object) { final long millis; if (object instanceof Long) { millis = (Long) object; } else { // use 'provided' Joda time final DateTime value = (DateTime) object; millis = value.toDate().getTime(); } return new Timestamp(millis - LOCAL_TZ.getOffset(millis)); } private void writeObject(ObjectOutputStream outputStream) throws IOException { if (Objects.nonNull(this.messageClazz)) { outputStream.writeObject(this.messageClazz); } else { outputStream.writeObject(this.descriptorBytes); } } @SuppressWarnings("unchecked") private void readObject(ObjectInputStream inputStream) throws ClassNotFoundException, IOException 
{ Object o = inputStream.readObject(); if (o instanceof Class) { this.messageClazz = (Class) o; this.descriptor = ProtobufUtils.getDescriptor(this.messageClazz); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.messageClazz); this.defaultInstance = ProtobufUtils.getDefaultInstance(this.messageClazz); } else { this.descriptorBytes = (byte[]) o; this.descriptor = ProtobufUtils.getDescriptor(this.descriptorBytes); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.descriptor); this.defaultInstance = DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType(); } this.deserializationRuntimeConverter = this.createConverter(this.descriptor, this.typeInfo); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufRowFormatFactory.java ================================================ package flink.examples.sql._05.format.formats.protobuf.row; import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Map; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.table.descriptors.DescriptorProperties; import org.apache.flink.table.factories.DeserializationSchemaFactory; import org.apache.flink.table.factories.SerializationSchemaFactory; import org.apache.flink.table.factories.TableFormatFactoryBase; import org.apache.flink.types.Row; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import com.google.protobuf.GeneratedMessageV3; import flink.examples.sql._05.format.formats.protobuf.descriptors.ProtobufValidator; import flink.examples.sql._05.format.formats.utils.MoreRunnables; import 
flink.examples.sql._05.format.formats.utils.MoreSuppliers;

/**
 * Table format factory for providing configured instances of Protobuf-to-row
 * {@link SerializationSchema} and {@link DeserializationSchema}.
 */
public class ProtobufRowFormatFactory extends TableFormatFactoryBase
        implements SerializationSchemaFactory, DeserializationSchemaFactory {

    public ProtobufRowFormatFactory() {
        super(ProtobufValidator.FORMAT_TYPE_VALUE, 1, false);
    }

    @Override
    protected List supportedFormatProperties() {
        List properties = new ArrayList<>(3);
        properties.add(ProtobufValidator.FORMAT_TYPE_VALUE);
        // Bug fix: both createDeserializationSchema and createSerializationSchema branch on
        // FORMAT_MESSAGE_CLASS, but it was never declared as a supported property, so factory
        // validation rejected configurations using the message-class path.
        properties.add(ProtobufValidator.FORMAT_MESSAGE_CLASS);
        properties.add(ProtobufValidator.FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL);
        return properties;
    }

    /**
     * Creates the row deserialization schema, preferring a configured generated message
     * class and otherwise fetching serialized descriptor bytes over HTTP.
     */
    @Override
    public DeserializationSchema createDeserializationSchema(Map properties) {
        final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
        // create and configure
        if (descriptorProperties.containsKey(ProtobufValidator.FORMAT_MESSAGE_CLASS)) {
            return new ProtobufRowDeserializationSchema(
                    descriptorProperties.getClass(ProtobufValidator.FORMAT_MESSAGE_CLASS, GeneratedMessageV3.class));
        } else {
            String descriptorHttpGetUrl =
                    descriptorProperties.getString(ProtobufValidator.FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL);
            byte[] descriptorBytes = httpGetDescriptorBytes(descriptorHttpGetUrl);
            return new ProtobufRowDeserializationSchema(descriptorBytes);
        }
    }

    /**
     * Creates the row serialization schema; mirrors {@link #createDeserializationSchema(Map)}.
     */
    @Override
    public SerializationSchema createSerializationSchema(Map properties) {
        final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
        // create and configure
        if (descriptorProperties.containsKey(ProtobufValidator.FORMAT_MESSAGE_CLASS)) {
            return new ProtobufRowSerializationSchema(
                    descriptorProperties.getClass(ProtobufValidator.FORMAT_MESSAGE_CLASS, GeneratedMessageV3.class));
        } else {
            String descriptorHttpGetUrl =
                    descriptorProperties.getString(ProtobufValidator.FORMAT_PROTOBUF_DESCRIPTOR_HTTP_GET_URL);
            byte[] descriptorBytes = httpGetDescriptorBytes(descriptorHttpGetUrl);
            return new ProtobufRowSerializationSchema(descriptorBytes);
        }
    }

    /**
     * Fetches serialized Protobuf descriptor bytes from the given HTTP GET URL.
     *
     * <p>Bug fix: the previous implementation allocated a buffer from
     * {@code getContentLength()} (which is -1 for chunked responses) and issued a single
     * {@code InputStream.read(buffer)}, which is not guaranteed to fill the buffer. The
     * entity stream is now read to EOF via {@link ProtobufUtils#getBytes(java.io.InputStream)},
     * which also closes the stream.
     *
     * @throws RuntimeException if the response yields null or empty descriptor bytes
     */
    public static byte[] httpGetDescriptorBytes(final String descriptorHttpGetUrl) {
        byte[] descriptorBytes = null;
        HttpGet get = new HttpGet(descriptorHttpGetUrl);
        CloseableHttpClient httpClient = HttpClients.createDefault();
        CloseableHttpResponse httpResponse = MoreSuppliers.throwing(() -> httpClient.execute(get));
        if (200 == httpResponse.getStatusLine().getStatusCode()) {
            InputStream is = MoreSuppliers.throwing(() -> httpResponse.getEntity().getContent());
            descriptorBytes = MoreSuppliers.throwing(() -> ProtobufUtils.getBytes(is));
        }
        MoreRunnables.throwing(httpResponse::close);
        MoreRunnables.throwing(httpClient::close);
        if (null != descriptorBytes && 0 != descriptorBytes.length) {
            return descriptorBytes;
        } else {
            throw new RuntimeException(String.format("Try to get Protobuf descriptorBytes http response by [%s], find null or empty descriptorBytes, please check you descriptorBytes", descriptorHttpGetUrl));
        }
    }

    /** Wraps the raw property map and runs the Protobuf format validator over it. */
    private static DescriptorProperties getValidatedProperties(Map propertiesMap) {
        DescriptorProperties descriptorProperties = new DescriptorProperties();
        descriptorProperties.putProperties(propertiesMap);
        // validate
        (new ProtobufValidator()).validate(descriptorProperties);
        return descriptorProperties;
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufRowSerializationSchema.java
================================================
package flink.examples.sql._05.format.formats.protobuf.row;

import static com.google.protobuf.Descriptors.FieldDescriptor.Type.ENUM;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; import java.util.TimeZone; import java.util.stream.Collectors; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ListTypeInfo; import org.apache.flink.api.java.typeutils.MapTypeInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.types.Row; import org.apache.flink.util.Preconditions; import com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.FieldDescriptor; import com.google.protobuf.DynamicMessage; import com.google.protobuf.GeneratedMessageV3; import com.google.protobuf.MapEntry; import com.google.protobuf.Message; import com.google.protobuf.WireFormat; import flink.examples.sql._05.format.formats.protobuf.row.typeutils.ProtobufSchemaConverter; /** * Serialization schema that serializes {@link Row} into Protobuf bytes. * *

Serializes objects that are represented in (nested) Flink rows. It support types that * are compatible with Flink's Table & SQL API. * *

Note: Changes in this class need to be kept in sync with the corresponding runtime * class {@link ProtobufRowDeserializationSchema} and schema converter {@link flink.formats.protobuf.typeutils.ProtobufSchemaConverter}. */ public class ProtobufRowSerializationSchema implements SerializationSchema { private static final long serialVersionUID = 2098447220136965L; /** * Used for time conversions from SQL types. */ private static final TimeZone LOCAL_TZ = TimeZone.getDefault(); /** * Protobuf message class for serialization. Might be null if message class is not available. */ private Class messageClazz; /** * DescriptorBytes for deserialization. */ private byte[] descriptorBytes; /** * Type information describing the result type. */ private transient RowTypeInfo typeInfo; private transient Descriptors.Descriptor descriptor; private transient Message defaultInstance; private transient SerializationRuntimeConverter serializationRuntimeConverter; @FunctionalInterface interface SerializationRuntimeConverter extends Serializable { Object convert(Object object); } /** * Creates an Protobuf serialization schema for the given message class. * * @param messageClazz Protobuf message class used to serialize Flink's row to Protobuf's message */ public ProtobufRowSerializationSchema(Class messageClazz) { Preconditions.checkNotNull(messageClazz, "Protobuf message class must not be null."); this.messageClazz = messageClazz; this.descriptorBytes = null; this.descriptor = ProtobufUtils.getDescriptor(this.messageClazz); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.messageClazz); this.defaultInstance = ProtobufUtils.getDefaultInstance(this.messageClazz); this.serializationRuntimeConverter = this.createRowConverter(this.descriptor, this.typeInfo); } /** * Creates an Protobuf serialization schema for the given descriptorBytes. 
* * @param descriptorBytes descriptorBytes used to serialize Flink's row to Protobuf's message */ public ProtobufRowSerializationSchema(byte[] descriptorBytes) { Preconditions.checkNotNull(descriptorBytes, "Protobuf message class must not be null."); this.messageClazz = null; this.descriptorBytes = descriptorBytes; this.descriptor = ProtobufUtils.getDescriptor(this.descriptorBytes); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(descriptorBytes); this.defaultInstance = ProtobufUtils.getDefaultInstance(descriptorBytes); this.serializationRuntimeConverter = this.createRowConverter(this.descriptor, this.typeInfo); } @Override public byte[] serialize(Row row) { try { // convert to message Message message = (Message) this.serializationRuntimeConverter.convert(row); return message.toByteArray(); } catch (Throwable e) { throw new RuntimeException("Failed to serialize row.", e); } } private SerializationRuntimeConverter createRowConverter(Descriptors.Descriptor descriptor, RowTypeInfo rowTypeInfo) { final FieldDescriptor[] fieldDescriptors = descriptor.getFields().toArray(new FieldDescriptor[0]); final TypeInformation[] fieldTypeInfos = rowTypeInfo.getFieldTypes(); final int length = fieldDescriptors.length; final SerializationRuntimeConverter[] serializationRuntimeConverters = new SerializationRuntimeConverter[length]; for (int i = 0; i < length; ++i) { final FieldDescriptor fieldDescriptor = fieldDescriptors[i]; final TypeInformation fieldTypeInfo = fieldTypeInfos[i]; serializationRuntimeConverters[i] = createConverter(fieldDescriptor, fieldTypeInfo); } return (Object o) -> { Row row = (Row) o; final DynamicMessage.Builder dynamicMessageBuilder = DynamicMessage.newBuilder(descriptor); for (int i = 0; i < length; i++) { Object fieldO = row.getField(i); dynamicMessageBuilder.setField(fieldDescriptors[i], serializationRuntimeConverters[i].convert(fieldO)); } return dynamicMessageBuilder.build(); }; } private SerializationRuntimeConverter 
createListConverter(TypeInformation info) { if (info instanceof ListTypeInfo) { // list return (Object o) -> { List results = new ArrayList<>(((List) o).size()); for (Object fieldO : ((List) o)) { if (fieldO instanceof Date) { results.add(this.convertFromDate((Date) fieldO)); } else if (fieldO instanceof Time) { results.add(this.convertFromTime((Time) fieldO)); } else if (fieldO instanceof Timestamp) { results.add(convertFromTimestamp((Timestamp) fieldO)); } else { results.add(fieldO); } } return results; }; } else { return (Object o) -> { if (o instanceof Date) { return this.convertFromDate((Date) o); } else if (o instanceof Time) { return this.convertFromTime((Time) o); } else if (o instanceof Timestamp) { return convertFromTimestamp((Timestamp) o); } else { return o; } }; } } @SuppressWarnings("unchecked") private SerializationRuntimeConverter createConverter(Descriptors.GenericDescriptor genericDescriptor, TypeInformation info) { if (genericDescriptor instanceof Descriptors.Descriptor) { return createRowConverter((Descriptors.Descriptor) genericDescriptor, (RowTypeInfo) info); } else if (genericDescriptor instanceof FieldDescriptor) { FieldDescriptor fieldDescriptor = ((FieldDescriptor) genericDescriptor); // field switch (fieldDescriptor.getType()) { case INT32: case FIXED32: case UINT32: case SFIXED32: case SINT32: case INT64: case UINT64: case FIXED64: case SFIXED64: case SINT64: case DOUBLE: case FLOAT: case BOOL: // check for logical type return createListConverter(info); case STRING: case ENUM: if (info instanceof ListTypeInfo) { // list return (Object o) -> new ArrayList<>((List) o) .stream() .map((Object fieldO) -> convertFromEnum(fieldDescriptor, fieldO)) .collect(Collectors.toList()); } else { return (Object o) -> convertFromEnum(fieldDescriptor, o); } case GROUP: case MESSAGE: if (info instanceof ListTypeInfo) { // list TypeInformation elementTypeInfo = ((ListTypeInfo) info).getElementTypeInfo(); Descriptors.Descriptor elementDescriptor = 
fieldDescriptor.getMessageType(); SerializationRuntimeConverter elementConverter = this.createConverter(elementDescriptor, elementTypeInfo); return (Object o) -> ((List) o) .stream() .map(elementConverter::convert) .collect(Collectors.toList()); } else if (info instanceof MapTypeInfo) { // map final Descriptors.Descriptor messageType = fieldDescriptor.getMessageType(); final WireFormat.FieldType keyFieldType = fieldDescriptor.getMessageType().getFields().get(0).getLiteType(); final WireFormat.FieldType valueFieldType = fieldDescriptor.getMessageType().getFields().get(1).getLiteType(); final FieldDescriptor valueFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(1); final TypeInformation valueTypeInfo = ((MapTypeInfo) info).getValueTypeInfo(); SerializationRuntimeConverter valueConverter = createConverter(valueFieldDescriptor, valueTypeInfo); return (Object o) -> { final List> pbMapEntries = new ArrayList<>(((Map) o).size()); for (Map.Entry mapEntry : ((Map) o).entrySet()) { pbMapEntries.add(MapEntry.newDefaultInstance( messageType , keyFieldType , mapEntry.getKey() , valueFieldType , valueConverter.convert(mapEntry.getValue()))); } return pbMapEntries; }; } else if (info instanceof RowTypeInfo) { // row return createRowConverter(fieldDescriptor.getMessageType(), (RowTypeInfo) info); } throw new IllegalStateException("Message expected but was: "); case BYTES: // check for logical type return (Object o) -> { if (o instanceof BigDecimal) { return convertFromDecimal((BigDecimal) o); } return o; }; } } throw new RuntimeException("error"); } private byte[] convertFromDecimal(BigDecimal decimal) { // byte array must contain the two's-complement representation of the // unscaled integer value in big-endian byte order return decimal.unscaledValue().toByteArray(); } private int convertFromDate(Date date) { final long time = date.getTime(); final long converted = time + (long) LOCAL_TZ.getOffset(time); return (int) (converted / 86400000L); } private int 
convertFromTime(Time date) { final long time = date.getTime(); final long converted = time + (long) LOCAL_TZ.getOffset(time); return (int) (converted % 86400000L); } private long convertFromTimestamp(Timestamp date) { // adopted from Apache Calcite final long time = date.getTime(); return time + (long) LOCAL_TZ.getOffset(time); } private Object convertFromEnum(FieldDescriptor fieldDescriptor, Object object) { if (ENUM == fieldDescriptor.getType()) { Descriptors.EnumDescriptor enumDescriptor = fieldDescriptor.getEnumType(); Descriptors.EnumValueDescriptor enumValue = null; for (Descriptors.EnumValueDescriptor enumValueDescriptor : enumDescriptor.getValues()) { if (enumValueDescriptor.toString().equals(object)) { enumValue = enumValueDescriptor; } } if (null != enumValue) { return enumValue; } else { throw new NoSuchElementException(String.format(fieldDescriptor.getFullName() + " enumValues has not such element [%s]", object)); } } else { return object.toString(); } } private void writeObject(ObjectOutputStream outputStream) throws IOException { if (Objects.nonNull(this.messageClazz)) { outputStream.writeObject(this.messageClazz); } else { outputStream.writeObject(this.descriptorBytes); } } @SuppressWarnings("unchecked") private void readObject(ObjectInputStream inputStream) throws ClassNotFoundException, IOException { Object o = inputStream.readObject(); if (o instanceof Class) { this.messageClazz = (Class) o; this.descriptor = ProtobufUtils.getDescriptor(this.messageClazz); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.messageClazz); this.defaultInstance = ProtobufUtils.getDefaultInstance(this.messageClazz); } else { this.descriptorBytes = (byte[]) o; this.descriptor = ProtobufUtils.getDescriptor(this.descriptorBytes); this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.descriptorBytes); this.defaultInstance = DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType(); } 
// readObject (continued): recreate the converter tree from the restored descriptor/typeInfo.
        this.serializationRuntimeConverter = this.createConverter(this.descriptor, this.typeInfo);
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufSerializationSchema.java
================================================
package flink.examples.sql._05.format.formats.protobuf.row;

import org.apache.flink.api.common.serialization.SerializationSchema;

import com.google.protobuf.Message;

/**
 * Trivial {@link SerializationSchema} that serializes a Protobuf {@link Message} to its
 * binary wire format via {@link Message#toByteArray()}.
 */
public class ProtobufSerializationSchema<T extends Message> implements SerializationSchema<T> {

    @Override
    public byte[] serialize(T t) {
        return t.toByteArray();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufUtils.java
================================================
package flink.examples.sql._05.format.formats.protobuf.row;

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.DescriptorProtos.FileDescriptorProto;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Descriptors.FileDescriptor;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;

import flink.examples.sql._05.format.formats.utils.MoreSuppliers;

/**
 * Static helpers for obtaining Protobuf descriptors and default instances, either from a
 * generated message class or from serialized {@code FileDescriptorSet} bytes.
 */
public class ProtobufUtils {

    /**
     * Returns the default instance of the given generated message class by invoking its
     * static {@code getDefaultInstance()} accessor reflectively.
     *
     * @throws IllegalArgumentException if the class has no such accessor or it cannot be invoked
     */
    @SuppressWarnings("unchecked")
    public static <M extends Message> M getDefaultInstance(Class<M> messageClazz) {
        try {
            Method getDefaultInstanceMethod = messageClazz.getMethod("getDefaultInstance");
            return (M) getDefaultInstanceMethod.invoke(null);
        } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException var) {
            throw new IllegalArgumentException(var);
        }
    }

    /**
     * Builds a {@link DynamicMessage} default instance for the first message type contained
     * in the given serialized descriptor bytes.
     */
    public static Message getDefaultInstance(byte[] descriptorBytes) {
        return DynamicMessage.newBuilder(getDescriptor(descriptorBytes)).getDefaultInstanceForType();
    }

    /**
     * Extracts the first message descriptor from serialized descriptor bytes.
     *
     * <p>Delegates to {@link #getFileDescriptor(byte[])} instead of duplicating its parsing
     * logic, and no longer computes the unused name-to-proto map the old code built.
     */
    public static Descriptors.Descriptor getDescriptor(byte[] descriptorBytes) {
        return getFileDescriptor(descriptorBytes).getMessageTypes().get(0);
    }

    /**
     * Parses a serialized {@code FileDescriptorSet} and builds its first file descriptor.
     *
     * <p>NOTE(review): only the first {@code FileDescriptorProto} is built and no dependency
     * descriptors are passed to {@code buildFrom}, so descriptor sets whose first file
     * imports other .proto files will fail to build — confirm callers only ship
     * self-contained descriptors.
     */
    public static FileDescriptor getFileDescriptor(byte[] descriptorBytes) {
        DescriptorProtos.FileDescriptorSet fileDescriptorSet =
                MoreSuppliers.throwing(() -> DescriptorProtos.FileDescriptorSet.parseFrom(descriptorBytes));
        List<FileDescriptorProto> fileDescriptorProtos = fileDescriptorSet.getFileList();
        return MoreSuppliers.throwing(() ->
                FileDescriptor.buildFrom(fileDescriptorProtos.get(0), new FileDescriptor[0]));
    }

    /** Returns the message descriptor of the given generated message class. */
    public static Descriptors.Descriptor getDescriptor(Class<? extends Message> messageClazz) {
        return getDefaultInstance(messageClazz).getDescriptorForType();
    }

    /**
     * Reads the given stream to EOF and returns its contents as a byte array.
     *
     * <p>Note: the input stream is closed by this method before returning.
     */
    public static byte[] getBytes(InputStream is) throws Exception {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            byte[] buffer = new byte[1024];
            int len;
            while ((len = is.read(buffer)) != -1) {
                bos.write(buffer, 0, len);
            }
            is.close();
            bos.flush();
            return bos.toByteArray();
        }
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/row/typeutils/ProtobufSchemaConverter.java
================================================
package
flink.examples.sql._05.format.formats.protobuf.row.typeutils; import java.util.List; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.common.typeinfo.Types; import org.apache.flink.api.java.typeutils.MapTypeInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.table.types.logical.ArrayType; import org.apache.flink.table.types.logical.BigIntType; import org.apache.flink.table.types.logical.BinaryType; import org.apache.flink.table.types.logical.BooleanType; import org.apache.flink.table.types.logical.DoubleType; import org.apache.flink.table.types.logical.FloatType; import org.apache.flink.table.types.logical.IntType; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.table.types.logical.MapType; import org.apache.flink.table.types.logical.RowType; import org.apache.flink.table.types.logical.RowType.RowField; import org.apache.flink.table.types.logical.VarCharType; import org.apache.flink.types.Row; import org.apache.flink.util.Preconditions; import com.google.common.collect.Lists; import com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.FieldDescriptor; import com.google.protobuf.Message; import flink.examples.sql._05.format.formats.protobuf.row.ProtobufUtils; /** * Converts an Protobuf schema into Flink's type information. It uses {@link RowTypeInfo} for representing * objects and converts Protobuf types into types that are compatible with Flink's Table & SQL API. * *

Note: Changes in this class need to be kept in sync with the corresponding runtime */ public class ProtobufSchemaConverter { private ProtobufSchemaConverter() { // private } /** * Converts an Protobuf class into a nested row structure with deterministic field order and data * types that are compatible with Flink's Table & SQL API. * * @param protobufClass Protobuf message that contains schema information * @return type information matching the schema */ @SuppressWarnings("unchecked") public static TypeInformation convertToTypeInfo(Class protobufClass) { Preconditions.checkNotNull(protobufClass, "Protobuf specific message class must not be null."); // determine schema to retrieve deterministic field order final Descriptors.Descriptor descriptor = ProtobufUtils.getDescriptor(protobufClass); return (TypeInformation) convertToTypeInfo(descriptor); } @SuppressWarnings("unchecked") public static RowType convertToRowDataTypeInfo(Class protobufClass) { Preconditions.checkNotNull(protobufClass, "Protobuf specific message class must not be null."); // determine schema to retrieve deterministic field order final Descriptors.Descriptor descriptor = ProtobufUtils.getDescriptor(protobufClass); return (RowType) convertToRowDataTypeInfo(descriptor); } /** * Converts an Protobuf descriptorBytes into a nested row structure with deterministic field order and data * types that are compatible with Flink's Table & SQL API. 
* * @param descriptorBytes Protobuf descriptorBytes * @return type information matching the schema */ @SuppressWarnings("unchecked") public static TypeInformation convertToTypeInfo(byte[] descriptorBytes) { Preconditions.checkNotNull(descriptorBytes, "Protobuf descriptorBytes must not be null."); // determine schema to retrieve deterministic field order final Descriptors.Descriptor descriptor = ProtobufUtils.getDescriptor(descriptorBytes); return (TypeInformation) convertToTypeInfo(descriptor); } public static LogicalType convertToRowDataTypeInfo(Descriptors.GenericDescriptor genericDescriptor) { if (genericDescriptor instanceof Descriptors.Descriptor) { Descriptors.Descriptor descriptor = ((Descriptors.Descriptor) genericDescriptor); List fieldDescriptors = descriptor.getFields(); int size = fieldDescriptors.size(); final LogicalType[] types = new LogicalType[size]; final String[] names = new String[size]; for (int i = 0; i < size; i++) { final FieldDescriptor field = descriptor.getFields().get(i); types[i] = convertToRowDataTypeInfo(field); names[i] = field.getName(); } if (descriptor.getOptions().getMapEntry()) { // map return new MapType(types[0], types[1]); } else { // message List rowFields = Lists.newLinkedList(); for (int i = 0; i < size; i++) { rowFields.add(new RowField(names[i], types[i])); } return new RowType(rowFields); } } else if (genericDescriptor instanceof FieldDescriptor) { FieldDescriptor fieldDescriptor = ((FieldDescriptor) genericDescriptor); LogicalType logicalType = null; // field switch (fieldDescriptor.getType()) { case DOUBLE: logicalType = new DoubleType(); break; case FLOAT: logicalType = new FloatType(); break; case INT64: case UINT64: case FIXED64: case SFIXED64: case SINT64: logicalType = new BigIntType(); break; case INT32: case FIXED32: case UINT32: case SFIXED32: case SINT32: logicalType = new IntType(); break; case BOOL: logicalType = new BooleanType(); break; case STRING: case ENUM: logicalType = new 
VarCharType(Integer.MAX_VALUE); break; case GROUP: case MESSAGE: logicalType = convertToRowDataTypeInfo(fieldDescriptor.getMessageType()); break; case BYTES: logicalType = new ArrayType(new BinaryType()); break; } if (fieldDescriptor.isRepeated() && !(logicalType instanceof MapType)) { return new ArrayType(logicalType); } else { return logicalType; } } throw new IllegalArgumentException("Unsupported Protobuf type '" + genericDescriptor.getName() + "'."); } public static TypeInformation convertToTypeInfo(Descriptors.GenericDescriptor genericDescriptor) { if (genericDescriptor instanceof Descriptors.Descriptor) { Descriptors.Descriptor descriptor = ((Descriptors.Descriptor) genericDescriptor); List fieldDescriptors = descriptor.getFields(); int size = fieldDescriptors.size(); final TypeInformation[] types = new TypeInformation[size]; final String[] names = new String[size]; for (int i = 0; i < size; i++) { final FieldDescriptor field = descriptor.getFields().get(i); types[i] = convertToTypeInfo(field); names[i] = field.getName(); } if (descriptor.getOptions().getMapEntry()) { // map return Types.MAP(types[0], types[1]); } else { // message return Types.ROW_NAMED(names, types); } } else if (genericDescriptor instanceof FieldDescriptor) { FieldDescriptor fieldDescriptor = ((FieldDescriptor) genericDescriptor); TypeInformation typeInformation = null; // field switch (fieldDescriptor.getType()) { case DOUBLE: typeInformation = Types.DOUBLE; break; case FLOAT: typeInformation = Types.FLOAT; break; case INT64: case UINT64: case FIXED64: case SFIXED64: case SINT64: typeInformation = Types.LONG; break; case INT32: case FIXED32: case UINT32: case SFIXED32: case SINT32: typeInformation = Types.INT; break; case BOOL: typeInformation = Types.BOOLEAN; break; case STRING: case ENUM: typeInformation = Types.STRING; break; case GROUP: case MESSAGE: typeInformation = convertToTypeInfo(fieldDescriptor.getMessageType()); break; case BYTES: typeInformation = 
Types.PRIMITIVE_ARRAY(Types.BYTE); break; } if (fieldDescriptor.isRepeated() && !(typeInformation instanceof MapTypeInfo)) { return Types.LIST(typeInformation); } else { return typeInformation; } } throw new IllegalArgumentException("Unsupported Protobuf type '" + genericDescriptor.getName() + "'."); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufFormatFactory.java ================================================ package flink.examples.sql._05.format.formats.protobuf.rowdata; import static flink.examples.sql._05.format.formats.protobuf.rowdata.ProtobufOptions.PROTOBUF_CLASS_NAME; import java.util.Collections; import java.util.HashSet; import java.util.Set; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ReadableConfig; import org.apache.flink.table.connector.ChangelogMode; import org.apache.flink.table.connector.format.DecodingFormat; import org.apache.flink.table.connector.format.EncodingFormat; import org.apache.flink.table.connector.source.DynamicTableSource; import org.apache.flink.table.data.RowData; import org.apache.flink.table.factories.DeserializationFormatFactory; import org.apache.flink.table.factories.DynamicTableFactory.Context; import org.apache.flink.table.factories.FactoryUtil; import org.apache.flink.table.factories.SerializationFormatFactory; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.logical.RowType; import com.google.protobuf.GeneratedMessageV3; public class ProtobufFormatFactory implements DeserializationFormatFactory, SerializationFormatFactory { public static final String IDENTIFIER = "protobuf"; @Override public DecodingFormat> createDecodingFormat(Context context, ReadableConfig formatOptions) { 
FactoryUtil.validateFactoryOptions(this, formatOptions); final String className = formatOptions.get(PROTOBUF_CLASS_NAME); try { Class protobufV3 = (Class) this.getClass().getClassLoader().loadClass(className); return new DecodingFormat>() { @Override public DeserializationSchema createRuntimeDecoder(DynamicTableSource.Context context, DataType physicalDataType) { final RowType rowType = (RowType) physicalDataType.getLogicalType(); return new ProtobufRowDataDeserializationSchema( protobufV3 , true , rowType); } @Override public ChangelogMode getChangelogMode() { return ChangelogMode.insertOnly(); } }; } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } @Override public EncodingFormat> createEncodingFormat(Context context, ReadableConfig formatOptions) { return null; } @Override public String factoryIdentifier() { return IDENTIFIER; } @Override public Set> requiredOptions() { return Collections.emptySet(); } @Override public Set> optionalOptions() { Set> optionalOptions = new HashSet<>(); optionalOptions.add(PROTOBUF_CLASS_NAME); return optionalOptions; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufOptions.java ================================================ package flink.examples.sql._05.format.formats.protobuf.rowdata; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ConfigOptions; public class ProtobufOptions { public static final ConfigOption PROTOBUF_CLASS_NAME = ConfigOptions.key("class-name") .stringType() .noDefaultValue() .withDescription( "Optional flag to specify whether to fail if a field is missing or not, false by default."); public static final ConfigOption PROTOBUF_DESCRIPTOR_FILE = ConfigOptions.key("descriptor-file") .stringType() .noDefaultValue() .withDescription( "Optional flag to skip fields and rows with parse errors instead of failing;\n" + "fields are set to null in case of 
errors, false by default."); } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufRowDataDeserializationSchema.java ================================================ package flink.examples.sql._05.format.formats.protobuf.rowdata; import static java.lang.String.format; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.Objects; import org.apache.flink.api.common.serialization.AbstractDeserializationSchema; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.logical.RowType; import org.apache.flink.util.Preconditions; import com.google.protobuf.Descriptors; import com.google.protobuf.GeneratedMessageV3; import com.google.protobuf.Message; import flink.examples.sql._05.format.formats.protobuf.row.ProtobufUtils; import flink.examples.sql._05.format.formats.protobuf.row.typeutils.ProtobufSchemaConverter; public class ProtobufRowDataDeserializationSchema extends AbstractDeserializationSchema { /** * Protobuf message class for deserialization. Might be null if message class is not available. */ private Class messageClazz; /** * Protobuf serialization descriptorBytes */ private byte[] descriptorBytes; /** * Protobuf serialization descriptor. */ private transient Descriptors.Descriptor descriptor; /** * Type information describing the result type. */ private transient RowType protobufOriginalRowType; /** Flag indicating whether to ignore invalid fields/rows (default: throw an exception). */ private final boolean ignoreParseErrors; /** TypeInformation of the produced {@link RowData}. */ private RowType expectedResultType; /** * Protobuf defaultInstance for descriptor */ private transient Message defaultInstance; private ProtobufToRowDataConverters.ProtobufToRowDataConverter runtimeConverter; /** * Creates a Protobuf deserialization descriptor for the given message class. 
Having the concrete Protobuf message class available may improve performance.
     *
     * @param messageClazz Protobuf message class used to deserialize Protobuf's message to Flink's row
     * @param ignoreParseErrors if true, records that fail to parse are emitted as null instead of failing
     * @param expectedResultType row type the produced RowData must conform to
     */
    public ProtobufRowDataDeserializationSchema(
            Class messageClazz
            , boolean ignoreParseErrors
            , RowType expectedResultType) {
        this.ignoreParseErrors = ignoreParseErrors;
        Preconditions.checkNotNull(messageClazz, "Protobuf message class must not be null.");
        this.messageClazz = messageClazz;
        this.descriptorBytes = null;
        this.descriptor = ProtobufUtils.getDescriptor(messageClazz);
        this.defaultInstance = ProtobufUtils.getDefaultInstance(messageClazz);
        // the protobuf message's own schema (may be wider than expectedResultType)
        this.protobufOriginalRowType = (RowType) ProtobufSchemaConverter.convertToRowDataTypeInfo(messageClazz);
        this.expectedResultType = expectedResultType;
        this.runtimeConverter = new ProtobufToRowDataConverters(false)
                .createRowDataConverterByLogicalType(this.descriptor, this.expectedResultType);
    }

    /**
     * Creates a Protobuf deserialization descriptor for the given Protobuf descriptorBytes.
     *
     * @param descriptorBytes Protobuf descriptorBytes to deserialize Protobuf's message to Flink's row
     * @param ignoreParseErrors
     */
//    public ProtobufRowDataDeserializationSchema(
//            byte[] descriptorBytes
//            , boolean ignoreParseErrors
//            , RowType expectedResultType) {
//        this.ignoreParseErrors = ignoreParseErrors;
//        Preconditions.checkNotNull(descriptorBytes, "Protobuf descriptorBytes must not be null.");
//        this.messageClazz = null;
//        this.descriptorBytes = descriptorBytes;
//        this.descriptor = ProtobufUtils.getDescriptor(descriptorBytes);
////        this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.descriptor);
//        this.defaultInstance = DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType();
////        this.runtimeConverter = new ProtobufToRowDataConverters(true)
////                .createRowDataConverter(this.descriptor, this.typeInfo, null);
//
//        this.expectedResultType = expectedResultType;
//    }

    /**
     * Parses one protobuf-encoded record into a RowData, or null for null input / parse
     * failures when ignoreParseErrors is set.
     */
    @Override
    public RowData deserialize(byte[] bytes) throws IOException {
        if (bytes == null) {
            return null;
        }
        try {
            Message message = this.defaultInstance
                    .newBuilderForType()
                    .mergeFrom(bytes)
                    .build();
            return (RowData) runtimeConverter.convert(message);
        } catch (Throwable t) {
            if (ignoreParseErrors) {
                // best-effort mode: swallow the failure and drop the record
                return null;
            }
            throw new IOException(
                    format("Failed to deserialize Protobuf '%s'.", new String(bytes)), t);
        }
    }

    // Custom Java serialization: transient protobuf state is rebuilt in readObject.
    private void writeObject(ObjectOutputStream outputStream) throws IOException {
        if (Objects.nonNull(this.messageClazz)) {
            outputStream.writeObject(this.messageClazz);
            outputStream.writeObject(this.expectedResultType);
        } else {
            // NOTE(review): this branch writes a single object, but readObject always reads two —
            // the descriptor-bytes path would fail to round-trip. Currently dead code, since the
            // matching constructor is commented out above.
            outputStream.writeObject(this.descriptorBytes);
        }
    }

    @SuppressWarnings("unchecked")
    private void readObject(ObjectInputStream inputStream) throws ClassNotFoundException, IOException {
        Object o = inputStream.readObject();
        this.expectedResultType = (RowType) inputStream.readObject();
        if (o instanceof Class) {
            this.messageClazz = (Class) o;
            this.descriptor = ProtobufUtils.getDescriptor(messageClazz);
            this.defaultInstance = ProtobufUtils.getDefaultInstance(messageClazz);
            this.protobufOriginalRowType = (RowType) ProtobufSchemaConverter.convertToRowDataTypeInfo(messageClazz);
            // NOTE(review): the constructor builds the converter with
            // createRowDataConverterByLogicalType, but this path uses
            // createRowDataConverterByDescriptor — confirm the two are interchangeable.
            this.runtimeConverter = new ProtobufToRowDataConverters(false)
                    .createRowDataConverterByDescriptor(this.descriptor, this.expectedResultType);
        } else {
//            this.descriptorBytes = (byte[]) o;
//            this.descriptor = ProtobufUtils.getDescriptor(this.descriptorBytes);
//            this.typeInfo = (RowTypeInfo) ProtobufSchemaConverter.convertToTypeInfo(this.descriptor);
//            this.defaultInstance = DynamicMessage.newBuilder(this.descriptor).getDefaultInstanceForType();
        }
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufRowDataSerializationSchema.java ================================================

package flink.examples.sql._05.format.formats.protobuf.rowdata;

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.table.data.RowData;

/**
 * Placeholder SerializationSchema for the protobuf format.
 * NOTE(review): stub implementation — always emits an empty byte array.
 */
public class ProtobufRowDataSerializationSchema implements SerializationSchema {

    @Override
    public byte[] serialize(RowData element) {
        return new byte[0];
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufToRowDataConverters.java ================================================

package flink.examples.sql._05.format.formats.protobuf.rowdata;

import java.io.Serializable;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;

import org.apache.flink.table.data.GenericArrayData;
import org.apache.flink.table.data.GenericMapData;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.ArrayType;
import
org.apache.flink.table.types.logical.DateType; import org.apache.flink.table.types.logical.DecimalType; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.table.types.logical.MapType; import org.apache.flink.table.types.logical.RowType; import org.apache.flink.table.types.logical.TimeType; import org.apache.flink.table.types.logical.VarCharType; import org.joda.time.DateTime; import org.joda.time.DateTimeFieldType; import org.joda.time.LocalDate; import org.joda.time.LocalTime; import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.FieldDescriptor; import com.google.protobuf.Descriptors.FieldDescriptor.JavaType; import com.google.protobuf.DynamicMessage; import com.google.protobuf.MapEntry; import com.google.protobuf.Message; public class ProtobufToRowDataConverters implements Serializable { /** * Used for time conversions into SQL types. */ private static final TimeZone LOCAL_TZ = TimeZone.getDefault(); private final boolean isDynamicMessage; public ProtobufToRowDataConverters(boolean isDynamicMessage) { this.isDynamicMessage = isDynamicMessage; } @FunctionalInterface public interface ProtobufToRowDataConverter extends Serializable { Object convert(Object object); } public ProtobufToRowDataConverter createRowDataConverterByLogicalType( Descriptors.Descriptor descriptor , RowType rowType) { final FieldDescriptor[] fieldDescriptors = descriptor.getFields().toArray(new FieldDescriptor[0]); List fieldLogicalTypes = rowType.getChildren(); final int length = fieldDescriptors.length; final ProtobufToRowDataConverter[] runtimeConverters = new ProtobufToRowDataConverter[length]; for (int i = 0; i < length; i++) { final FieldDescriptor fieldDescriptor = fieldDescriptors[i]; final LogicalType fieldLogicalType = fieldLogicalTypes.get(i); runtimeConverters[i] = createConverterByLogicalType(fieldDescriptor, fieldLogicalType); } return (Object o) -> { Message message = (Message) o; final 
GenericRowData genericRowData = new GenericRowData(length); for (int i = 0; i < length; i++) { Object fieldO = message.getField(fieldDescriptors[i]); genericRowData.setField(i, runtimeConverters[i].convert(fieldO)); } return genericRowData; }; } @SuppressWarnings("unchecked") private ProtobufToRowDataConverter createConverterByLogicalType(Descriptors.GenericDescriptor genericDescriptor, LogicalType info) { // we perform the conversion based on descriptor information but enriched with pre-computed // type information where useful (i.e., for list) if (info instanceof RowType) { return createRowDataConverterByDescriptor((Descriptors.Descriptor) genericDescriptor, (RowType) info); } else { FieldDescriptor fieldDescriptor = ((FieldDescriptor) genericDescriptor); switch (info.getTypeRoot()) { case CHAR: case VARCHAR: case BOOLEAN: case DECIMAL: case TINYINT: case SMALLINT: case INTEGER: case BIGINT: case FLOAT: case DOUBLE: if (info instanceof ArrayType) { // list LogicalType elementLogicalType = ((ArrayType) info).getElementType(); return createArrayConverter(elementLogicalType); } else { return createObjectConverter(info); } case ARRAY: case MULTISET: // list LogicalType elementLogicalType = ((ArrayType) info).getElementType(); if (fieldDescriptor.getJavaType() != JavaType.MESSAGE) { // list return createArrayConverter(elementLogicalType); } Descriptors.Descriptor elementDescriptor = fieldDescriptor.getMessageType(); ProtobufToRowDataConverter elementConverter = this.createConverterByDescriptor(elementDescriptor, elementLogicalType); return (Object o) -> new GenericArrayData(((List) o) .stream() .map(elementConverter::convert) .toArray()); case ROW: // row return createRowDataConverterByDescriptor(((FieldDescriptor) genericDescriptor).getMessageType(), (RowType) info); case MAP: // map final MapType mapTypeInfo = (MapType) info; // todo map's key only support string final ProtobufToRowDataConverter keyConverter = Object::toString; final FieldDescriptor 
keyFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(0); final FieldDescriptor valueFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(1); final LogicalType valueTypeInfo = mapTypeInfo.getValueType(); final ProtobufToRowDataConverter valueConverter = createConverterByDescriptor(valueFieldDescriptor, valueTypeInfo); if (this.isDynamicMessage) { return (Object o) -> { final List dynamicMessages = (List) o; final Map convertedMap = new HashMap<>(dynamicMessages.size()); dynamicMessages.forEach((DynamicMessage dynamicMessage) -> { convertedMap.put( StringData.fromString((String) keyConverter.convert(dynamicMessage.getField(keyFieldDescriptor))) , valueConverter.convert(dynamicMessage.getField(valueFieldDescriptor))); }); return new GenericMapData(convertedMap); }; } else { return (Object o) -> { final List mapEntryList = (List) o; final Map convertedMap = new HashMap<>(mapEntryList.size()); mapEntryList.forEach((MapEntry message) -> { convertedMap.put( StringData.fromString((String) keyConverter.convert(message.getKey())) , valueConverter.convert(message.getValue())); }); return new GenericMapData(convertedMap); }; } case BINARY: case VARBINARY: return (Object o) -> { final byte[] bytes = ((ByteString) o).toByteArray(); if (info instanceof DecimalType) { return convertToDecimal(bytes); } return bytes; }; } } throw new IllegalArgumentException("Unsupported Protobuf type '" + genericDescriptor.getName() + "'."); } public ProtobufToRowDataConverter createRowDataConverterByDescriptor( Descriptors.Descriptor descriptor , RowType rowType) { final FieldDescriptor[] fieldDescriptors = descriptor.getFields().toArray(new FieldDescriptor[0]); // final TypeInformation[] fieldTypeInfos = rowTypeInfo.getFieldTypes(); List fieldLogicalTypes = rowType.getChildren(); final int length = fieldDescriptors.length; final ProtobufToRowDataConverter[] runtimeConverters = new ProtobufToRowDataConverter[length]; for (int i = 0; i < length; i++) { final 
FieldDescriptor fieldDescriptor = fieldDescriptors[i]; final LogicalType fieldLogicalType = fieldLogicalTypes.get(i); runtimeConverters[i] = createConverterByDescriptor(fieldDescriptor, fieldLogicalType); } return (Object o) -> { Message message = (Message) o; final GenericRowData genericRowData = new GenericRowData(length); for (int i = 0; i < length; i++) { Object fieldO = message.getField(fieldDescriptors[i]); genericRowData.setField(i, runtimeConverters[i].convert(fieldO)); } return genericRowData; }; } @SuppressWarnings("unchecked") private ProtobufToRowDataConverter createConverterByDescriptor(Descriptors.GenericDescriptor genericDescriptor, LogicalType info) { // we perform the conversion based on descriptor information but enriched with pre-computed // type information where useful (i.e., for list) if (genericDescriptor instanceof Descriptors.Descriptor) { return createRowDataConverterByDescriptor((Descriptors.Descriptor) genericDescriptor, (RowType) info); } else if (genericDescriptor instanceof FieldDescriptor) { FieldDescriptor fieldDescriptor = ((FieldDescriptor) genericDescriptor); // field switch (fieldDescriptor.getType()) { case INT32: case FIXED32: case UINT32: case SFIXED32: case SINT32: case INT64: case UINT64: case FIXED64: case SFIXED64: case SINT64: case DOUBLE: case FLOAT: case BOOL: case STRING: if (info instanceof ArrayType) { // list LogicalType elementLogicalType = ((ArrayType) info).getElementType(); return createArrayConverter(elementLogicalType); } else { return createObjectConverter(info); } case ENUM: if (info instanceof ArrayType) { // list return (Object o) -> new GenericArrayData(((List) o) .stream() .map(Object::toString) .toArray()); } else { return Object::toString; } case GROUP: case MESSAGE: if (info instanceof ArrayType) { // list LogicalType elementLogicalType = ((ArrayType) info).getElementType(); Descriptors.Descriptor elementDescriptor = fieldDescriptor.getMessageType(); ProtobufToRowDataConverter elementConverter = 
this.createConverterByDescriptor(elementDescriptor, elementLogicalType); return (Object o) -> new GenericArrayData(((List) o) .stream() .map(elementConverter::convert) .toArray()); } else if (info instanceof MapType) { // map final MapType mapTypeInfo = (MapType) info; // todo map's key only support string final ProtobufToRowDataConverter keyConverter = Object::toString; final FieldDescriptor keyFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(0); final FieldDescriptor valueFieldDescriptor = fieldDescriptor.getMessageType().getFields().get(1); final LogicalType valueTypeInfo = mapTypeInfo.getValueType(); final ProtobufToRowDataConverter valueConverter = createConverterByDescriptor(valueFieldDescriptor, valueTypeInfo); if (this.isDynamicMessage) { return (Object o) -> { final List dynamicMessages = (List) o; final Map convertedMap = new HashMap<>(dynamicMessages.size()); dynamicMessages.forEach((DynamicMessage dynamicMessage) -> { convertedMap.put( StringData.fromString((String) keyConverter.convert(dynamicMessage.getField(keyFieldDescriptor))) , valueConverter.convert(dynamicMessage.getField(valueFieldDescriptor))); }); return new GenericMapData(convertedMap); }; } else { return (Object o) -> { final List mapEntryList = (List) o; final Map convertedMap = new HashMap<>(mapEntryList.size()); mapEntryList.forEach((MapEntry message) -> { convertedMap.put( StringData.fromString((String) keyConverter.convert(message.getKey())) , valueConverter.convert(message.getValue())); }); return new GenericMapData(convertedMap); }; } } else if (info instanceof RowType) { // row return createRowDataConverterByDescriptor(((FieldDescriptor) genericDescriptor).getMessageType(), (RowType) info); } throw new IllegalStateException("Message expected but was: "); case BYTES: return (Object o) -> { final byte[] bytes = ((ByteString) o).toByteArray(); if (info instanceof DecimalType) { return convertToDecimal(bytes); } return bytes; }; } } throw new 
IllegalArgumentException("Unsupported Protobuf type '" + genericDescriptor.getName() + "'."); } @SuppressWarnings("unchecked") private ProtobufToRowDataConverter createArrayConverter(LogicalType info) { ProtobufToRowDataConverter elementConverter; if (info instanceof DateType) { elementConverter = this::convertToDate; } else if (info instanceof TimeType) { elementConverter = this::convertToTime; } else if (info instanceof VarCharType) { elementConverter = this::convertToString; } else { elementConverter = (Object fieldO) -> (fieldO); } return (Object o) -> new GenericArrayData(((List) o) .stream() .map(elementConverter::convert) .toArray()); } private StringData convertToString(Object filedO) { return StringData.fromString((String) filedO); } private ProtobufToRowDataConverter createObjectConverter(LogicalType info) { if (info instanceof DateType) { return this::convertToDate; } else if (info instanceof TimeType) { return this::convertToTime; } else if (info instanceof VarCharType) { return this::convertToString; } else { return (Object o) -> o; } } // -------------------------------------------------------------------------------------------- private BigDecimal convertToDecimal(byte[] bytes) { return new BigDecimal(new BigInteger(bytes)); } private Date convertToDate(Object object) { final long millis; if (object instanceof Integer) { final Integer value = (Integer) object; // adopted from Apache Calcite final long t = (long) value * 86400000L; millis = t - (long) LOCAL_TZ.getOffset(t); } else { // use 'provided' Joda time final LocalDate value = (LocalDate) object; millis = value.toDate().getTime(); } return new Date(millis); } private Time convertToTime(Object object) { final long millis; if (object instanceof Integer) { millis = (Integer) object; } else { // use 'provided' Joda time final LocalTime value = (LocalTime) object; millis = value.get(DateTimeFieldType.millisOfDay()); } return new Time(millis - LOCAL_TZ.getOffset(millis)); } private Timestamp 
convertToTimestamp(Object object) { final long millis; if (object instanceof Long) { millis = (Long) object; } else { // use 'provided' Joda time final DateTime value = (DateTime) object; millis = value.toDate().getTime(); } return new Timestamp(millis - LOCAL_TZ.getOffset(millis)); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/RowDataToProtobufConverters.java ================================================ package flink.examples.sql._05.format.formats.protobuf.rowdata; public class RowDataToProtobufConverters { } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/utils/MoreRunnables.java ================================================ package flink.examples.sql._05.format.formats.utils; public class MoreRunnables { public static void throwing(ThrowableRunable throwableRunable) { try { throwableRunable.run(); } catch (Throwable e) { throw new RuntimeException(e); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/utils/MoreSuppliers.java ================================================ package flink.examples.sql._05.format.formats.utils; public class MoreSuppliers { private MoreSuppliers() { throw new UnsupportedOperationException(); } public static OUT throwing(ThrowableSupplier throwableSupplier) { try { return throwableSupplier.get(); } catch (Throwable e) { throw new RuntimeException(e); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/utils/ThrowableRunable.java ================================================ package flink.examples.sql._05.format.formats.utils; @FunctionalInterface public interface ThrowableRunable { void run() throws EXCEPTION; } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_05/format/formats/utils/ThrowableSupplier.java ================================================ package flink.examples.sql._05.format.formats.utils; @FunctionalInterface public interface ThrowableSupplier { OUT get() throws EXCEPTION; } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/CalciteTest.java ================================================ package flink.examples.sql._06.calcite; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.sql.parser.SqlParser; public class CalciteTest { public static void main(String[] args) throws SqlParseException { SqlParser parser = SqlParser.create("select c,d from source where a = '6'", SqlParser.Config.DEFAULT); SqlNode sqlNode = parser.parseStmt(); System.out.println(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/ParserTest.java ================================================ package flink.examples.sql._06.calcite; import java.util.Arrays; import org.apache.flink.api.java.tuple.Tuple3; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; import flink.examples.sql._01.countdistincterror.udf.Mod_UDF; import flink.examples.sql._01.countdistincterror.udf.StatusMapper_UDF; public class ParserTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = 
StreamExecutionEnvironment.getExecutionEnvironment(); env.setParallelism(10); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useOldPlanner() .inStreamingMode() .build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream> tuple3DataStream = env.fromCollection(Arrays.asList( Tuple3.of("2", 1L, 1627254000000L), Tuple3.of("2", 1L, 1627218000000L + 5000L), Tuple3.of("2", 101L, 1627218000000L + 6000L), Tuple3.of("2", 201L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 7000L), Tuple3.of("2", 301L, 1627218000000L + 86400000 + 7000L))) .assignTimestampsAndWatermarks( new BoundedOutOfOrdernessTimestampExtractor>(Time.seconds(0L)) { @Override public long extractTimestamp(Tuple3 element) { return element.f2; } }); tEnv.registerFunction("mod", new Mod_UDF()); tEnv.registerFunction("status_mapper", new StatusMapper_UDF()); tEnv.createTemporaryView("source_db.source_table", tuple3DataStream, "status, id, timestamp, rowtime.rowtime"); String sql = "SELECT\n" + " sum(part_pv) as pv,\n" + " window_start\n" + "FROM (\n" + "\tSELECT\n" + "\t count(1) as part_pv,\n" + "\t cast(tumble_start(rowtime, INTERVAL '60' SECOND) as bigint) * 1000 as window_start\n" + "\tFROM\n" + "\t source_db.source_table\n" + "\tGROUP BY\n" + "\t tumble(rowtime, INTERVAL '60' SECOND)\n" + "\t , mod(id, 1024)\n" + ")\n" + "GROUP BY\n" + " window_start"; Table result = tEnv.sqlQuery(sql); tEnv.toRetractStream(result, Row.class).print(); env.execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/JavaccCodeGenTest.java ================================================ package flink.examples.sql._06.calcite.javacc; public class JavaccCodeGenTest { public static void 
main(String[] args) throws Exception { // version(); javacc(); } private static void version() throws Exception { org.javacc.parser.Main.main(new String[] {"-version"}); } private static void javacc() throws Exception { String path = ClassLoader.getSystemResources("Simple1.jj").nextElement().getPath(); org.javacc.parser.Main.main(new String[] {path}); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/Simple1Test.java ================================================ package flink.examples.sql._06.calcite.javacc; import flink.examples.sql._06.calcite.javacc.generatedcode.Simple1; public class Simple1Test { public static void main(String[] args) throws Exception { Simple1.main(args); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/generatedcode/ParseException.java ================================================ package flink.examples.sql._06.calcite.javacc.generatedcode;/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 7.0 */ /* JavaCCOptions:KEEP_LINE_COLUMN=true */ /** * This exception is thrown when parse errors are encountered. * You can explicitly create objects of this exception type by * calling the method generateParseException in the generated * parser. * * You can modify this class to customize your error reporting * mechanisms so long as you retain the public fields. */ public class ParseException extends Exception { /** * The version identifier for this Serializable class. * Increment only if the serialized form of the * class changes. */ private static final long serialVersionUID = 1L; /** * The end of line string for this machine. */ protected static String EOL = System.getProperty("line.separator", "\n"); /** * This constructor is used by the method "generateParseException" * in the generated parser. 
Calling this constructor generates * a new object of this type with the fields "currentToken", * "expectedTokenSequences", and "tokenImage" set. */ public ParseException(Token currentTokenVal, int[][] expectedTokenSequencesVal, String[] tokenImageVal ) { super(initialise(currentTokenVal, expectedTokenSequencesVal, tokenImageVal)); currentToken = currentTokenVal; expectedTokenSequences = expectedTokenSequencesVal; tokenImage = tokenImageVal; } /** * The following constructors are for use by you for whatever * purpose you can think of. Constructing the exception in this * manner makes the exception behave in the normal way - i.e., as * documented in the class "Throwable". The fields "errorToken", * "expectedTokenSequences", and "tokenImage" do not contain * relevant information. The JavaCC generated code does not use * these constructors. */ public ParseException() { super(); } /** Constructor with message. */ public ParseException(String message) { super(message); } /** * This is the last token that has been consumed successfully. If * this object has been created due to a parse error, the token * following this token will (therefore) be the first error token. */ public Token currentToken; /** * Each entry in this array is an array of integers. Each array * of integers represents a sequence of tokens (by their ordinal * values) that is expected at this point of the parse. */ public int[][] expectedTokenSequences; /** * This is a reference to the "tokenImage" array of the generated * parser within which the parse error occurred. This array is * defined in the generated ...Constants interface. */ public String[] tokenImage; /** * It uses "currentToken" and "expectedTokenSequences" to generate a parse * error message and returns it. If this object has been created * due to a parse error, and you do not catch it (it gets thrown * from the parser) the correct error message * gets displayed. 
*/ private static String initialise(Token currentToken, int[][] expectedTokenSequences, String[] tokenImage) { StringBuilder expected = new StringBuilder(); int maxSize = 0; for (int i = 0; i < expectedTokenSequences.length; i++) { if (maxSize < expectedTokenSequences[i].length) { maxSize = expectedTokenSequences[i].length; } for (int j = 0; j < expectedTokenSequences[i].length; j++) { expected.append(tokenImage[expectedTokenSequences[i][j]]).append(' '); } if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) { expected.append("..."); } expected.append(EOL).append(" "); } String retval = "Encountered \""; Token tok = currentToken.next; for (int i = 0; i < maxSize; i++) { if (i != 0) retval += " "; if (tok.kind == 0) { retval += tokenImage[0]; break; } retval += " " + tokenImage[tok.kind]; retval += " \""; retval += add_escapes(tok.image); retval += " \""; tok = tok.next; } if (currentToken.next != null) { retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn; } retval += "." + EOL; if (expectedTokenSequences.length == 0) { // Nothing to add here } else { if (expectedTokenSequences.length == 1) { retval += "Was expecting:" + EOL + " "; } else { retval += "Was expecting one of:" + EOL + " "; } retval += expected.toString(); } return retval; } /** * Used to convert raw characters to their escaped version * when these raw version cannot be used as part of an ASCII * string literal. 
*/ static String add_escapes(String str) { StringBuilder retval = new StringBuilder(); char ch; for (int i = 0; i < str.length(); i++) { switch (str.charAt(i)) { case '\b': retval.append("\\b"); continue; case '\t': retval.append("\\t"); continue; case '\n': retval.append("\\n"); continue; case '\f': retval.append("\\f"); continue; case '\r': retval.append("\\r"); continue; case '\"': retval.append("\\\""); continue; case '\'': retval.append("\\\'"); continue; case '\\': retval.append("\\\\"); continue; default: if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) { String s = "0000" + Integer.toString(ch, 16); retval.append("\\u" + s.substring(s.length() - 4, s.length())); } else { retval.append(ch); } continue; } } return retval.toString(); } } /* JavaCC - OriginalChecksum=de3ddfc6669ad4ae8d41fff7ccf6fbb7 (do not edit this line) */ ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/generatedcode/Simple1.java ================================================ package flink.examples.sql._06.calcite.javacc.generatedcode;/* Simple1.java */ /* Generated By:JavaCC: Do not edit this line. Simple1.java */ /** Simple brace matcher. */ public class Simple1 implements Simple1Constants { /** Main entry point. */ public static void main(String args[]) throws ParseException { Simple1 parser = new Simple1(System.in); parser.Input(); } /** Root production. */ static final public void Input() throws ParseException { MatchedBraces(); label_1: while (true) { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case 1: case 2:{ ; break; } default: jj_la1[0] = jj_gen; break label_1; } switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case 1:{ jj_consume_token(1); break; } case 2:{ jj_consume_token(2); break; } default: jj_la1[1] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } jj_consume_token(0); } /** Brace matching production. 
*/ static final public void MatchedBraces() throws ParseException { jj_consume_token(3); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case 3:{ MatchedBraces(); break; } default: jj_la1[2] = jj_gen; ; } jj_consume_token(4); } static private boolean jj_initialized_once = false; /** Generated Token Manager. */ static public Simple1TokenManager token_source; static SimpleCharStream jj_input_stream; /** Current token. */ static public Token token; /** Next token. */ static public Token jj_nt; static private int jj_ntk; static private int jj_gen; static final private int[] jj_la1 = new int[3]; static private int[] jj_la1_0; static { jj_la1_init_0(); } private static void jj_la1_init_0() { jj_la1_0 = new int[] {0x6,0x6,0x8,}; } /** Constructor with InputStream. */ public Simple1(java.io.InputStream stream) { this(stream, null); } /** Constructor with InputStream and supplied encoding */ public Simple1(java.io.InputStream stream, String encoding) { if (jj_initialized_once) { System.out.println("ERROR: Second call to constructor of static parser. "); System.out.println(" You must either use ReInit() or set the JavaCC option STATIC to false"); System.out.println(" during parser generation."); throw new Error(); } jj_initialized_once = true; try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } token_source = new Simple1TokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 3; i++) jj_la1[i] = -1; } /** Reinitialise. */ static public void ReInit(java.io.InputStream stream) { ReInit(stream, null); } /** Reinitialise. 
*/ static public void ReInit(java.io.InputStream stream, String encoding) { try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 3; i++) jj_la1[i] = -1; } /** Constructor. */ public Simple1(java.io.Reader stream) { if (jj_initialized_once) { System.out.println("ERROR: Second call to constructor of static parser. "); System.out.println(" You must either use ReInit() or set the JavaCC option STATIC to false"); System.out.println(" during parser generation."); throw new Error(); } jj_initialized_once = true; jj_input_stream = new SimpleCharStream(stream, 1, 1); token_source = new Simple1TokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 3; i++) jj_la1[i] = -1; } /** Reinitialise. */ static public void ReInit(java.io.Reader stream) { if (jj_input_stream == null) { jj_input_stream = new SimpleCharStream(stream, 1, 1); } else { jj_input_stream.ReInit(stream, 1, 1); } if (token_source == null) { token_source = new Simple1TokenManager(jj_input_stream); } token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 3; i++) jj_la1[i] = -1; } /** Constructor with generated Token Manager. */ public Simple1(Simple1TokenManager tm) { if (jj_initialized_once) { System.out.println("ERROR: Second call to constructor of static parser. "); System.out.println(" You must either use ReInit() or set the JavaCC option STATIC to false"); System.out.println(" during parser generation."); throw new Error(); } jj_initialized_once = true; token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 3; i++) jj_la1[i] = -1; } /** Reinitialise. 
*/ public void ReInit(Simple1TokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 3; i++) jj_la1[i] = -1; } static private Token jj_consume_token(int kind) throws ParseException { Token oldToken; if ((oldToken = token).next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; if (token.kind == kind) { jj_gen++; return token; } token = oldToken; jj_kind = kind; throw generateParseException(); } /** Get the next Token. */ static final public Token getNextToken() { if (token.next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; jj_gen++; return token; } /** Get the specific Token. */ static final public Token getToken(int index) { Token t = token; for (int i = 0; i < index; i++) { if (t.next != null) t = t.next; else t = t.next = token_source.getNextToken(); } return t; } static private int jj_ntk_f() { if ((jj_nt=token.next) == null) return (jj_ntk = (token.next=token_source.getNextToken()).kind); else return (jj_ntk = jj_nt.kind); } static private java.util.List jj_expentries = new java.util.ArrayList(); static private int[] jj_expentry; static private int jj_kind = -1; /** Generate ParseException. */ static public ParseException generateParseException() { jj_expentries.clear(); boolean[] la1tokens = new boolean[5]; if (jj_kind >= 0) { la1tokens[jj_kind] = true; jj_kind = -1; } for (int i = 0; i < 3; i++) { if (jj_la1[i] == jj_gen) { for (int j = 0; j < 32; j++) { if ((jj_la1_0[i] & (1<", "\"\\n\"", "\"\\r\"", "\"{\"", "\"}\"", }; } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/generatedcode/Simple1TokenManager.java ================================================ package flink.examples.sql._06.calcite.javacc.generatedcode;/* Simple1TokenManager.java */ /* Generated By:JavaCC: Do not edit this line. Simple1TokenManager.java */ /** Token Manager. 
*/ public class Simple1TokenManager implements Simple1Constants { /** Debug output. */ public static java.io.PrintStream debugStream = System.out; /** Set debug output. */ public static void setDebugStream(java.io.PrintStream ds) { debugStream = ds; } static private int jjStopAtPos(int pos, int kind) { jjmatchedKind = kind; jjmatchedPos = pos; return pos + 1; } static private int jjMoveStringLiteralDfa0_0(){ switch(curChar) { case 10: return jjStopAtPos(0, 1); case 13: return jjStopAtPos(0, 2); case 123: return jjStopAtPos(0, 3); case 125: return jjStopAtPos(0, 4); default : return 1; } } /** Token literal values. */ public static final String[] jjstrLiteralImages = { "", "\12", "\15", "\173", "\175", }; static protected Token jjFillToken() { final Token t; final String curTokenImage; final int beginLine; final int endLine; final int beginColumn; final int endColumn; String im = jjstrLiteralImages[jjmatchedKind]; curTokenImage = (im == null) ? input_stream.GetImage() : im; beginLine = input_stream.getBeginLine(); beginColumn = input_stream.getBeginColumn(); endLine = input_stream.getEndLine(); endColumn = input_stream.getEndColumn(); t = Token.newToken(jjmatchedKind, curTokenImage); t.beginLine = beginLine; t.endLine = endLine; t.beginColumn = beginColumn; t.endColumn = endColumn; return t; } static final int[] jjnextStates = {0 }; static int curLexState = 0; static int defaultLexState = 0; static int jjnewStateCnt; static int jjround; static int jjmatchedPos; static int jjmatchedKind; /** Get the next Token. 
*/ public static Token getNextToken() { Token matchedToken; int curPos = 0; EOFLoop : for (;;) { try { curChar = input_stream.BeginToken(); } catch(Exception e) { jjmatchedKind = 0; jjmatchedPos = -1; matchedToken = jjFillToken(); return matchedToken; } jjmatchedKind = 0x7fffffff; jjmatchedPos = 0; curPos = jjMoveStringLiteralDfa0_0(); if (jjmatchedKind != 0x7fffffff) { if (jjmatchedPos + 1 < curPos) input_stream.backup(curPos - jjmatchedPos - 1); matchedToken = jjFillToken(); return matchedToken; } int error_line = input_stream.getEndLine(); int error_column = input_stream.getEndColumn(); String error_after = null; boolean EOFSeen = false; try { input_stream.readChar(); input_stream.backup(1); } catch (java.io.IOException e1) { EOFSeen = true; error_after = curPos <= 1 ? "" : input_stream.GetImage(); if (curChar == '\n' || curChar == '\r') { error_line++; error_column = 0; } else error_column++; } if (!EOFSeen) { input_stream.backup(1); error_after = curPos <= 1 ? "" : input_stream.GetImage(); } throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR); } } static void SkipLexicalActions(Token matchedToken) { switch(jjmatchedKind) { default : break; } } static void MoreLexicalActions() { jjimageLen += (lengthOfMatch = jjmatchedPos + 1); switch(jjmatchedKind) { default : break; } } static void TokenLexicalActions(Token matchedToken) { switch(jjmatchedKind) { default : break; } } static private void jjCheckNAdd(int state) { if (jjrounds[state] != jjround) { jjstateSet[jjnewStateCnt++] = state; jjrounds[state] = jjround; } } static private void jjAddStates(int start, int end) { do { jjstateSet[jjnewStateCnt++] = jjnextStates[start]; } while (start++ != end); } static private void jjCheckNAddTwoStates(int state1, int state2) { jjCheckNAdd(state1); jjCheckNAdd(state2); } /** Constructor. 
*/ public Simple1TokenManager(SimpleCharStream stream){ if (input_stream != null) throw new TokenMgrError("ERROR: Second call to constructor of static lexer. You must use ReInit() to initialize the static variables.", TokenMgrError.STATIC_LEXER_ERROR); input_stream = stream; } /** Constructor. */ public Simple1TokenManager (SimpleCharStream stream, int lexState){ ReInit(stream); SwitchTo(lexState); } /** Reinitialise parser. */ static public void ReInit(SimpleCharStream stream) { jjmatchedPos = jjnewStateCnt = 0; curLexState = defaultLexState; input_stream = stream; ReInitRounds(); } static private void ReInitRounds() { int i; jjround = 0x80000001; for (i = 0; i-- > 0;) jjrounds[i] = 0x80000000; } /** Reinitialise parser. */ static public void ReInit(SimpleCharStream stream, int lexState) { ReInit(stream); SwitchTo(lexState); } /** Switch to specified lex state. */ public static void SwitchTo(int lexState) { if (lexState >= 1 || lexState < 0) throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE); else curLexState = lexState; } /** Lexer state names. */ public static final String[] lexStateNames = { "DEFAULT", }; /** Lex State array. 
*/ public static final int[] jjnewLexState = { -1, -1, -1, -1, -1, }; static final long[] jjtoToken = { 0x1fL, }; static final long[] jjtoSkip = { 0x0L, }; static final long[] jjtoSpecial = { 0x0L, }; static final long[] jjtoMore = { 0x0L, }; static protected SimpleCharStream input_stream; static private final int[] jjrounds = new int[0]; static private final int[] jjstateSet = new int[2 * 0]; private static final StringBuilder jjimage = new StringBuilder(); private static StringBuilder image = jjimage; private static int jjimageLen; private static int lengthOfMatch; static protected int curChar; } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/generatedcode/SimpleCharStream.java ================================================ package flink.examples.sql._06.calcite.javacc.generatedcode;/* Generated By:JavaCC: Do not edit this line. SimpleCharStream.java Version 7.0 */ /* JavaCCOptions:STATIC=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ /** * An implementation of interface CharStream, where the stream is assumed to * contain only ASCII characters (without unicode processing). */ public class SimpleCharStream { /** Whether parser is static. */ public static final boolean staticFlag = true; static int bufsize; static int available; static int tokenBegin; /** Position in buffer. 
*/ static public int bufpos = -1; static protected int bufline[]; static protected int bufcolumn[]; static protected int column = 0; static protected int line = 1; static protected boolean prevCharIsCR = false; static protected boolean prevCharIsLF = false; static protected java.io.Reader inputStream; static protected char[] buffer; static protected int maxNextCharInd = 0; static protected int inBuf = 0; static protected int tabSize = 1; static protected boolean trackLineColumn = true; static public void setTabSize(int i) { tabSize = i; } static public int getTabSize() { return tabSize; } static protected void ExpandBuff(boolean wrapAround) { char[] newbuffer = new char[bufsize + 2048]; int newbufline[] = new int[bufsize + 2048]; int newbufcolumn[] = new int[bufsize + 2048]; try { if (wrapAround) { System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); System.arraycopy(buffer, 0, newbuffer, bufsize - tokenBegin, bufpos); buffer = newbuffer; System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); System.arraycopy(bufline, 0, newbufline, bufsize - tokenBegin, bufpos); bufline = newbufline; System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); System.arraycopy(bufcolumn, 0, newbufcolumn, bufsize - tokenBegin, bufpos); bufcolumn = newbufcolumn; maxNextCharInd = (bufpos += (bufsize - tokenBegin)); } else { System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); buffer = newbuffer; System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); bufline = newbufline; System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); bufcolumn = newbufcolumn; maxNextCharInd = (bufpos -= tokenBegin); } } catch (Throwable t) { throw new Error(t.getMessage()); } bufsize += 2048; available = bufsize; tokenBegin = 0; } static protected void FillBuff() throws java.io.IOException { if (maxNextCharInd == available) { if (available == bufsize) { if (tokenBegin > 2048) { 
bufpos = maxNextCharInd = 0; available = tokenBegin; } else if (tokenBegin < 0) bufpos = maxNextCharInd = 0; else ExpandBuff(false); } else if (available > tokenBegin) available = bufsize; else if ((tokenBegin - available) < 2048) ExpandBuff(true); else available = tokenBegin; } int i; try { if ((i = inputStream.read(buffer, maxNextCharInd, available - maxNextCharInd)) == -1) { inputStream.close(); throw new java.io.IOException(); } else maxNextCharInd += i; return; } catch(java.io.IOException e) { --bufpos; backup(0); if (tokenBegin == -1) tokenBegin = bufpos; throw e; } } /** Start. */ static public char BeginToken() throws java.io.IOException { tokenBegin = -1; char c = readChar(); tokenBegin = bufpos; return c; } static protected void UpdateLineColumn(char c) { column++; if (prevCharIsLF) { prevCharIsLF = false; line += (column = 1); } else if (prevCharIsCR) { prevCharIsCR = false; if (c == '\n') { prevCharIsLF = true; } else line += (column = 1); } switch (c) { case '\r' : prevCharIsCR = true; break; case '\n' : prevCharIsLF = true; break; case '\t' : column--; column += (tabSize - (column % tabSize)); break; default : break; } bufline[bufpos] = line; bufcolumn[bufpos] = column; } /** Read a character. */ static public char readChar() throws java.io.IOException { if (inBuf > 0) { --inBuf; if (++bufpos == bufsize) bufpos = 0; return buffer[bufpos]; } if (++bufpos >= maxNextCharInd) FillBuff(); char c = buffer[bufpos]; UpdateLineColumn(c); return c; } @Deprecated /** * @deprecated * @see #getEndColumn */ static public int getColumn() { return bufcolumn[bufpos]; } @Deprecated /** * @deprecated * @see #getEndLine */ static public int getLine() { return bufline[bufpos]; } /** Get token end column number. */ static public int getEndColumn() { return bufcolumn[bufpos]; } /** Get token end line number. */ static public int getEndLine() { return bufline[bufpos]; } /** Get token beginning column number. 
*/ static public int getBeginColumn() { return bufcolumn[tokenBegin]; } /** Get token beginning line number. */ static public int getBeginLine() { return bufline[tokenBegin]; } /** Backup a number of characters. */ static public void backup(int amount) { inBuf += amount; if ((bufpos -= amount) < 0) bufpos += bufsize; } /** Constructor. */ public SimpleCharStream(java.io.Reader dstream, int startline, int startcolumn, int buffersize) { if (inputStream != null) throw new Error("\n ERROR: Second call to the constructor of a static SimpleCharStream.\n" + " You must either use ReInit() or set the JavaCC option STATIC to false\n" + " during the generation of this class."); inputStream = dstream; line = startline; column = startcolumn - 1; available = bufsize = buffersize; buffer = new char[buffersize]; bufline = new int[buffersize]; bufcolumn = new int[buffersize]; } /** Constructor. */ public SimpleCharStream(java.io.Reader dstream, int startline, int startcolumn) { this(dstream, startline, startcolumn, 4096); } /** Constructor. */ public SimpleCharStream(java.io.Reader dstream) { this(dstream, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.Reader dstream, int startline, int startcolumn, int buffersize) { inputStream = dstream; line = startline; column = startcolumn - 1; if (buffer == null || buffersize != buffer.length) { available = bufsize = buffersize; buffer = new char[buffersize]; bufline = new int[buffersize]; bufcolumn = new int[buffersize]; } prevCharIsLF = prevCharIsCR = false; tokenBegin = inBuf = maxNextCharInd = 0; bufpos = -1; } /** Reinitialise. */ public void ReInit(java.io.Reader dstream, int startline, int startcolumn) { ReInit(dstream, startline, startcolumn, 4096); } /** Reinitialise. */ public void ReInit(java.io.Reader dstream) { ReInit(dstream, 1, 1, 4096); } /** Constructor. 
*/ public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException { this(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, int startline, int startcolumn, int buffersize) { this(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, int startcolumn) throws java.io.UnsupportedEncodingException { this(dstream, encoding, startline, startcolumn, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, int startline, int startcolumn) { this(dstream, startline, startcolumn, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException { this(dstream, encoding, 1, 1, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream) { this(dstream, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, String encoding, int startline, int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException { ReInit(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, int startline, int startcolumn, int buffersize) { ReInit(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException { ReInit(dstream, encoding, 1, 1, 4096); } /** Reinitialise. 
*/ public void ReInit(java.io.InputStream dstream) { ReInit(dstream, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, String encoding, int startline, int startcolumn) throws java.io.UnsupportedEncodingException { ReInit(dstream, encoding, startline, startcolumn, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, int startline, int startcolumn) { ReInit(dstream, startline, startcolumn, 4096); } /** Get token literal value. */ static public String GetImage() { if (bufpos >= tokenBegin) return new String(buffer, tokenBegin, bufpos - tokenBegin + 1); else return new String(buffer, tokenBegin, bufsize - tokenBegin) + new String(buffer, 0, bufpos + 1); } /** Get the suffix. */ static public char[] GetSuffix(int len) { char[] ret = new char[len]; if ((bufpos + 1) >= len) System.arraycopy(buffer, bufpos - len + 1, ret, 0, len); else { System.arraycopy(buffer, bufsize - (len - bufpos - 1), ret, 0, len - bufpos - 1); System.arraycopy(buffer, 0, ret, len - bufpos - 1, bufpos + 1); } return ret; } /** Reset buffer when finished. */ static public void Done() { buffer = null; bufline = null; bufcolumn = null; } /** * Method to adjust line and column numbers for the start of a token. 
*/ static public void adjustBeginLineColumn(int newLine, int newCol) { int start = tokenBegin; int len; if (bufpos >= tokenBegin) { len = bufpos - tokenBegin + inBuf + 1; } else { len = bufsize - tokenBegin + bufpos + 1 + inBuf; } int i = 0, j = 0, k = 0; int nextColDiff = 0, columnDiff = 0; while (i < len && bufline[j = start % bufsize] == bufline[k = ++start % bufsize]) { bufline[j] = newLine; nextColDiff = columnDiff + bufcolumn[k] - bufcolumn[j]; bufcolumn[j] = newCol + columnDiff; columnDiff = nextColDiff; i++; } if (i < len) { bufline[j] = newLine++; bufcolumn[j] = newCol + columnDiff; while (i++ < len) { if (bufline[j = start % bufsize] != bufline[++start % bufsize]) bufline[j] = newLine++; else bufline[j] = newLine; } } line = bufline[j]; column = bufcolumn[j]; } static boolean getTrackLineColumn() { return trackLineColumn; } static void setTrackLineColumn(boolean tlc) { trackLineColumn = tlc; } } /* JavaCC - OriginalChecksum=052d2c8783a7a693ccde91d90feb1d3b (do not edit this line) */ ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/generatedcode/Token.java ================================================ package flink.examples.sql._06.calcite.javacc.generatedcode;/* Generated By:JavaCC: Do not edit this line. Token.java Version 7.0 */ /* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ /** * Describes the input token stream. */ public class Token implements java.io.Serializable { /** * The version identifier for this Serializable class. * Increment only if the serialized form of the * class changes. */ private static final long serialVersionUID = 1L; /** * An integer that describes the kind of this token. This numbering * system is determined by JavaCCParser, and a table of these numbers is * stored in the file ...Constants.java. */ public int kind; /** The line number of the first character of this Token. 
*/ public int beginLine; /** The column number of the first character of this Token. */ public int beginColumn; /** The line number of the last character of this Token. */ public int endLine; /** The column number of the last character of this Token. */ public int endColumn; /** * The string image of the token. */ public String image; /** * A reference to the next regular (non-special) token from the input * stream. If this is the last token from the input stream, or if the * token manager has not read tokens beyond this one, this field is * set to null. This is true only if this token is also a regular * token. Otherwise, see below for a description of the contents of * this field. */ public Token next; /** * This field is used to access special tokens that occur prior to this * token, but after the immediately preceding regular (non-special) token. * If there are no such special tokens, this field is set to null. * When there are more than one such special token, this field refers * to the last of these special tokens, which in turn refers to the next * previous special token through its specialToken field, and so on * until the first special token (whose specialToken field is null). * The next fields of special tokens refer to other special tokens that * immediately follow it (without an intervening regular token). If there * is no such token, this field is null. */ public Token specialToken; /** * An optional attribute value of the Token. * Tokens which are not used as syntactic sugar will often contain * meaningful values that will be used later on by the compiler or * interpreter. This attribute value is often different from the image. * Any subclass of Token that actually wants to return a non-null value can * override this method as appropriate. */ public Object getValue() { return null; } /** * No-argument constructor */ public Token() {} /** * Constructs a new token for the specified Image. 
*/ public Token(int kind) { this(kind, null); } /** * Constructs a new token for the specified Image and Kind. */ public Token(int kind, String image) { this.kind = kind; this.image = image; } /** * Returns the image. */ @Override public String toString() { return image; } /** * Returns a new Token object, by default. However, if you want, you * can create and return subclass objects based on the value of ofKind. * Simply add the cases to the switch for all those special cases. * For example, if you have a subclass of Token called IDToken that * you want to create if ofKind is ID, simply add something like : * * case MyParserConstants.ID : return new IDToken(ofKind, image); * * to the following switch statement. Then you can cast matchedToken * variable to the appropriate type and use sit in your lexical actions. */ public static Token newToken(int ofKind, String image) { switch(ofKind) { default : return new Token(ofKind, image); } } public static Token newToken(int ofKind) { return newToken(ofKind, null); } } /* JavaCC - OriginalChecksum=093f73b266edc0ed6a424fcd3b5446d1 (do not edit this line) */ ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_06/calcite/javacc/generatedcode/TokenMgrError.java ================================================ package flink.examples.sql._06.calcite.javacc.generatedcode;/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 7.0 */ /* JavaCCOptions: */ /** Token Manager Error. */ public class TokenMgrError extends Error { /** * The version identifier for this Serializable class. * Increment only if the serialized form of the * class changes. */ private static final long serialVersionUID = 1L; /* * Ordinals for various reasons why an Error of this type can be thrown. */ /** * Lexical error occurred. */ public static final int LEXICAL_ERROR = 0; /** * An attempt was made to create a second instance of a static token manager. 
*/ public static final int STATIC_LEXER_ERROR = 1; /** * Tried to change to an invalid lexical state. */ public static final int INVALID_LEXICAL_STATE = 2; /** * Detected (and bailed out of) an infinite loop in the token manager. */ public static final int LOOP_DETECTED = 3; /** * Indicates the reason why the exception is thrown. It will have * one of the above 4 values. */ int errorCode; /** * Replaces unprintable characters by their escaped (or unicode escaped) * equivalents in the given string */ protected static final String addEscapes(String str) { StringBuilder retval = new StringBuilder(); char ch; for (int i = 0; i < str.length(); i++) { switch (str.charAt(i)) { case '\b': retval.append("\\b"); continue; case '\t': retval.append("\\t"); continue; case '\n': retval.append("\\n"); continue; case '\f': retval.append("\\f"); continue; case '\r': retval.append("\\r"); continue; case '\"': retval.append("\\\""); continue; case '\'': retval.append("\\\'"); continue; case '\\': retval.append("\\\\"); continue; default: if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) { String s = "0000" + Integer.toString(ch, 16); retval.append("\\u" + s.substring(s.length() - 4, s.length())); } else { retval.append(ch); } continue; } } return retval.toString(); } /** * Returns a detailed message for the Error when it is thrown by the * token manager to indicate a lexical error. * Parameters : * EOFSeen : indicates if EOF caused the lexical error * curLexState : lexical state in which this error occurred * errorLine : line number when the error occurred * errorColumn : column number when the error occurred * errorAfter : prefix that was seen before this error occurred * curchar : the offending character * Note: You can customize the lexical error message by modifying this method. 
*/ protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) { char curChar1 = (char)curChar; return("Lexical error at line " + errorLine + ", column " + errorColumn + ". Encountered: " + (EOFSeen ? " " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + curChar + "), ") + "after : \"" + addEscapes(errorAfter) + "\""); } /** * You can also modify the body of this method to customize your error messages. * For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not * of end-users concern, so you can return something like : * * "Internal Error : Please file a bug report .... " * * from this method for such cases in the release version of your parser. */ @Override public String getMessage() { return super.getMessage(); } /* * Constructors of various flavors follow. */ /** No arg constructor. */ public TokenMgrError() { } /** Constructor with message and reason. */ public TokenMgrError(String message, int reason) { super(message); errorCode = reason; } /** Full Constructor. 
*/ public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) { this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason); } } /* JavaCC - OriginalChecksum=9e201c978d59ab6f122a52837e6310b1 (do not edit this line) */ ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/SelectWhereHiveDialect.java ================================================ package flink.examples.sql._07.query._01_select_where; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class SelectWhereHiveDialect { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // TODO 没有 hive catalog 会 fallback 回 default parser flinkEnv.streamTEnv().getConfig().setSqlDialect(SqlDialect.HIVE); DataStream r = flinkEnv.env().addSource(new UserDefinedSource()); flinkEnv.streamTEnv().createTemporaryView("source_table", r); String selectWhereSql = "select * from source_table"; Table resultTable = flinkEnv.streamTEnv().sqlQuery(selectWhereSql); flinkEnv.streamTEnv().toRetractStream(resultTable, Row.class).print(); flinkEnv.env().execute(); } private static class UserDefinedSource implements SourceFunction, 
ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", 1L)); Thread.sleep(10L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(new TypeInformation[]{ TypeInformation.of(String.class) , TypeInformation.of(String.class) , TypeInformation.of(Long.class) }, new String[] {"a", "b", "c"}); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/SelectWhereTest.java ================================================ package flink.examples.sql._07.query._01_select_where; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class SelectWhereTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool 
parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); // DataStream r = env.addSource(new FlinkKafkaConsumer()); DataStream r = env.addSource(new UserDefinedSource()); Table sourceTable = tEnv.fromDataStream(r , Schema .newBuilder() .column("f0", "string") .column("f1", "string") .column("f2", "bigint") .columnByExpression("proctime", "PROCTIME()") .build()); tEnv.createTemporaryView("source_table", sourceTable); String selectWhereSql = "select f0 from source_table where f1 = 'b'"; Table resultTable = tEnv.sqlQuery(selectWhereSql); tEnv.toRetractStream(resultTable, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", 1L)); Thread.sleep(10L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/SelectWhereTest2.java
================================================
package flink.examples.sql._07.query._01_select_where;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.types.Row;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Pure-SQL SELECT ... WHERE example: a datagen source table is filtered on
 * {@code order_number} and written to a print sink. No DataStream APIs are
 * involved in the running job.
 */
public class SelectWhereTest2 {

    public static void main(String[] args) throws Exception {
        // Project helper building the stream/table environments from CLI args.
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Source: datagen connector, 10 rows/s, order_number uniform in [10, 11],
        // so roughly half the rows pass the filter below.
        String sourceSql = "CREATE TABLE source_table (\n"
                + " order_number BIGINT,\n"
                + " price DECIMAL(32,2)\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.order_number.min' = '10',\n"
                + " 'fields.order_number.max' = '11'\n"
                + ")";

        // Sink: print connector with the same schema.
        String sinkSql = "CREATE TABLE sink_table (\n"
                + " order_number BIGINT,\n"
                + " price DECIMAL(32,2)\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // The query under test: a simple equality filter.
        String selectWhereSql = "insert into sink_table\n"
                + "select * from source_table\n"
                + "where order_number = 10";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "ETL 案例");

        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);
        flinkEnv.streamTEnv().executeSql(selectWhereSql);
    }

    // NOTE(review): this source is never referenced in this example (the job is
    // pure SQL); it appears to be carried over from the sibling DataStream-based
    // examples and could be removed.
    private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable {

        // Cooperative cancellation flag; volatile because cancel() is invoked
        // from a different thread than run().
        private volatile boolean isCancel;

        @Override
        public void run(SourceContext sourceContext) throws Exception {
            int i = 0;
            // Emit ("a<i>", "b", 1L) every 10 ms until cancelled.
            while (!this.isCancel) {
                sourceContext.collect(Row.of("a" + i, "b", 1L));
                Thread.sleep(10L);
                i++;
            }
        }

        @Override
        public void cancel() {
            this.isCancel = true;
        }

        @Override
        public TypeInformation getProducedType() {
            // Three unnamed columns: (String, String, Long) -> default names f0/f1/f2.
            return new RowTypeInfo(TypeInformation.of(String.class),
                    TypeInformation.of(String.class),
                    TypeInformation.of(Long.class));
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/SelectWhereTest3.java
================================================
package flink.examples.sql._07.query._01_select_where;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * SELECT ... WHERE example over a DataStream-backed view registered with an
 * explicit {@link Schema} (columns f0/f1/f2 plus a PROCTIME() computed column).
 */
public class SelectWhereTest3 {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Restart only up to 6 failures per 10-minute window, 5 s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint (ck) settings.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        // NOTE(review): the argument is in milliseconds, so 3L = 3 ms — this was
        // probably intended to be seconds; confirm before copying this setting.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        DataStream r = env.addSource(new UserDefinedSource());
        // Map the raw (f0, f1, f2) stream to a table and add a processing-time column.
        Table sourceTable = tEnv.fromDataStream(r
                , Schema
                        .newBuilder()
                        .column("f0", "string")
                        .column("f1", "string")
                        .column("f2", "bigint")
                        .columnByExpression("proctime", "PROCTIME()")
                        .build());
        tEnv.createTemporaryView("source_table", sourceTable);

        // Every emitted row has f1 == "b", so all rows pass this filter.
        String selectWhereSql = "select f0 from source_table where f1 = 'b'";
        Table resultTable = tEnv.sqlQuery(selectWhereSql);

        tEnv.toRetractStream(resultTable, Row.class).print();
        env.execute();
    }

    private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable {

        // Cooperative cancellation flag; volatile because cancel() is invoked
        // from a different thread than run().
        private volatile boolean isCancel;

        @Override
        public void run(SourceContext sourceContext) throws Exception {
            int i = 0;
            // Emit ("a<i>", "b", 1L) every 10 ms until cancelled.
            while (!this.isCancel) {
                sourceContext.collect(Row.of("a" + i, "b", 1L));
                Thread.sleep(10L);
                i++;
            }
        }

        @Override
        public void cancel() {
            this.isCancel = true;
        }

        @Override
        public TypeInformation getProducedType() {
            // Three unnamed columns: (String, String, Long) -> default names f0/f1/f2.
            return new RowTypeInfo(TypeInformation.of(String.class),
                    TypeInformation.of(String.class),
                    TypeInformation.of(Long.class));
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/SelectWhereTest4.java
================================================
package flink.examples.sql._07.query._01_select_where;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * Variant of the SELECT ... WHERE example that registers the DataStream as a
 * view WITHOUT an explicit Schema; column names (a, b, c) come from the
 * source's own RowTypeInfo instead.
 */
public class SelectWhereTest4 {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Restart only up to 6 failures per 10-minute window, 5 s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint (ck) settings.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        // NOTE(review): 3L = 3 ms minimum pause — probably intended as seconds.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        DataStream r = env.addSource(new UserDefinedSource());

        // TODO: test selecting fewer fields than the source produces.
        tEnv.createTemporaryView("source_table", r);

        // "a" is the first named column declared by UserDefinedSource.getProducedType().
        String selectWhereSql = "select a from source_table";
        Table resultTable = tEnv.sqlQuery(selectWhereSql);

        tEnv.toRetractStream(resultTable, Row.class).print();
        env.execute();
    }

    private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable {

        // Cooperative cancellation flag; volatile because cancel() is invoked
        // from a different thread than run().
        private volatile boolean isCancel;

        @Override
        public void run(SourceContext sourceContext) throws Exception {
            int i = 0;
            // Emit ("a<i>", "b", 1L) every 10 ms until cancelled.
            while (!this.isCancel) {
                sourceContext.collect(Row.of("a" + i, "b", 1L));
                Thread.sleep(10L);
                i++;
            }
        }

        @Override
        public void cancel() {
            this.isCancel = true;
        }

        @Override
        public TypeInformation getProducedType() {
            // Explicitly named columns a/b/c — this is what makes "select a" valid.
            return new RowTypeInfo(new TypeInformation[]{
                    TypeInformation.of(String.class)
                    , TypeInformation.of(String.class)
                    , TypeInformation.of(Long.class)
            }, new String[] {"a", "b", "c"});
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/SelectWhereTest5.java
================================================
package flink.examples.sql._07.query._01_select_where;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * SELECT ... WHERE example driven by a single multi-statement SQL script:
 * the script is split on ';' and each statement is executed in order
 * (datagen source with an event-time watermark, print sink, INSERT query).
 */
public class SelectWhereTest5 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Three ';'-separated DDL/DML statements in one string. The row_time
        // computed column is derived from CURRENT_TIMESTAMP and used as the
        // watermark (5 s out-of-orderness).
        String sql = "CREATE TABLE Orders (\n"
                + " order_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.order_id.min' = '1',\n"
                + " 'fields.order_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE target_table (\n"
                + " order_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time timestamp(3)\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO target_table\n"
                + "SELECT * FROM Orders\n"
                + "Where order_id > 3";

        // Execute each statement individually; executeSql accepts one statement at a time.
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_01_select_where/StreamExecCalc$10.java
================================================
package flink.examples.sql._07.query._01_select_where;

/**
 * NOTE(review): this appears to be captured Flink planner code-generated
 * operator output (hence the '$10' class-name suffix and field$N/isNull$N
 * naming), checked in for study. Do not hand-edit the logic.
 *
 * Calc operator: for each input row, evaluates the predicate
 * "column0 == 10" and, when it holds, emits a two-column row
 * (the constant 10L, and the DECIMAL(32,2) from column 1).
 */
public class StreamExecCalc$10
        extends org.apache.flink.table.runtime.operators.TableStreamOperator
        implements org.apache.flink.streaming.api.operators.OneInputStreamOperator {

    private final Object[] references;
    // Reused output row (2 columns) and reused wrapping StreamRecord — the
    // generated code mutates these in place instead of allocating per element.
    org.apache.flink.table.data.BoxedWrapperRowData out = new org.apache.flink.table.data.BoxedWrapperRowData(2);
    private final org.apache.flink.streaming.runtime.streamrecord.StreamRecord outElement = new org.apache.flink.streaming.runtime.streamrecord.StreamRecord(null);

    public StreamExecCalc$10(
            Object[] references,
            org.apache.flink.streaming.runtime.tasks.StreamTask task,
            org.apache.flink.streaming.api.graph.StreamConfig config,
            org.apache.flink.streaming.api.operators.Output output,
            org.apache.flink.streaming.runtime.tasks.ProcessingTimeService processingTimeService) throws Exception {
        this.references = references;
        this.setup(task, config, output);
        if (this instanceof org.apache.flink.streaming.api.operators.AbstractStreamOperator) {
            ((org.apache.flink.streaming.api.operators.AbstractStreamOperator) this)
                    .setProcessingTimeService(processingTimeService);
        }
    }

    @Override
    public void open() throws Exception {
        super.open();
    }

    @Override
    public void processElement(org.apache.flink.streaming.runtime.streamrecord.StreamRecord element) throws Exception {
        org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) element.getValue();
        // field$N / isNull$N pairs implement three-valued SQL null semantics.
        long field$1;
        boolean isNull$1;
        boolean isNull$2;
        boolean result$3;
        org.apache.flink.table.data.DecimalData field$4;
        boolean isNull$4;
        // Read column 0 (BIGINT), guarding against NULL.
        isNull$1 = in1.isNullAt(0);
        field$1 = -1L;
        if (!isNull$1) {
            field$1 = in1.getLong(0);
        }
        // Predicate: column0 = 10 (NULL input makes the predicate false).
        isNull$2 = isNull$1 || false;
        result$3 = false;
        if (!isNull$2) {
            result$3 = field$1 == ((long) 10L);
        }
        if (result$3) {
            // Read column 1 as DECIMAL(32,2).
            isNull$4 = in1.isNullAt(1);
            field$4 = null;
            if (!isNull$4) {
                field$4 = in1.getDecimal(1, 32, 2);
            }
            out.setRowKind(in1.getRowKind());
            // Column 0 of the output is folded to the constant 10L by the
            // planner (the predicate guarantees its value); never NULL.
            if (false) {
                out.setNullAt(0);
            } else {
                out.setLong(0, ((long) 10L));
            }
            if (isNull$4) {
                out.setNullAt(1);
            } else {
                out.setNonPrimitiveValue(1, field$4);
            }
            output.collect(outElement.replace(out));
        }
    }

    @Override
    public void close() throws Exception {
        super.close();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_02_select_distinct/GroupAggsHandler$5.java
================================================
package flink.examples.sql._07.query._02_select_distinct;

/**
 * NOTE(review): captured Flink code-generated aggregation handler for a
 * SELECT DISTINCT query. All accumulators and output rows have zero columns
 * because DISTINCT needs no aggregate values — only the grouping key (handled
 * outside this class) matters. Do not hand-edit.
 */
public final class GroupAggsHandler$5 implements org.apache.flink.table.runtime.generated.AggsHandleFunction {

    // Zero-arity accumulator / output rows, replaced on each request.
    org.apache.flink.table.data.GenericRowData acc$2 = new org.apache.flink.table.data.GenericRowData(0);
    org.apache.flink.table.data.GenericRowData acc$3 = new org.apache.flink.table.data.GenericRowData(0);
    org.apache.flink.table.data.GenericRowData aggValue$4 = new org.apache.flink.table.data.GenericRowData(0);

    private org.apache.flink.table.runtime.dataview.StateDataViewStore store;

    public GroupAggsHandler$5(Object[] references) throws Exception {
    }

    private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() {
        return store.getRuntimeContext();
    }

    @Override
    public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception {
        this.store = store;
    }

    // No aggregate functions -> accumulate is a no-op.
    @Override
    public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception {
    }

    @Override
    public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception {
        throw new RuntimeException(
                "This function not require retract method, but the retract method is called.");
    }

    @Override
    public void merge(org.apache.flink.table.data.RowData otherAcc) throws Exception {
        throw new RuntimeException("This function not require merge method, but the merge method is called.");
    }

    @Override
    public void setAccumulators(org.apache.flink.table.data.RowData acc) throws Exception {
    }

    @Override
    public void resetAccumulators() throws Exception {
    }

    @Override
    public org.apache.flink.table.data.RowData getAccumulators() throws Exception {
        acc$3 = new org.apache.flink.table.data.GenericRowData(0);
        return acc$3;
    }

    @Override
    public org.apache.flink.table.data.RowData createAccumulators() throws Exception {
        acc$2 = new org.apache.flink.table.data.GenericRowData(0);
        return acc$2;
    }

    @Override
    public org.apache.flink.table.data.RowData getValue() throws Exception {
        aggValue$4 = new org.apache.flink.table.data.GenericRowData(0);
        return aggValue$4;
    }

    @Override
    public void cleanup() throws Exception {
    }

    @Override
    public void close() throws Exception {
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_02_select_distinct/KeyProjection$0.java
================================================
package flink.examples.sql._07.query._02_select_distinct;

/**
 * NOTE(review): captured Flink code-generated key projection — extracts the
 * single STRING grouping key (column 0) of an input row into a reused
 * single-column BinaryRowData. Do not hand-edit.
 */
public class KeyProjection$0 implements org.apache.flink.table.runtime.generated.Projection {

    // Reused output row and its writer; apply() overwrites them on every call,
    // so the returned row is only valid until the next invocation.
    org.apache.flink.table.data.binary.BinaryRowData out = new org.apache.flink.table.data.binary.BinaryRowData(1);
    org.apache.flink.table.data.writer.BinaryRowWriter outWriter = new org.apache.flink.table.data.writer.BinaryRowWriter(out);

    public KeyProjection$0(Object[] references) throws Exception {
    }

    @Override
    public org.apache.flink.table.data.binary.BinaryRowData apply(org.apache.flink.table.data.RowData in1) {
        org.apache.flink.table.data.binary.BinaryStringData field$1;
        boolean isNull$1;
        outWriter.reset();
        // Read column 0 (STRING), guarding against NULL.
        isNull$1 = in1.isNullAt(0);
        field$1 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
        if (!isNull$1) {
            field$1 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(0));
        }
        if (isNull$1) {
            outWriter.setNullAt(0);
        } else {
            outWriter.writeString(0, field$1);
        }
        outWriter.complete();
        return out;
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_02_select_distinct/SelectDistinctTest.java ================================================ package flink.examples.sql._07.query._02_select_distinct; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class SelectDistinctTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); 
env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream r = env.addSource(new UserDefinedSource()); Table sourceTable = tEnv.fromDataStream(r , Schema .newBuilder() .column("f0", "string") .column("f1", "string") .column("f2", "bigint") .columnByExpression("proctime", "PROCTIME()") .build()); tEnv.createTemporaryView("source_table", sourceTable); String selectDistinctSql = "select distinct f0 from source_table"; Table resultTable = tEnv.sqlQuery(selectDistinctSql); tEnv.toRetractStream(resultTable, Row.class).print(); String groupBySql = "select f0 from source_table group by f0"; Table resultTable1 = tEnv.sqlQuery(groupBySql); tEnv.toRetractStream(resultTable1, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", 1L)); Thread.sleep(10L); i++; } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_02_select_distinct/SelectDistinctTest2.java ================================================ package flink.examples.sql._07.query._02_select_distinct; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import 
org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class SelectDistinctTest2 { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sourceSql = "CREATE TABLE Orders (\n" + " id STRING\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.id.length' = '1'\n" + ")"; String sinkSql = "CREATE TABLE target_table (\n" + " id STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; String selectWhereSql = "INSERT into target_table\n" + "SELECT \n" + " DISTINCT id \n" + "FROM Orders"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "SELECT DISTINCT 案例"); tEnv.executeSql(sourceSql); tEnv.executeSql(sinkSql); 
tEnv.executeSql(selectWhereSql); } }

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_01_group_agg/GroupAggMiniBatchTest.java ================================================

package flink.examples.sql._07.query._03_group_agg._01_group_agg;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

// https://www.jianshu.com/p/aa2e94628e24
/**
 * Streaming GROUP BY aggregation example with the mini-batch optimization
 * switched on through the {@code table.exec.mini-batch.*} table options.
 * Reads from a datagen source and prints count/sum/avg/min/max per order_id.
 */
public class GroupAggMiniBatchTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool params = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(
                6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(1);

        // checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings tableSettings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, tableSettings);

        // set low-level key-value options
        Configuration tableConf = tEnv.getConfig().getConfiguration();
        // enable mini-batch optimization
        tableConf.setString("table.exec.mini-batch.enabled", "true");
        // use 5 seconds to buffer input records
        tableConf.setString("table.exec.mini-batch.allow-latency", "5 s");
        // the maximum number of records can be buffered by each aggregate operator task
        tableConf.setString("table.exec.mini-batch.size", "5000");

        String sourceDdl = "CREATE TABLE source_table (\n"
                + " order_id STRING,\n"
                + " price BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.order_id.length' = '1',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '1000000'\n"
                + ")";

        String sinkDdl = "CREATE TABLE sink_table (\n"
                + " order_id STRING,\n"
                + " count_result BIGINT,\n"
                + " sum_result BIGINT,\n"
                + " avg_result DOUBLE,\n"
                + " min_result BIGINT,\n"
                + " max_result BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        String insertDml = "insert into sink_table\n"
                + "select order_id,\n"
                + " count(*) as count_result,\n"
                + " sum(price) as sum_result,\n"
                + " avg(price) as avg_result,\n"
                + " min(price) as min_result,\n"
                + " max(price) as max_result\n"
                + "from source_table\n"
                + "group by order_id";

        tEnv.getConfig().getConfiguration().setString("pipeline.name", "GROUP AGG MINI BATCH 案例");

        tEnv.executeSql(sourceDdl);
        tEnv.executeSql(sinkDdl);
        tEnv.executeSql(insertDml);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_01_group_agg/GroupAggTest.java ================================================

package flink.examples.sql._07.query._03_group_agg._01_group_agg;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import
org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class GroupAggTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sourceSql = "CREATE TABLE source_table (\n" + " order_id STRING,\n" + " price BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.order_id.length' = '1',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '1000000'\n" + ")"; // String sinkSql = "CREATE TABLE sink_table (\n" // + " order_id STRING,\n" // + " count_result BIGINT,\n" // + " sum_result BIGINT,\n" // + " avg_result DOUBLE,\n" // + " min_result BIGINT,\n" // + " max_result BIGINT\n" // + ") WITH (\n" // + " 'connector' = 'print'\n" // + ")"; String sinkSql = "CREATE TABLE sink_table (\n" + " count_result BIGINT\n" // + " sum_result BIGINT,\n" // + 
" avg_result DOUBLE,\n" // + " min_result BIGINT,\n" // + " max_result BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; // String selectWhereSql = "insert into sink_table\n" // + "select order_id,\n" // + " count(*) as count_result,\n" // + " sum(price) as sum_result,\n" // + " avg(price) as avg_result,\n" // + " min(price) as min_result,\n" // + " max(price) as max_result\n" // + "from source_table\n" // + "group by order_id"; String selectWhereSql = "insert into sink_table\n" + "select count(1) as count_result\n" + "from (\n" + " select order_id,\n" + " count(*) as count_result\n" + " from source_table\n" + " group by order_id\n" + ")"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "GROUP AGG 案例"); tEnv.executeSql(sourceSql); tEnv.executeSql(sinkSql); tEnv.executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_01_group_agg/GroupAggsHandler$39.java ================================================ package flink.examples.sql._07.query._03_group_agg._01_group_agg; public final class GroupAggsHandler$39 implements org.apache.flink.table.runtime.generated.AggsHandleFunction { long agg0_count1; boolean agg0_count1IsNull; long agg1_sum; boolean agg1_sumIsNull; long agg2_sum; boolean agg2_sumIsNull; long agg2_count; boolean agg2_countIsNull; long agg3_min; boolean agg3_minIsNull; long agg4_max; boolean agg4_maxIsNull; org.apache.flink.table.data.GenericRowData acc$2 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData acc$3 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData aggValue$38 = new org.apache.flink.table.data.GenericRowData(5); private org.apache.flink.table.runtime.dataview.StateDataViewStore store; public GroupAggsHandler$39(Object[] references) throws Exception { } private org.apache.flink.api.common.functions.RuntimeContext 
getRuntimeContext() { return store.getRuntimeContext(); }

    // Keeps the state accessor; this handler creates no extra data views.
    @Override
    public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception {
        this.store = store;
    }

    // Accumulate one input row into the five aggregate buffers:
    // agg0 = COUNT(*), agg1 = SUM(price), agg2 = (sum, count) pair backing AVG(price),
    // agg3 = MIN(price), agg4 = MAX(price). Column 1 of accInput is price.
    @Override
    public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception {
        boolean isNull$10;
        long result$11;
        long field$12;
        boolean isNull$12;
        boolean isNull$13;
        long result$14;
        boolean isNull$17;
        long result$18;
        boolean isNull$20;
        long result$21;
        boolean isNull$23;
        boolean result$24;
        boolean isNull$28;
        boolean result$29;
        // read price (column 1), tracking SQL NULL separately from the -1L placeholder
        isNull$12 = accInput.isNullAt(1);
        field$12 = -1L;
        if (!isNull$12) {
            field$12 = accInput.getLong(1);
        }
        // COUNT(*): incremented for every row, NULL or not
        isNull$10 = agg0_count1IsNull || false;
        result$11 = -1L;
        if (!isNull$10) {
            result$11 = (long) (agg0_count1 + ((long) 1L));
        }
        agg0_count1 = result$11;
        agg0_count1IsNull = isNull$10;
        // SUM(price): NULL input leaves the sum untouched; the first non-NULL value seeds it
        long result$16 = -1L;
        boolean isNull$16;
        if (isNull$12) {
            isNull$16 = agg1_sumIsNull;
            if (!isNull$16) {
                result$16 = agg1_sum;
            }
        } else {
            long result$15 = -1L;
            boolean isNull$15;
            if (agg1_sumIsNull) {
                isNull$15 = isNull$12;
                if (!isNull$15) {
                    result$15 = field$12;
                }
            } else {
                isNull$13 = agg1_sumIsNull || isNull$12;
                result$14 = -1L;
                if (!isNull$13) {
                    result$14 = (long) (agg1_sum + field$12);
                }
                isNull$15 = isNull$13;
                if (!isNull$15) {
                    result$15 = result$14;
                }
            }
            isNull$16 = isNull$15;
            if (!isNull$16) {
                result$16 = result$15;
            }
        }
        agg1_sum = result$16;
        ;
        agg1_sumIsNull = isNull$16;
        // AVG numerator: sum of non-NULL prices
        long result$19 = -1L;
        boolean isNull$19;
        if (isNull$12) {
            isNull$19 = agg2_sumIsNull;
            if (!isNull$19) {
                result$19 = agg2_sum;
            }
        } else {
            isNull$17 = agg2_sumIsNull || isNull$12;
            result$18 = -1L;
            if (!isNull$17) {
                result$18 = (long) (agg2_sum + field$12);
            }
            isNull$19 = isNull$17;
            if (!isNull$19) {
                result$19 = result$18;
            }
        }
        agg2_sum = result$19;
        ;
        agg2_sumIsNull = isNull$19;
        // AVG denominator: count of non-NULL prices only
        long result$22 = -1L;
        boolean isNull$22;
        if (isNull$12) {
            isNull$22 = agg2_countIsNull;
            if (!isNull$22) {
                result$22 = agg2_count;
            }
        } else {
            isNull$20 = agg2_countIsNull || false;
            result$21 = -1L;
            if (!isNull$20) {
                result$21 = (long) (agg2_count + ((long) 1L));
            }
            isNull$22 = isNull$20;
            if (!isNull$22) {
                result$22 = result$21;
            }
        }
        agg2_count = result$22;
        ;
        agg2_countIsNull = isNull$22;
        // MIN(price): keep the smaller of current min and the new value; NULLs skipped
        long result$27 = -1L;
        boolean isNull$27;
        if (isNull$12) {
            isNull$27 = agg3_minIsNull;
            if (!isNull$27) {
                result$27 = agg3_min;
            }
        } else {
            long result$26 = -1L;
            boolean isNull$26;
            if (agg3_minIsNull) {
                isNull$26 = isNull$12;
                if (!isNull$26) {
                    result$26 = field$12;
                }
            } else {
                isNull$23 = isNull$12 || agg3_minIsNull;
                result$24 = false;
                if (!isNull$23) {
                    result$24 = field$12 < agg3_min;
                }
                long result$25 = -1L;
                boolean isNull$25;
                if (result$24) {
                    isNull$25 = isNull$12;
                    if (!isNull$25) {
                        result$25 = field$12;
                    }
                } else {
                    isNull$25 = agg3_minIsNull;
                    if (!isNull$25) {
                        result$25 = agg3_min;
                    }
                }
                isNull$26 = isNull$25;
                if (!isNull$26) {
                    result$26 = result$25;
                }
            }
            isNull$27 = isNull$26;
            if (!isNull$27) {
                result$27 = result$26;
            }
        }
        agg3_min = result$27;
        ;
        agg3_minIsNull = isNull$27;
        // MAX(price): keep the larger of current max and the new value; NULLs skipped
        long result$32 = -1L;
        boolean isNull$32;
        if (isNull$12) {
            isNull$32 = agg4_maxIsNull;
            if (!isNull$32) {
                result$32 = agg4_max;
            }
        } else {
            long result$31 = -1L;
            boolean isNull$31;
            if (agg4_maxIsNull) {
                isNull$31 = isNull$12;
                if (!isNull$31) {
                    result$31 = field$12;
                }
            } else {
                isNull$28 = isNull$12 || agg4_maxIsNull;
                result$29 = false;
                if (!isNull$28) {
                    result$29 = field$12 > agg4_max;
                }
                long result$30 = -1L;
                boolean isNull$30;
                if (result$29) {
                    isNull$30 = isNull$12;
                    if (!isNull$30) {
                        result$30 = field$12;
                    }
                } else {
                    isNull$30 = agg4_maxIsNull;
                    if (!isNull$30) {
                        result$30 = agg4_max;
                    }
                }
                isNull$31 = isNull$30;
                if (!isNull$31) {
                    result$31 = result$30;
                }
            }
            isNull$32 = isNull$31;
            if (!isNull$32) {
                result$32 = result$31;
            }
        }
        agg4_max = result$32;
        ;
        agg4_maxIsNull = isNull$32;
    }

    // Append-only plan: retraction is never expected for this handler.
    @Override
    public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception {
        throw new RuntimeException(
                "This function not require retract method, but the retract method is called.");
    }

    @Override
    public void merge(org.apache.flink.table.data.RowData otherAcc) throws Exception {
        throw new RuntimeException("This function not require merge method, but the merge method is called.");
    }

    // Restore the buffers from an accumulator row laid out as
    // (count1, sum, avg_sum, avg_count, min, max).
    @Override
    public void setAccumulators(org.apache.flink.table.data.RowData acc) throws Exception {
        long field$4;
        boolean isNull$4;
        long field$5;
        boolean isNull$5;
        long field$6;
        boolean isNull$6;
        long field$7;
        boolean isNull$7;
        long field$8;
        boolean isNull$8;
        long field$9;
        boolean isNull$9;
        isNull$8 = acc.isNullAt(4);
        field$8 = -1L;
        if (!isNull$8) {
            field$8 = acc.getLong(4);
        }
        isNull$4 = acc.isNullAt(0);
        field$4 = -1L;
        if (!isNull$4) {
            field$4 = acc.getLong(0);
        }
        isNull$5 = acc.isNullAt(1);
        field$5 = -1L;
        if (!isNull$5) {
            field$5 = acc.getLong(1);
        }
        isNull$7 = acc.isNullAt(3);
        field$7 = -1L;
        if (!isNull$7) {
            field$7 = acc.getLong(3);
        }
        isNull$9 = acc.isNullAt(5);
        field$9 = -1L;
        if (!isNull$9) {
            field$9 = acc.getLong(5);
        }
        isNull$6 = acc.isNullAt(2);
        field$6 = -1L;
        if (!isNull$6) {
            field$6 = acc.getLong(2);
        }
        agg0_count1 = field$4;
        ;
        agg0_count1IsNull = isNull$4;
        agg1_sum = field$5;
        ;
        agg1_sumIsNull = isNull$5;
        agg2_sum = field$6;
        ;
        agg2_sumIsNull = isNull$6;
        agg2_count = field$7;
        ;
        agg2_countIsNull = isNull$7;
        agg3_min = field$8;
        ;
        agg3_minIsNull = isNull$8;
        agg4_max = field$9;
        ;
        agg4_maxIsNull = isNull$9;
    }

    // Back to initial values: counts are 0 (non-NULL); sum/min/max start as SQL NULL.
    @Override
    public void resetAccumulators() throws Exception {
        agg0_count1 = ((long) 0L);
        agg0_count1IsNull = false;
        agg1_sum = ((long) -1L);
        agg1_sumIsNull = true;
        agg2_sum = ((long) 0L);
        agg2_sumIsNull = false;
        agg2_count = ((long) 0L);
        agg2_countIsNull = false;
        agg3_min = ((long) -1L);
        agg3_minIsNull = true;
        agg4_max = ((long) -1L);
        agg4_maxIsNull = true;
    }

    // Snapshot the current buffers into a fresh 6-field accumulator row.
    @Override
    public org.apache.flink.table.data.RowData getAccumulators() throws Exception {
        acc$3 = new org.apache.flink.table.data.GenericRowData(6);
        if (agg0_count1IsNull) {
            acc$3.setField(0, null);
        } else {
            acc$3.setField(0, agg0_count1);
        }
        if (agg1_sumIsNull) {
            acc$3.setField(1, null);
        } else {
            acc$3.setField(1, agg1_sum);
        }
        if (agg2_sumIsNull) {
            acc$3.setField(2, null);
        } else {
            acc$3.setField(2, agg2_sum);
        }
        if (agg2_countIsNull) {
            acc$3.setField(3, null);
        } else {
            acc$3.setField(3, agg2_count);
        }
        if (agg3_minIsNull) {
            acc$3.setField(4, null);
        } else {
            acc$3.setField(4, agg3_min);
        }
        if (agg4_maxIsNull) {
            acc$3.setField(5, null);
        } else {
            acc$3.setField(5, agg4_max);
        }
        return acc$3;
    }

    // Fresh accumulator row with the same initial values as resetAccumulators();
    // the constant-condition ifs are planner-generated null-literal handling.
    @Override
    public org.apache.flink.table.data.RowData createAccumulators() throws Exception {
        acc$2 = new org.apache.flink.table.data.GenericRowData(6);
        if (false) {
            acc$2.setField(0, null);
        } else {
            acc$2.setField(0, ((long) 0L));
        }
        if (true) {
            acc$2.setField(1, null);
        } else {
            acc$2.setField(1, ((long) -1L));
        }
        if (false) {
            acc$2.setField(2, null);
        } else {
            acc$2.setField(2, ((long) 0L));
        }
        if (false) {
            acc$2.setField(3, null);
        } else {
            acc$2.setField(3, ((long) 0L));
        }
        if (true) {
            acc$2.setField(4, null);
        } else {
            acc$2.setField(4, ((long) -1L));
        }
        if (true) {
            acc$2.setField(5, null);
        } else {
            acc$2.setField(5, ((long) -1L));
        }
        return acc$2;
    }

    // Build the output row (count, sum, avg, min, max). AVG is NULL when the
    // non-NULL count is 0, otherwise long division agg2_sum / agg2_count.
    @Override
    public org.apache.flink.table.data.RowData getValue() throws Exception {
        boolean isNull$33;
        boolean result$34;
        boolean isNull$35;
        long result$36;
        aggValue$38 = new org.apache.flink.table.data.GenericRowData(5);
        if (agg0_count1IsNull) {
            aggValue$38.setField(0, null);
        } else {
            aggValue$38.setField(0, agg0_count1);
        }
        if (agg1_sumIsNull) {
            aggValue$38.setField(1, null);
        } else {
            aggValue$38.setField(1, agg1_sum);
        }
        isNull$33 = agg2_countIsNull || false;
        result$34 = false;
        if (!isNull$33) {
            result$34 = agg2_count == ((long) 0L);
        }
        long result$37 = -1L;
        boolean isNull$37;
        if (result$34) {
            isNull$37 = true;
            if (!isNull$37) {
                result$37 = ((long) -1L);
            }
        } else {
            isNull$35 = agg2_sumIsNull || agg2_countIsNull;
            result$36 = -1L;
            if (!isNull$35) {
                result$36 = (long) (agg2_sum / agg2_count);
            }
            isNull$37 = isNull$35;
            if (!isNull$37) {
                result$37 = result$36;
            }
        }
        if (isNull$37) {
            aggValue$38.setField(2, null);
        } else {
            aggValue$38.setField(2, result$37);
        }
        if
(agg3_minIsNull) { aggValue$38.setField(3, null); } else { aggValue$38.setField(3, agg3_min); } if (agg4_maxIsNull) { aggValue$38.setField(4, null); } else { aggValue$38.setField(4, agg4_max); } return aggValue$38; } @Override public void cleanup() throws Exception { } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_02_count_distinct/CountDistinctGroupAggTest.java ================================================ package flink.examples.sql._07.query._03_group_agg._02_count_distinct; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class CountDistinctGroupAggTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); 
env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sourceSql = "CREATE TABLE source_table (\n" + " dim STRING,\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'fields.dim.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '1000000'\n" + ")"; String sinkSql = "CREATE TABLE sink_table (\n" + " dim STRING,\n" + " uv BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; String selectWhereSql = "insert into sink_table\n" + "select dim,\n" + " count(distinct user_id) as uv\n" + "from source_table\n" + "group by dim"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "COUNT DISTINCT 案例"); tEnv.executeSql(sourceSql); tEnv.executeSql(sinkSql); tEnv.executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_02_count_distinct/GroupAggsHandler$17.java ================================================ package flink.examples.sql._07.query._03_group_agg._02_count_distinct; public final class GroupAggsHandler$17 implements org.apache.flink.table.runtime.generated.AggsHandleFunction { long agg0_count; boolean agg0_countIsNull; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$2; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$3; private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview; private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value; private org.apache.flink.table.api.dataview.MapView distinct_view_0; org.apache.flink.table.data.GenericRowData acc$5 = new 
org.apache.flink.table.data.GenericRowData(2);
    org.apache.flink.table.data.GenericRowData acc$7 = new org.apache.flink.table.data.GenericRowData(2);
    org.apache.flink.table.data.GenericRowData aggValue$16 = new org.apache.flink.table.data.GenericRowData(1);
    private org.apache.flink.table.runtime.dataview.StateDataViewStore store;

    public GroupAggsHandler$17(Object[] references) throws Exception {
        externalSerializer$2 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[0]));
        externalSerializer$3 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1]));
    }

    private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() {
        return store.getRuntimeContext();
    }

    // Bind the state-backed MapView ("distinctAcc_0") that records which
    // distinct keys have already been counted.
    @Override
    public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception {
        this.store = store;
        distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store
                .getStateMapView("distinctAcc_0", true, externalSerializer$2, externalSerializer$3);
        distinctAcc_0_dataview_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview);
        distinct_view_0 = distinctAcc_0_dataview;
    }

    // COUNT(DISTINCT user_id): column 1 of accInput is the distinct key. A long
    // bitmap per key in the MapView marks (bit 0) whether the key was counted;
    // the counter only advances the first time a key's bit flips on.
    @Override
    public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception {
        long field$9;
        boolean isNull$9;
        boolean isNull$11;
        long result$12;
        isNull$9 = accInput.isNullAt(1);
        field$9 = -1L;
        if (!isNull$9) {
            field$9 = accInput.getLong(1);
        }
        Long distinctKey$10 = (Long) field$9;
        if (isNull$9) {
            distinctKey$10 = null;
        }
        Long value$14 = (Long) distinct_view_0.get(distinctKey$10);
        if (value$14 == null) {
            value$14 = 0L;
        }
        boolean is_distinct_value_changed_0 = false;
        long existed$15 = ((long) value$14) & (1L << 0);
        if (existed$15 == 0) {
            // not existed
            value$14 = ((long) value$14) | (1L << 0);
            is_distinct_value_changed_0 = true;
            long result$13 = -1L;
            boolean isNull$13;
            if (isNull$9) {
                isNull$13 = agg0_countIsNull;
                if (!isNull$13) {
                    result$13 = agg0_count;
                }
            } else {
                isNull$11 = agg0_countIsNull || false;
                result$12 = -1L;
                if (!isNull$11) {
                    result$12 = (long) (agg0_count + ((long) 1L));
                }
                isNull$13 = isNull$11;
                if (!isNull$13) {
                    result$13 = result$12;
                }
            }
            agg0_count = result$13;
            ;
            agg0_countIsNull = isNull$13;
        }
        if (is_distinct_value_changed_0) {
            distinct_view_0.put(distinctKey$10, value$14);
        }
    }

    // Append-only plan: retraction is never expected for this handler.
    @Override
    public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception {
        throw new RuntimeException(
                "This function not require retract method, but the retract method is called.");
    }

    @Override
    public void merge(org.apache.flink.table.data.RowData otherAcc) throws Exception {
        throw new RuntimeException("This function not require merge method, but the merge method is called.");
    }

    // Restore the count; the distinct map lives in keyed state, so the view is
    // simply re-pointed at the state-backed instance.
    @Override
    public void setAccumulators(org.apache.flink.table.data.RowData acc) throws Exception {
        long field$8;
        boolean isNull$8;
        isNull$8 = acc.isNullAt(0);
        field$8 = -1L;
        if (!isNull$8) {
            field$8 = acc.getLong(0);
        }
        distinct_view_0 = distinctAcc_0_dataview;
        agg0_count = field$8;
        ;
        agg0_countIsNull = isNull$8;
    }

    // Reset the counter and drop all remembered distinct keys.
    @Override
    public void resetAccumulators() throws Exception {
        agg0_count = ((long) 0L);
        agg0_countIsNull = false;
        distinct_view_0.clear();
    }

    // Snapshot (count, distinct-map handle) into a 2-field accumulator row.
    @Override
    public org.apache.flink.table.data.RowData getAccumulators() throws Exception {
        acc$7 = new org.apache.flink.table.data.GenericRowData(2);
        if (agg0_countIsNull) {
            acc$7.setField(0, null);
        } else {
            acc$7.setField(0, agg0_count);
        }
        org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$6 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0);
        if (false) {
            acc$7.setField(1, null);
        } else {
            acc$7.setField(1, distinct_acc$6);
        }
        return acc$7;
    }

    // Fresh accumulator: count = 0 plus an empty heap MapView.
    @Override
    public org.apache.flink.table.data.RowData createAccumulators() throws Exception {
        acc$5 = new org.apache.flink.table.data.GenericRowData(2);
        if (false) {
            acc$5.setField(0, null);
        } else {
            acc$5.setField(0, ((long) 0L));
        }
        org.apache.flink.table.api.dataview.MapView mapview$4 = new
org.apache.flink.table.api.dataview.MapView(); org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$4 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$4); if (false) { acc$5.setField(1, null); } else { acc$5.setField(1, distinct_acc$4); } return acc$5; } @Override public org.apache.flink.table.data.RowData getValue() throws Exception { aggValue$16 = new org.apache.flink.table.data.GenericRowData(1); if (agg0_countIsNull) { aggValue$16.setField(0, null); } else { aggValue$16.setField(0, agg0_count); } return aggValue$16; } @Override public void cleanup() throws Exception { distinctAcc_0_dataview.clear(); } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_03_grouping_sets/GroupingSetsEqualsGroupAggUnionAllGroupAggTest2.java ================================================ package flink.examples.sql._07.query._03_group_agg._03_grouping_sets; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class GroupingSetsEqualsGroupAggUnionAllGroupAggTest2 { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), 
org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sinkSql = "CREATE TABLE sink_table (\n" + " supplier_id STRING,\n" + " product_id STRING,\n" + " total BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; String selectWhereSql = "insert into sink_table\n" + "SELECT\n" + " supplier_id,\n" + " product_id,\n" + " COUNT(*) AS total\n" + "FROM (VALUES\n" + " ('supplier1', 'product1', 4),\n" + " ('supplier1', 'product2', 3),\n" + " ('supplier2', 'product3', 3),\n" + " ('supplier2', 'product4', 4))\n" + "AS Products(supplier_id, product_id, rating)\n" + "GROUP BY supplier_id, product_id\n" + "UNION ALL\n" + "SELECT\n" + " supplier_id,\n" + " cast(null as string) as product_id,\n" + " COUNT(*) AS total\n" + "FROM (VALUES\n" + " ('supplier1', 'product1', 4),\n" + " ('supplier1', 'product2', 3),\n" + " ('supplier2', 'product3', 3),\n" + " ('supplier2', 'product4', 4))\n" + "AS Products(supplier_id, product_id, rating)\n" + "GROUP BY supplier_id\n" + "UNION ALL\n" + "SELECT\n" + " cast(null as string) AS supplier_id,\n" + " cast(null as string) AS product_id,\n" + " COUNT(*) AS total\n" + "FROM (VALUES\n" + " ('supplier1', 'product1', 4),\n" + " ('supplier1', 'product2', 3),\n" + " ('supplier2', 'product3', 3),\n" + " ('supplier2', 'product4', 4))\n" + "AS Products(supplier_id, product_id, rating)"; 
tEnv.getConfig().getConfiguration().setString("pipeline.name", "GROUPING SETS 等同于 GROUP AGG UNION ALL 案例"); tEnv.executeSql(sinkSql); tEnv.executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_03_grouping_sets/GroupingSetsGroupAggTest.java ================================================ package flink.examples.sql._07.query._03_group_agg._03_grouping_sets; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class GroupingSetsGroupAggTest { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() 
.inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sourceSql = "CREATE TABLE source_table (\n" + " supplier_id STRING,\n" + " product_id STRING,\n" + " price BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'fields.supplier_id.length' = '1',\n" + " 'fields.product_id.length' = '1',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '1000000'\n" + ")"; String sinkSql = "CREATE TABLE sink_table (\n" + " supplier_id STRING,\n" + " product_id STRING,\n" + " total BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; String selectWhereSql = "INSERT INTO sink_table\n" + "SELECT supplier_id,\n" + " product_id,\n" + " sum(price) as total\n" + "FROM source_table\n" + "GROUP BY GROUPING SETS ((supplier_id, product_id), (supplier_id), ())"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "GROUPING SETS 案例"); tEnv.executeSql(sourceSql); tEnv.executeSql(sinkSql); tEnv.executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_03_grouping_sets/GroupingSetsGroupAggTest2.java ================================================ package flink.examples.sql._07.query._03_group_agg._03_grouping_sets; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class GroupingSetsGroupAggTest2 { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = 
StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); String sinkSql = "CREATE TABLE sink_table (\n" + " supplier_id STRING,\n" + " product_id STRING,\n" + " total BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ")"; String selectWhereSql = "insert into sink_table\n" + "SELECT\n" + " supplier_id,\n" + " product_id,\n" + " COUNT(*) AS total\n" + "FROM (VALUES\n" + " ('supplier1', 'product1', 4),\n" + " ('supplier1', 'product2', 3),\n" + " ('supplier2', 'product3', 3),\n" + " ('supplier2', 'product4', 4))\n" + "AS Products(supplier_id, product_id, rating)\n" + "GROUP BY GROUPING SETS ((supplier_id, product_id), (supplier_id), ())"; tEnv.getConfig().getConfiguration().setString("pipeline.name", "GROUPING SETS 案例"); tEnv.executeSql(sinkSql); tEnv.executeSql(selectWhereSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_03_grouping_sets/StreamExecExpand$20.java ================================================ package flink.examples.sql._07.query._03_group_agg._03_grouping_sets; public class StreamExecExpand$20 
extends org.apache.flink.table.runtime.operators.TableStreamOperator
        implements org.apache.flink.streaming.api.operators.OneInputStreamOperator {

    // Planner-generated "expand" operator for GROUPING SETS: every input row is
    // re-emitted once per grouping set, with the non-grouped string columns
    // nulled out and an expand-id written into column 2.
    private final Object[] references;
    private transient org.apache.flink.table.runtime.typeutils.StringDataSerializer typeSerializer$15;
    private transient org.apache.flink.table.runtime.typeutils.StringDataSerializer typeSerializer$18;
    // Reused output row (3 columns: supplier_id, product_id, expand-id).
    org.apache.flink.table.data.BoxedWrapperRowData out = new org.apache.flink.table.data.BoxedWrapperRowData(3);
    // Reused StreamRecord wrapper to avoid per-row allocation.
    private final org.apache.flink.streaming.runtime.streamrecord.StreamRecord outElement = new org.apache.flink.streaming.runtime.streamrecord.StreamRecord(null);

    public StreamExecExpand$20(
            Object[] references,
            org.apache.flink.streaming.runtime.tasks.StreamTask task,
            org.apache.flink.streaming.api.graph.StreamConfig config,
            org.apache.flink.streaming.api.operators.Output output,
            org.apache.flink.streaming.runtime.tasks.ProcessingTimeService processingTimeService) throws Exception {
        this.references = references;
        // Serializers for defensive copies of the two string columns.
        typeSerializer$15 = (((org.apache.flink.table.runtime.typeutils.StringDataSerializer) references[0]));
        typeSerializer$18 = (((org.apache.flink.table.runtime.typeutils.StringDataSerializer) references[1]));
        this.setup(task, config, output);
        if (this instanceof org.apache.flink.streaming.api.operators.AbstractStreamOperator) {
            ((org.apache.flink.streaming.api.operators.AbstractStreamOperator) this)
                    .setProcessingTimeService(processingTimeService);
        }
    }

    @Override
    public void open() throws Exception {
        super.open();
    }

    @Override
    public void processElement(org.apache.flink.streaming.runtime.streamrecord.StreamRecord element) throws Exception {
        org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) element.getValue();
        org.apache.flink.table.data.binary.BinaryStringData field$14;
        boolean isNull$14;
        org.apache.flink.table.data.binary.BinaryStringData field$16;
        org.apache.flink.table.data.binary.BinaryStringData field$17;
        boolean isNull$17;
        org.apache.flink.table.data.binary.BinaryStringData field$19;
        // Read column 0, null-safe, then take a serializer copy (the input row's
        // backing memory may be reused by the upstream operator).
        isNull$14 = in1.isNullAt(0);
        field$14 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
        if (!isNull$14) {
            field$14 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(0));
        }
        field$16 = field$14;
        if (!isNull$14) {
            field$16 = (org.apache.flink.table.data.binary.BinaryStringData) (typeSerializer$15.copy(field$16));
        }
        // Same for column 1.
        isNull$17 = in1.isNullAt(1);
        field$17 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
        if (!isNull$17) {
            field$17 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(1));
        }
        field$19 = field$17;
        if (!isNull$17) {
            field$19 = (org.apache.flink.table.data.binary.BinaryStringData) (typeSerializer$18.copy(field$19));
        }
        // Emission 1: grouping set (col0, col1), expand-id 0.
        // The constant-folded `if (false)/if (true)` branches are codegen artifacts.
        out.setRowKind(in1.getRowKind());
        if (isNull$14) {
            out.setNullAt(0);
        } else {
            out.setNonPrimitiveValue(0, field$16);
        }
        if (isNull$17) {
            out.setNullAt(1);
        } else {
            out.setNonPrimitiveValue(1, field$19);
        }
        if (false) {
            out.setNullAt(2);
        } else {
            out.setLong(2, ((long) 0L));
        }
        output.collect(outElement.replace(out));
        // Emission 2: grouping set (col0), col1 forced to NULL, expand-id 1.
        out.setRowKind(in1.getRowKind());
        if (isNull$14) {
            out.setNullAt(0);
        } else {
            out.setNonPrimitiveValue(0, field$16);
        }
        if (true) {
            out.setNullAt(1);
        } else {
            out.setNonPrimitiveValue(1, ((org.apache.flink.table.data.binary.BinaryStringData) org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8));
        }
        if (false) {
            out.setNullAt(2);
        } else {
            out.setLong(2, ((long) 1L));
        }
        output.collect(outElement.replace(out));
        // Emission 3: grand-total grouping set (), both columns NULL, expand-id 3.
        out.setRowKind(in1.getRowKind());
        if (true) {
            out.setNullAt(0);
        } else {
            out.setNonPrimitiveValue(0, ((org.apache.flink.table.data.binary.BinaryStringData) org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8));
        }
        if (true) {
            out.setNullAt(1);
        } else {
            out.setNonPrimitiveValue(1, ((org.apache.flink.table.data.binary.BinaryStringData) org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8));
        }
        if (false) {
            out.setNullAt(2);
        } else {
            out.setLong(2, ((long) 3L));
        }
output.collect(outElement.replace(out));
    }

    @Override
    public void close() throws Exception {
        super.close();
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_04_cube/CubeGroupAggTest.java ================================================
package flink.examples.sql._07.query._03_group_agg._04_cube;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL example: GROUP BY CUBE over a datagen source, printed to stdout.
 */
public class CubeGroupAggTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        // NOTE(review): argument is milliseconds, so this is a 3 ms minimum pause — confirm intent.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        String sourceSql
                = "CREATE TABLE source_table (\n"
                + " supplier_id STRING,\n"
                + " product_id STRING,\n"
                + " price BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'fields.supplier_id.length' = '1',\n"
                + " 'fields.product_id.length' = '1',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '1000000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " supplier_id STRING,\n"
                + " product_id STRING,\n"
                + " total BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // CUBE produces all 4 grouping sets of (supplier_id, product_id).
        String selectWhereSql = "insert into sink_table\n"
                + "SELECT supplier_id, product_id, COUNT(*) as total\n"
                + "FROM source_table\n"
                + "AS Products(supplier_id, product_id, rating)\n"
                + "GROUP BY CUBE (supplier_id, product_id)";

        tEnv.getConfig().getConfiguration().setString("pipeline.name", "CUBE 案例");

        tEnv.executeSql(sourceSql);
        tEnv.executeSql(sinkSql);
        tEnv.executeSql(selectWhereSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_04_cube/CubeGroupAggTest2.java ================================================
package flink.examples.sql._07.query._03_group_agg._04_cube;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL example: GROUP BY CUBE over an inline VALUES source.
 */
public class CubeGroupAggTest2 {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " supplier_id STRING,\n"
                + " product_id STRING,\n"
                + " total BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // Bounded VALUES input makes the expected CUBE output easy to verify.
        String selectWhereSql = "insert into sink_table\n"
                + "SELECT supplier_id, product_id, COUNT(*) as total\n"
                + "FROM (VALUES\n"
                + " ('supplier1', 'product1', 4),\n"
                + " ('supplier1', 'product2', 3),\n"
                + " ('supplier2', 'product3', 3),\n"
                + " ('supplier2', 'product4', 4))\n"
                + "AS Products(supplier_id, product_id, rating)\n"
                + "GROUP BY CUBE (supplier_id, product_id)";

        tEnv.getConfig().getConfiguration().setString("pipeline.name", "CUBE 案例");

        tEnv.executeSql(sinkSql);
        tEnv.executeSql(selectWhereSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_05_rollup/RollUpGroupAggTest.java ================================================
package flink.examples.sql._07.query._03_group_agg._05_rollup;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL example: GROUP BY ROLLUP over a datagen source, printed to stdout.
 */
public class RollUpGroupAggTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        String sourceSql = "CREATE TABLE source_table (\n"
                + " supplier_id STRING,\n"
                + " product_id STRING,\n"
                + " price BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'fields.supplier_id.length' = '1',\n"
                + " 'fields.product_id.length' = '1',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '1000000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " supplier_id STRING,\n"
                + " product_id STRING,\n"
                + " total BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // ROLLUP produces 3 grouping sets: (supplier_id, product_id), (supplier_id), ().
        String selectWhereSql = "insert into sink_table\n"
                + "SELECT supplier_id, product_id, COUNT(*) as total\n"
                + "FROM source_table\n"
                + "AS Products(supplier_id, product_id, rating)\n"
                + "GROUP BY ROLLUP (supplier_id, product_id)";

        tEnv.getConfig().getConfiguration().setString("pipeline.name", "ROLLUP 案例");

        tEnv.executeSql(sourceSql);
        tEnv.executeSql(sinkSql);
        tEnv.executeSql(selectWhereSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_03_group_agg/_05_rollup/RollUpGroupAggTest2.java ================================================
package flink.examples.sql._07.query._03_group_agg._05_rollup;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL example: GROUP BY ROLLUP over an inline VALUES source.
 */
public class RollUpGroupAggTest2 {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings
settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " supplier_id STRING,\n"
                + " product_id STRING,\n"
                + " total BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // Bounded VALUES input makes the expected ROLLUP output easy to verify.
        String selectWhereSql = "insert into sink_table\n"
                + "SELECT supplier_id, product_id, COUNT(*) as total\n"
                + "FROM (VALUES\n"
                + " ('supplier1', 'product1', 4),\n"
                + " ('supplier1', 'product2', 3),\n"
                + " ('supplier2', 'product3', 3),\n"
                + " ('supplier2', 'product4', 4))\n"
                + "AS Products(supplier_id, product_id, rating)\n"
                + "GROUP BY ROLLUP (supplier_id, product_id)";

        tEnv.getConfig().getConfiguration().setString("pipeline.name", "ROLLUP 案例");

        tEnv.executeSql(sinkSql);
        tEnv.executeSql(selectWhereSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/TumbleWindow2GroupAggTest.java ================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Emulates a tumbling window with a plain GROUP BY on a truncated event-time
 * bucket, using a two-level aggregation (mod(user_id, 1024) inner buckets) to
 * mitigate data skew on the hot dimension.
 */
public class TumbleWindow2GroupAggTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false",
                "--table.optimizer.agg-phase-strategy", "TWO_PHASE"});

        // Multi-statement script: source DDL; sink DDL; INSERT — split on ';' below.
        String sql = "-- 数据源表\n"
                + "CREATE TABLE source_table (\n"
                + " -- 维度数据\n"
                + " dim STRING,\n"
                + " -- 用户 id\n"
                + " user_id BIGINT,\n"
                + " -- 用户\n"
                + " price BIGINT,\n"
                + " -- 事件时间戳\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " -- watermark 设置\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.dim.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "-- 数据汇表\n"
                + "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "-- 数据处理逻辑\n"
                + "insert into sink_table\n"
                + "select dim,\n"
                + " sum(bucket_pv) as pv,\n"
                + " sum(bucket_sum_price) as sum_price,\n"
                + " max(bucket_max_price) as max_price,\n"
                + " min(bucket_min_price) as min_price,\n"
                + " sum(bucket_uv) as uv,\n"
                + " max(window_start) as window_start\n"
                + "from (\n"
                + " select dim,\n"
                + " count(*) as bucket_pv,\n"
                + " sum(price) as bucket_sum_price,\n"
                + " max(price) as bucket_max_price,\n"
                + " min(price) as bucket_min_price,\n"
                + " -- 计算 uv 数\n"
                + " count(distinct user_id) as bucket_uv,\n"
                + " cast((UNIX_TIMESTAMP(CAST(row_time AS STRING))) / 60 as bigint) as window_start\n"
                + " from source_table\n"
                + " group by\n"
                + " -- 按照用户 id 进行分桶,防止数据倾斜\n"
                + " mod(user_id, 1024),\n"
                + " dim,\n"
                + " cast((UNIX_TIMESTAMP(CAST(row_time AS STRING))) / 60 as bigint)\n"
                + ")\n"
                + "group by dim,\n"
                + " window_start";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name",
                "1.13.5 WINDOW TVF TUMBLE WINDOW 案例");

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/TumbleWindowTest.java ================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Window TVF TUMBLE example with an inner bucketed aggregation to
 * mitigate skew, re-aggregated per dim and window_start.
 */
public class TumbleWindowTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]
{"--table.optimizer.agg-phase-strategy", "TWO_PHASE"});

        String sourceSql = "CREATE TABLE source_table (\n"
                + " dim STRING,\n"
                + " user_id BIGINT,\n"
                + " price BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10000',\n"
                + " 'fields.dim.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // Inner query: TUMBLE TVF aggregation bucketed by mod(user_id, 1024);
        // outer query: merge the buckets back per dim and window_start.
        String selectWhereSql = "insert into sink_table\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t sum(bucket_sum_price) as sum_price,\n"
                + "\t max(bucket_max_price) as max_price,\n"
                + "\t min(bucket_min_price) as min_price,\n"
                + "\t sum(bucket_uv) as uv,\n"
                + "\t max(window_start) as window_start\n"
                + "from (\n"
                + "\t SELECT dim,\n"
                + "\t \t UNIX_TIMESTAMP(CAST(window_start AS STRING)) * 1000 as window_start, \n"
                + "\t window_end, \n"
                + "\t count(*) as bucket_pv,\n"
                + "\t sum(price) as bucket_sum_price,\n"
                + "\t max(price) as bucket_max_price,\n"
                + "\t min(price) as bucket_min_price,\n"
                + "\t count(distinct user_id) as bucket_uv\n"
                + "\t FROM TABLE(TUMBLE(\n"
                + "\t \t\t\tTABLE source_table\n"
                + "\t \t\t\t, DESCRIPTOR(row_time)\n"
                + "\t \t\t\t, INTERVAL '60' SECOND))\n"
                + "\t GROUP BY window_start, \n"
                + "\t \t\t window_end,\n"
                + "\t\t\t dim,\n"
                + "\t\t\t mod(user_id, 1024)\n"
                + ")\n"
                + "group by dim,\n"
                + "\t\t window_start";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name",
                "1.13.5 WINDOW TVF TUMBLE WINDOW 案例");

        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);
        flinkEnv.streamTEnv().executeSql(selectWhereSql);

        /**
         * Two-phase aggregation operators involved:
         * local agg: {@link org.apache.flink.table.runtime.operators.aggregate.window.LocalSlicingWindowAggOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.LocalAggCombiner}
         *
         * keyed agg: {@link org.apache.flink.table.runtime.operators.window.slicing.SlicingWindowOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.processors.SliceUnsharedWindowAggProcessor}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.GlobalAggCombiner}
         */
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/TumbleWindowTest2.java ================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Cascaded Window TVF example (window over the output of another window),
 * shown both as a nested TABLE(...) form and an equivalent WITH form.
 */
public class TumbleWindowTest2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--table.optimizer.agg-phase-strategy",
                "ONE_PHASE"});

        String sourceSql = "CREATE TABLE source_table (\n"
                + " dim STRING,\n"
                + " user_id BIGINT,\n"
                + " price BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10000',\n"
                + " 'fields.dim.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // Variant 1 (unused below): nested TUMBLE TVF, inner window_time re-windowed as rowtime.
        String selectWhereSql1 = "insert into sink_table\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t sum(bucket_sum_price) as sum_price,\n"
                + "\t max(bucket_max_price) as max_price,\n"
                + "\t min(bucket_min_price) as min_price,\n"
                + "\t sum(bucket_uv) as uv,\n"
                + "\t UNIX_TIMESTAMP(CAST(window_start AS STRING)) as window_start\n"
                + "from TABLE(\n"
                + "\t TUMBLE(\n"
                + "\t\tTABLE (\n"
                + "\t\t\tSELECT \n"
                + "\t\t\t\tdim,\n"
                + "\t\t\t\twindow_time as rowtime,\n"
                + "\t\t\t count(*) as bucket_pv,\n"
                + "\t\t\t sum(price) as bucket_sum_price,\n"
                + "\t\t\t max(price) as bucket_max_price,\n"
                + "\t\t\t min(price) as bucket_min_price,\n"
                + "\t\t\t count(distinct user_id) as bucket_uv\n"
                + "\t\t\tFROM TABLE(TUMBLE(\n"
                + "\t\t\t\t\t\tTABLE source_table\n"
                + "\t\t\t\t\t\t, DESCRIPTOR(row_time)\n"
                + "\t\t\t\t\t\t, INTERVAL '60' SECOND))\n"
                + "\t\t\tGROUP BY \n"
                + "\t\t\t\twindow_time,window_start, window_end,\n"
                + "\t\t\t\tdim,\n"
                + "\t\t\t\tmod(user_id, 1024)\n"
                + "\t\t)\n"
                + "\t\t, DESCRIPTOR(rowtime)\n"
                + "\t\t, INTERVAL '60' SECOND)\n"
                + ")\n"
                + "group by dim,\n"
                + "\t\t window_start, window_end";

        // Variant 2 (executed): same cascade expressed via a WITH clause.
        String selectWhereSql = "insert into sink_table\n"
                + "with tmp as (\n"
                + "\tSELECT \n"
                + "\t\tdim,\n"
                + "\t\twindow_time as t,\n"
                + "\t count(*) as bucket_pv,\n"
                + "\t sum(price) as bucket_sum_price,\n"
                + "\t max(price) as bucket_max_price,\n"
                + "\t min(price) as bucket_min_price,\n"
                + "\t count(distinct user_id) as bucket_uv\n"
                + "\tFROM TABLE(TUMBLE(\n"
                + "\t\t\t\tTABLE source_table\n"
                + "\t\t\t\t, DESCRIPTOR(row_time)\n"
                + "\t\t\t\t, INTERVAL '60' SECOND))\n"
                + "\tGROUP BY \n"
                + "\t\twindow_time,window_start, window_end, \n"
                + "\t\tdim,\n"
                + "\t\tmod(user_id, 1024)\n"
                + ")\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t sum(bucket_sum_price) as sum_price,\n"
                + "\t max(bucket_max_price) as max_price,\n"
                + "\t min(bucket_min_price) as min_price,\n"
                + "\t sum(bucket_uv) as uv,\n"
                + "\t UNIX_TIMESTAMP(CAST(window_start AS STRING)) as window_start\n"
                + "from TABLE(\n"
                + "\t TUMBLE(\n"
                + "\t\tTABLE tmp\n"
                + "\t\t, DESCRIPTOR(t)\n"
                +
"\t\t, INTERVAL '60' SECOND)\n"
                + ")\n"
                + "group by dim,\n"
                + "\t\t window_start, window_end";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name",
                "1.13.5 WINDOW TVF TUMBLE WINDOW 案例");

        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);
        flinkEnv.streamTEnv().executeSql(selectWhereSql);

        /**
         * Two-phase aggregation operators involved:
         * local agg: {@link org.apache.flink.table.runtime.operators.aggregate.window.LocalSlicingWindowAggOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.LocalAggCombiner}
         *
         * keyed agg: {@link org.apache.flink.table.runtime.operators.window.slicing.SlicingWindowOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.processors.SliceUnsharedWindowAggProcessor}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.GlobalAggCombiner}
         */
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/TumbleWindowTest3.java ================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Mixes a Window TVF inner aggregation with a legacy GROUP BY TUMBLE(...)
 * outer aggregation on the inner window_time.
 */
public class TumbleWindowTest3 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--table.optimizer.agg-phase-strategy",
                "TWO_PHASE"});

        String sourceSql = "CREATE TABLE source_table (\n"
                + " dim STRING,\n"
                + " user_id BIGINT,\n"
                + " price BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10000',\n"
                + " 'fields.dim.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // Outer legacy TUMBLE group window over the inner TVF's window_time (aliased rowtime).
        String selectWhereSql = "insert into sink_table\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t sum(bucket_sum_price) as sum_price,\n"
                + "\t max(bucket_max_price) as max_price,\n"
                + "\t min(bucket_min_price) as min_price,\n"
                + "\t sum(bucket_uv) as uv,\n"
                + "\t UNIX_TIMESTAMP(CAST(TUMBLE_START(rowtime, INTERVAL '5' MINUTE) AS STRING)) as window_start\n"
                + "from (\n"
                + "\tSELECT \n"
                + "\t\tdim,\n"
                + "\t\twindow_time as rowtime,\n"
                + "\t count(*) as bucket_pv,\n"
                + "\t sum(price) as bucket_sum_price,\n"
                + "\t max(price) as bucket_max_price,\n"
                + "\t min(price) as bucket_min_price,\n"
                + "\t count(distinct user_id) as bucket_uv\n"
                + "\tFROM TABLE(TUMBLE(\n"
                + "\t\t\t\tTABLE source_table\n"
                + "\t\t\t\t, DESCRIPTOR(row_time)\n"
                + "\t\t\t\t, INTERVAL '60' SECOND))\n"
                + "\tGROUP BY \n"
                + "\t\twindow_time,window_start, window_end, \n"
                + "\t\tdim,\n"
                + "\t\tmod(user_id, 1024)\n"
                + ")\n"
                + "group by dim,\n"
                + " \t\tTUMBLE(rowtime, INTERVAL '5' MINUTE)";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name",
                "1.13.5 WINDOW TVF TUMBLE WINDOW 案例");

        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);
        flinkEnv.streamTEnv().executeSql(selectWhereSql);

        /**
         * Two-phase aggregation operators involved:
         * local agg: {@link org.apache.flink.table.runtime.operators.aggregate.window.LocalSlicingWindowAggOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.LocalAggCombiner}
         *
         * keyed agg: {@link org.apache.flink.table.runtime.operators.window.slicing.SlicingWindowOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.processors.SliceUnsharedWindowAggProcessor}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.GlobalAggCombiner}
         */
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/TumbleWindowTest4.java ================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Cascaded legacy group windows: inner TUMBLE with TUMBLE_ROWTIME feeding an
 * outer TUMBLE on the propagated rowtime attribute.
 */
public class TumbleWindowTest4 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--table.optimizer.agg-phase-strategy",
                "TWO_PHASE"});

        String sourceSql = "CREATE TABLE source_table (\n"
                + " dim STRING,\n"
                + " user_id BIGINT,\n"
                + " price BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10000',\n"
                + " 'fields.dim.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        // TUMBLE_ROWTIME keeps the inner result a time attribute so the
        // outer TUMBLE can window on it.
        String selectWhereSql = "insert into sink_table\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t sum(bucket_sum_price) as sum_price,\n"
                + "\t max(bucket_max_price) as max_price,\n"
                + "\t min(bucket_min_price) as min_price,\n"
                + "\t sum(bucket_uv) as uv,\n"
                + "\t UNIX_TIMESTAMP(CAST(TUMBLE_START(rowtime, INTERVAL '5' MINUTE) AS STRING)) as rowtime\n"
                + "from (\n"
                + "\tSELECT \n"
                + "\t\tdim,\n"
                + "\t\tTUMBLE_ROWTIME(row_time, INTERVAL '5' MINUTE) as rowtime,\n"
                + "\t count(*) as bucket_pv,\n"
                + "\t sum(price) as bucket_sum_price,\n"
                + "\t max(price) as bucket_max_price,\n"
                + "\t min(price) as bucket_min_price,\n"
                + "\t count(distinct user_id) as bucket_uv\n"
                + "\tFROM source_table\n"
                + "\tGROUP BY \n"
                + "\t\tTUMBLE(row_time, INTERVAL '5' MINUTE),\n"
                + "\t\tdim,\n"
                + "\t\tmod(user_id, 1024)\n"
                + ")\n"
                + "group by dim,\n"
                + " \t\tTUMBLE(rowtime, INTERVAL '5' MINUTE)";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name",
                "1.13.5 WINDOW TVF TUMBLE WINDOW 案例");

        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);
        flinkEnv.streamTEnv().executeSql(selectWhereSql);

        /**
         * Two-phase aggregation operators involved:
         * local agg: {@link org.apache.flink.table.runtime.operators.aggregate.window.LocalSlicingWindowAggOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.LocalAggCombiner}
         *
         * keyed agg: {@link org.apache.flink.table.runtime.operators.window.slicing.SlicingWindowOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.processors.SliceUnsharedWindowAggProcessor}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.GlobalAggCombiner}
         */
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/TumbleWindowTest5.java ================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.types.Row;
import org.apache.flink.util.Collector;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Cascaded windows via the DataStream bridge: the inner TVF aggregation is
 * converted to a retract stream, re-timestamped, registered as a view, and
 * windowed again in SQL.
 */
public class TumbleWindowTest5 {

    public static void main(String[] args)
throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--table.optimizer.agg-phase-strategy",
                "TWO_PHASE"});

        String sourceSql = "CREATE TABLE source_table (\n"
                + " dim STRING,\n"
                + " user_id BIGINT,\n"
                + " price BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10000',\n"
                + " 'fields.dim.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ")";

        String sinkSql = "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ")";

        flinkEnv.streamTEnv().executeSql(sourceSql);
        flinkEnv.streamTEnv().executeSql(sinkSql);

        // Inner TVF aggregation; window_start is converted to epoch seconds
        // and reused as the event time of the intermediate stream.
        String s1 = "\tSELECT \n"
                + "\t\tdim,\n"
                + "\t\tUNIX_TIMESTAMP(CAST(window_start AS STRING)) as rowtime,\n"
                + "\t count(*) as bucket_pv,\n"
                + "\t sum(price) as bucket_sum_price,\n"
                + "\t max(price) as bucket_max_price,\n"
                + "\t min(price) as bucket_min_price,\n"
                + "\t count(distinct user_id) as bucket_uv\n"
                + "\tFROM TABLE(TUMBLE(\n"
                + "\t\t\t\tTABLE source_table\n"
                + "\t\t\t\t, DESCRIPTOR(row_time)\n"
                + "\t\t\t\t, INTERVAL '60' SECOND))\n"
                + "\tGROUP BY \n"
                + "\t\twindow_start,\n"
                + "\t\twindow_end,\n"
                + "\t\tdim,\n"
                + "\t\tmod(user_id, 1024)\n";

        // NOTE(review): generic type parameters appear stripped by the extraction
        // (e.g. FlatMapFunction<Tuple2<Boolean, Row>, Row>) — confirm against the original file.
        DataStream r = flinkEnv.streamTEnv()
                .toRetractStream(flinkEnv.streamTEnv().sqlQuery(s1), Row.class)
                // Drop the retract flag, keep only the Row payload.
                .flatMap(new FlatMapFunction, Row>() {
                    @Override
                    public void flatMap(Tuple2 value, Collector out) throws Exception {
                        out.collect(value.f1);
                    }
                })
                .returns(new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(Long.class),
                        TypeInformation.of(Long.class), TypeInformation.of(Long.class), TypeInformation.of(Long.class),
                        TypeInformation.of(Long.class), TypeInformation.of(Long.class)));

        // Re-assign event time from field f1 (the rowtime column) with 0 allowed lateness.
        DataStream d = r
                .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor(Time.minutes(0)) {
                    @Override
                    public long extractTimestamp(Row element) {
                        return element.getFieldAs("f1");
                    }
                });

        // Table t = flinkEnv.streamTEnv().fromDataStream(d, "dim, rowtime, bucket_pv, bucket_sum_price, bucket_max_price, bucket_min_price, bucket_uv, rowtime.rowtime");

        flinkEnv.streamTEnv().createTemporaryView("tmp", d,
                "dim, bucket_pv, bucket_sum_price, bucket_max_price, bucket_min_price, bucket_uv, rowtime.rowtime");

        // Outer TVF aggregation over the re-registered view.
        String selectWhereSql = "insert into sink_table\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t sum(bucket_sum_price) as sum_price,\n"
                + "\t max(bucket_max_price) as max_price,\n"
                + "\t min(bucket_min_price) as min_price,\n"
                + "\t sum(bucket_uv) as uv,\n"
                + "\t UNIX_TIMESTAMP(CAST(window_start AS STRING)) as window_start\n"
                + "from TABLE(\n"
                + "\t TUMBLE(\n"
                + "\t\tTABLE tmp\n"
                + "\t\t, DESCRIPTOR(rowtime)\n"
                + "\t\t, INTERVAL '60' SECOND)\n"
                + ")\n"
                + "group by dim,\n"
                + "\t\t window_start, window_end";

        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name",
                "1.13.5 WINDOW TVF TUMBLE WINDOW 案例");

        flinkEnv.streamTEnv().executeSql(selectWhereSql);

        /**
         * Two-phase aggregation operators involved:
         * local agg: {@link org.apache.flink.table.runtime.operators.aggregate.window.LocalSlicingWindowAggOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.LocalAggCombiner}
         *
         * keyed agg: {@link org.apache.flink.table.runtime.operators.window.slicing.SlicingWindowOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.processors.SliceUnsharedWindowAggProcessor}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.GlobalAggCombiner}
         */
    }
}

================================================ FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/global_agg/GlobalWindowAggsHandler$232.java
================================================
package flink.examples.sql._07.query._04_window_agg._01_tumble_window.global_agg;

/**
 * NOTE(review): this appears to be Janino-generated code from Flink's table runtime
 * (the {@code $NNN} suffixes and constant-folded {@code if (false)} / {@code if (true)}
 * branches are codegen artifacts), captured and checked in for study next to the
 * two-phase tumble-window TVF example. Do not hand-edit the logic.
 *
 * It is the GLOBAL phase of a two-phase window aggregation and keeps five
 * accumulators per (key, window-namespace):
 * <ul>
 *   <li>{@code agg0_count1} — count(*): incremented by 1 per row / merged by addition</li>
 *   <li>{@code agg1_sum}    — sum over long column 2 of the input row (presumably price — confirm against the query)</li>
 *   <li>{@code agg2_max}    — max over long column 2</li>
 *   <li>{@code agg3_min}    — min over long column 2</li>
 *   <li>{@code agg4_count}  — count(distinct long column 3) (presumably user_id),
 *       backed by a {@code MapView<Long, Long>} used as a one-bit-per-filter bitmap</li>
 * </ul>
 * Each {@code aggN} field is paired with an {@code aggNIsNull} flag; the generated
 * code threads SQL three-valued null semantics through those flags by hand.
 */
public final class GlobalWindowAggsHandler$232 implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction {
    // Assigns rows/namespaces to window slices; also used to derive window_start in getValue().
    private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedUnsharedSliceAssigner sliceAssigner$163;
    // Accumulator fields: value + hand-rolled null flag for each aggregate.
    long agg0_count1; boolean agg0_count1IsNull;
    long agg1_sum; boolean agg1_sumIsNull;
    long agg2_max; boolean agg2_maxIsNull;
    long agg3_min; boolean agg3_minIsNull;
    long agg4_count; boolean agg4_countIsNull;
    // Key/value serializers for the distinct-map state view (supplied via references[]).
    private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$164;
    private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$165;
    // State-backed map view holding the distinct bitmap for count(distinct ...).
    private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview;
    private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value;
    private org.apache.flink.table.api.dataview.MapView distinct_view_0;
    // Reusable accumulator rows (6 fields: 5 aggregates + raw distinct view).
    org.apache.flink.table.data.GenericRowData acc$167 = new org.apache.flink.table.data.GenericRowData(6);
    org.apache.flink.table.data.GenericRowData acc$169 = new org.apache.flink.table.data.GenericRowData(6);
    // Scratch view + converter used when merging another accumulator's distinct map.
    private org.apache.flink.table.api.dataview.MapView otherMapView$221;
    private transient org.apache.flink.table.data.conversion.RawObjectConverter converter$222;
    // Reusable output row (7 fields: 5 aggregates + window_start + window_end).
    org.apache.flink.table.data.GenericRowData aggValue$231 = new org.apache.flink.table.data.GenericRowData(7);
    private org.apache.flink.table.runtime.dataview.StateDataViewStore store;
    // Current window namespace (epoch millis of the slice end — see getValue()).
    private Long namespace;

    /**
     * Codegen constructor: runtime objects are smuggled in through the untyped
     * references array in a fixed, generator-chosen order.
     */
    public GlobalWindowAggsHandler$232(Object[] references) throws Exception {
        sliceAssigner$163 = (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedUnsharedSliceAssigner) references[0]));
        externalSerializer$164 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1]));
        externalSerializer$165 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[2]));
        converter$222 = (((org.apache.flink.table.data.conversion.RawObjectConverter) references[3]));
    }

    private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() {
        return store.getRuntimeContext();
    }

    /** Wires up the state-backed distinct map view and opens the raw-object converter. */
    @Override
    public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception {
        this.store = store;
        distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store.getStateMapView("distinctAcc_0", true, externalSerializer$164, externalSerializer$165);
        distinctAcc_0_dataview_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview);
        distinct_view_0 = distinctAcc_0_dataview;
        converter$222.open(getRuntimeContext().getUserCodeClassLoader());
    }

    /**
     * Folds one input row into the current accumulators.
     * Reads long column 2 (sum/max/min input) and long column 3 (distinct-count key).
     */
    @Override
    public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception {
        boolean isNull$176; long result$177; long field$178; boolean isNull$178; boolean isNull$179; long result$180;
        boolean isNull$183; boolean result$184; boolean isNull$188; boolean result$189;
        long field$193; boolean isNull$193; boolean isNull$195; long result$196;
        // Extract input columns with their null flags.
        isNull$178 = accInput.isNullAt(2); field$178 = -1L; if (!isNull$178) { field$178 = accInput.getLong(2); }
        isNull$193 = accInput.isNullAt(3); field$193 = -1L; if (!isNull$193) { field$193 = accInput.getLong(3); }
        // count(*): unconditional +1 ("|| false" is a constant-folded filter condition).
        isNull$176 = agg0_count1IsNull || false; result$177 = -1L;
        if (!isNull$176) { result$177 = (long) (agg0_count1 + ((long) 1L)); }
        agg0_count1 = result$177; ; agg0_count1IsNull = isNull$176;
        // sum: null input keeps the current sum; null sum adopts the input; else add.
        long result$182 = -1L; boolean isNull$182;
        if (isNull$178) {
            isNull$182 = agg1_sumIsNull; if (!isNull$182) { result$182 = agg1_sum; }
        } else {
            long result$181 = -1L; boolean isNull$181;
            if (agg1_sumIsNull) {
                isNull$181 = isNull$178; if (!isNull$181) { result$181 = field$178; }
            } else {
                isNull$179 = agg1_sumIsNull || isNull$178; result$180 = -1L;
                if (!isNull$179) { result$180 = (long) (agg1_sum + field$178); }
                isNull$181 = isNull$179; if (!isNull$181) { result$181 = result$180; }
            }
            isNull$182 = isNull$181; if (!isNull$182) { result$182 = result$181; }
        }
        agg1_sum = result$182; ; agg1_sumIsNull = isNull$182;
        // max: same null-threading shape, keeping the larger of (current, input).
        long result$187 = -1L; boolean isNull$187;
        if (isNull$178) {
            isNull$187 = agg2_maxIsNull; if (!isNull$187) { result$187 = agg2_max; }
        } else {
            long result$186 = -1L; boolean isNull$186;
            if (agg2_maxIsNull) {
                isNull$186 = isNull$178; if (!isNull$186) { result$186 = field$178; }
            } else {
                isNull$183 = isNull$178 || agg2_maxIsNull; result$184 = false;
                if (!isNull$183) { result$184 = field$178 > agg2_max; }
                long result$185 = -1L; boolean isNull$185;
                if (result$184) {
                    isNull$185 = isNull$178; if (!isNull$185) { result$185 = field$178; }
                } else {
                    isNull$185 = agg2_maxIsNull; if (!isNull$185) { result$185 = agg2_max; }
                }
                isNull$186 = isNull$185; if (!isNull$186) { result$186 = result$185; }
            }
            isNull$187 = isNull$186; if (!isNull$187) { result$187 = result$186; }
        }
        agg2_max = result$187; ; agg2_maxIsNull = isNull$187;
        // min: mirror of max with '<'.
        long result$192 = -1L; boolean isNull$192;
        if (isNull$178) {
            isNull$192 = agg3_minIsNull; if (!isNull$192) { result$192 = agg3_min; }
        } else {
            long result$191 = -1L; boolean isNull$191;
            if (agg3_minIsNull) {
                isNull$191 = isNull$178; if (!isNull$191) { result$191 = field$178; }
            } else {
                isNull$188 = isNull$178 || agg3_minIsNull; result$189 = false;
                if (!isNull$188) { result$189 = field$178 < agg3_min; }
                long result$190 = -1L; boolean isNull$190;
                if (result$189) {
                    isNull$190 = isNull$178; if (!isNull$190) { result$190 = field$178; }
                } else {
                    isNull$190 = agg3_minIsNull; if (!isNull$190) { result$190 = agg3_min; }
                }
                isNull$191 = isNull$190; if (!isNull$191) { result$191 = result$190; }
            }
            isNull$192 = isNull$191; if (!isNull$192) { result$192 = result$191; }
        }
        agg3_min = result$192; ; agg3_minIsNull = isNull$192;
        // count(distinct): per-key bitmap in the map view; bit 0 marks "seen".
        // Only when the bit flips 0 -> 1 is agg4_count incremented and the map updated.
        Long distinctKey$194 = (Long) field$193;
        if (isNull$193) { distinctKey$194 = null; }
        Long value$198 = (Long) distinct_view_0.get(distinctKey$194);
        if (value$198 == null) { value$198 = 0L; }
        boolean is_distinct_value_changed_0 = false;
        long existed$199 = ((long) value$198) & (1L << 0);
        if (existed$199 == 0) { // not existed
            value$198 = ((long) value$198) | (1L << 0);
            is_distinct_value_changed_0 = true;
            long result$197 = -1L; boolean isNull$197;
            if (isNull$193) {
                isNull$197 = agg4_countIsNull; if (!isNull$197) { result$197 = agg4_count; }
            } else {
                isNull$195 = agg4_countIsNull || false; result$196 = -1L;
                if (!isNull$195) { result$196 = (long) (agg4_count + ((long) 1L)); }
                isNull$197 = isNull$195; if (!isNull$197) { result$197 = result$196; }
            }
            agg4_count = result$197; ; agg4_countIsNull = isNull$197;
        }
        if (is_distinct_value_changed_0) { distinct_view_0.put(distinctKey$194, value$198); }
    }

    /** Append-only plan: retraction was not generated for this handler. */
    @Override
    public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception {
        throw new RuntimeException(
            "This function not require retract method, but the retract method is called.");
    }

    /**
     * Merges another accumulator row (same 6-field layout as getAccumulators())
     * into the current state for namespace {@code ns}. Note the column order of
     * the other accumulator: 0=count1, 1=sum, 2=max, 3=min, 5=raw distinct map.
     */
    @Override
    public void merge(Long ns, org.apache.flink.table.data.RowData otherAcc) throws Exception {
        namespace = (Long) ns;
        long field$200; boolean isNull$200; boolean isNull$201; long result$202;
        long field$203; boolean isNull$203; boolean isNull$204; long result$205;
        long field$208; boolean isNull$208; boolean isNull$209; boolean result$210;
        long field$214; boolean isNull$214; boolean isNull$215; boolean result$216;
        org.apache.flink.table.data.binary.BinaryRawValueData field$220; boolean isNull$220;
        boolean isNull$226; long result$227;
        // Extract the other accumulator's fields (codegen emits these out of order).
        isNull$208 = otherAcc.isNullAt(2); field$208 = -1L; if (!isNull$208) { field$208 = otherAcc.getLong(2); }
        isNull$203 = otherAcc.isNullAt(1); field$203 = -1L; if (!isNull$203) { field$203 = otherAcc.getLong(1); }
        isNull$200 = otherAcc.isNullAt(0); field$200 = -1L; if (!isNull$200) { field$200 = otherAcc.getLong(0); }
        isNull$214 = otherAcc.isNullAt(3); field$214 = -1L; if (!isNull$214) { field$214 = otherAcc.getLong(3); }
        isNull$220 = otherAcc.isNullAt(5); field$220 = null;
        if (!isNull$220) { field$220 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(5)); }
        // Convert the raw value back into a MapView for the distinct merge below.
        otherMapView$221 = null;
        if (!isNull$220) {
            otherMapView$221 = (org.apache.flink.table.api.dataview.MapView) converter$222.toExternal((org.apache.flink.table.data.binary.BinaryRawValueData) field$220);
        }
        // count(*): add the partial count.
        isNull$201 = agg0_count1IsNull || isNull$200; result$202 = -1L;
        if (!isNull$201) { result$202 = (long) (agg0_count1 + field$200); }
        agg0_count1 = result$202; ; agg0_count1IsNull = isNull$201;
        // sum: add the partial sum with the same null-threading as accumulate().
        long result$207 = -1L; boolean isNull$207;
        if (isNull$203) {
            isNull$207 = agg1_sumIsNull; if (!isNull$207) { result$207 = agg1_sum; }
        } else {
            long result$206 = -1L; boolean isNull$206;
            if (agg1_sumIsNull) {
                isNull$206 = isNull$203; if (!isNull$206) { result$206 = field$203; }
            } else {
                isNull$204 = agg1_sumIsNull || isNull$203; result$205 = -1L;
                if (!isNull$204) { result$205 = (long) (agg1_sum + field$203); }
                isNull$206 = isNull$204; if (!isNull$206) { result$206 = result$205; }
            }
            isNull$207 = isNull$206; if (!isNull$207) { result$207 = result$206; }
        }
        agg1_sum = result$207; ; agg1_sumIsNull = isNull$207;
        // max: keep the larger of (current, partial).
        long result$213 = -1L; boolean isNull$213;
        if (isNull$208) {
            isNull$213 = agg2_maxIsNull; if (!isNull$213) { result$213 = agg2_max; }
        } else {
            long result$212 = -1L; boolean isNull$212;
            if (agg2_maxIsNull) {
                isNull$212 = isNull$208; if (!isNull$212) { result$212 = field$208; }
            } else {
                isNull$209 = isNull$208 || agg2_maxIsNull; result$210 = false;
                if (!isNull$209) { result$210 = field$208 > agg2_max; }
                long result$211 = -1L; boolean isNull$211;
                if (result$210) {
                    isNull$211 = isNull$208; if (!isNull$211) { result$211 = field$208; }
                } else {
                    isNull$211 = agg2_maxIsNull; if (!isNull$211) { result$211 = agg2_max; }
                }
                isNull$212 = isNull$211; if (!isNull$212) { result$212 = result$211; }
            }
            isNull$213 = isNull$212; if (!isNull$213) { result$213 = result$212; }
        }
        agg2_max = result$213; ; agg2_maxIsNull = isNull$213;
        // min: keep the smaller of (current, partial).
        long result$219 = -1L; boolean isNull$219;
        if (isNull$214) {
            isNull$219 = agg3_minIsNull; if (!isNull$219) { result$219 = agg3_min; }
        } else {
            long result$218 = -1L; boolean isNull$218;
            if (agg3_minIsNull) {
                isNull$218 = isNull$214; if (!isNull$218) { result$218 = field$214; }
            } else {
                isNull$215 = isNull$214 || agg3_minIsNull; result$216 = false;
                if (!isNull$215) { result$216 = field$214 < agg3_min; }
                long result$217 = -1L; boolean isNull$217;
                if (result$216) {
                    isNull$217 = isNull$214; if (!isNull$217) { result$217 = field$214; }
                } else {
                    isNull$217 = agg3_minIsNull; if (!isNull$217) { result$217 = agg3_min; }
                }
                isNull$218 = isNull$217; if (!isNull$218) { result$218 = result$217; }
            }
            isNull$219 = isNull$218; if (!isNull$219) { result$219 = result$218; }
        }
        agg3_min = result$219; ; agg3_minIsNull = isNull$219;
        // count(distinct): OR the other side's per-key bitmaps into ours; a key counts
        // once when it was absent here (bit 0 clear) but present in the other map.
        Iterable otherEntries$229 = (Iterable) otherMapView$221.entries();
        if (otherEntries$229 != null) {
            for (java.util.Map.Entry entry : otherEntries$229) {
                Long distinctKey$223 = (Long) entry.getKey();
                long field$224 = -1L;
                boolean isNull$225 = true;
                if (distinctKey$223 != null) { isNull$225 = false; field$224 = (long) distinctKey$223; }
                Long otherValue = (Long) entry.getValue();
                Long thisValue = (Long) distinct_view_0.get(distinctKey$223);
                if (thisValue == null) { thisValue = 0L; }
                boolean is_distinct_value_changed_0 = false;
                boolean is_distinct_value_empty_0 = false;
                long existed$230 = ((long) thisValue) & (1L << 0);
                if (existed$230 == 0) { // not existed
                    long otherExisted = ((long) otherValue) & (1L << 0);
                    if (otherExisted != 0) { // existed in other
                        is_distinct_value_changed_0 = true;
                        // do accumulate
                        long result$228 = -1L; boolean isNull$228;
                        if (isNull$225) {
                            isNull$228 = agg4_countIsNull; if (!isNull$228) { result$228 = agg4_count; }
                        } else {
                            isNull$226 = agg4_countIsNull || false; result$227 = -1L;
                            if (!isNull$226) { result$227 = (long) (agg4_count + ((long) 1L)); }
                            isNull$228 = isNull$226; if (!isNull$228) { result$228 = result$227; }
                        }
                        agg4_count = result$228; ; agg4_countIsNull = isNull$228;
                    }
                }
                thisValue = ((long) thisValue) | ((long) otherValue);
                is_distinct_value_empty_0 = false;
                if (is_distinct_value_empty_0) {
                    distinct_view_0.remove(distinctKey$223);
                } else if (is_distinct_value_changed_0) {
                    // value is not empty and is changed, do update
                    distinct_view_0.put(distinctKey$223, thisValue);
                }
            } // end foreach
        } // end otherEntries != null
    }

    /**
     * Restores the five accumulator fields (and the distinct view) from an
     * accumulator row. With a non-null namespace the state-backed view is
     * re-scoped; otherwise the heap MapView is unwrapped from field 5.
     */
    @Override
    public void setAccumulators(Long ns, org.apache.flink.table.data.RowData acc) throws Exception {
        namespace = (Long) ns;
        long field$170; boolean isNull$170; long field$171; boolean isNull$171;
        long field$172; boolean isNull$172; long field$173; boolean isNull$173;
        long field$174; boolean isNull$174;
        org.apache.flink.table.data.binary.BinaryRawValueData field$175; boolean isNull$175;
        isNull$174 = acc.isNullAt(4); field$174 = -1L; if (!isNull$174) { field$174 = acc.getLong(4); }
        isNull$170 = acc.isNullAt(0); field$170 = -1L; if (!isNull$170) { field$170 = acc.getLong(0); }
        isNull$171 = acc.isNullAt(1); field$171 = -1L; if (!isNull$171) { field$171 = acc.getLong(1); }
        isNull$173 = acc.isNullAt(3); field$173 = -1L; if (!isNull$173) { field$173 = acc.getLong(3); }
        // when namespace is null, the dataview is used in heap, no key and namespace set
        if (namespace != null) {
            distinctAcc_0_dataview.setCurrentNamespace(namespace);
            distinct_view_0 = distinctAcc_0_dataview;
        } else {
            isNull$175 = acc.isNullAt(5); field$175 = null;
            if (!isNull$175) { field$175 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); }
            distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$175.getJavaObject();
        }
        isNull$172 = acc.isNullAt(2); field$172 = -1L; if (!isNull$172) { field$172 = acc.getLong(2); }
        agg0_count1 = field$170; ; agg0_count1IsNull = isNull$170;
        agg1_sum = field$171; ; agg1_sumIsNull = isNull$171;
        agg2_max = field$172; ; agg2_maxIsNull = isNull$172;
        agg3_min = field$173; ; agg3_minIsNull = isNull$173;
        agg4_count = field$174; ; agg4_countIsNull = isNull$174;
    }

    /**
     * Snapshots the current state into a 6-field row:
     * 0=count1, 1=sum, 2=max, 3=min, 4=distinct count, 5=raw distinct view.
     */
    @Override
    public org.apache.flink.table.data.RowData getAccumulators() throws Exception {
        acc$169 = new org.apache.flink.table.data.GenericRowData(6);
        if (agg0_count1IsNull) { acc$169.setField(0, null); } else { acc$169.setField(0, agg0_count1); }
        if (agg1_sumIsNull) { acc$169.setField(1, null); } else { acc$169.setField(1, agg1_sum); }
        if (agg2_maxIsNull) { acc$169.setField(2, null); } else { acc$169.setField(2, agg2_max); }
        if (agg3_minIsNull) { acc$169.setField(3, null); } else { acc$169.setField(3, agg3_min); }
        if (agg4_countIsNull) { acc$169.setField(4, null); } else { acc$169.setField(4, agg4_count); }
        org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$168 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0);
        if (false) { acc$169.setField(5, null); } else { acc$169.setField(5, distinct_acc$168); }
        return acc$169;
    }

    /**
     * Builds the initial accumulator row: counts start at 0, sum/max/min start
     * as SQL NULL (the constant-folded {@code if (true)} branches), and the
     * distinct view starts as a fresh empty MapView.
     */
    @Override
    public org.apache.flink.table.data.RowData createAccumulators() throws Exception {
        acc$167 = new org.apache.flink.table.data.GenericRowData(6);
        if (false) { acc$167.setField(0, null); } else { acc$167.setField(0, ((long) 0L)); }
        if (true) { acc$167.setField(1, null); } else { acc$167.setField(1, ((long) -1L)); }
        if (true) { acc$167.setField(2, null); } else { acc$167.setField(2, ((long) -1L)); }
        if (true) { acc$167.setField(3, null); } else { acc$167.setField(3, ((long) -1L)); }
        if (false) { acc$167.setField(4, null); } else { acc$167.setField(4, ((long) 0L)); }
        org.apache.flink.table.api.dataview.MapView mapview$166 = new org.apache.flink.table.api.dataview.MapView();
        org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$166 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$166);
        if (false) { acc$167.setField(5, null); } else { acc$167.setField(5, distinct_acc$166); }
        return acc$167;
    }

    /**
     * Emits the 7-field result row for namespace {@code ns}: the five aggregates,
     * then window_start (via the slice assigner) and window_end (the namespace
     * itself, as epoch-millis timestamps).
     */
    @Override
    public org.apache.flink.table.data.RowData getValue(Long ns) throws Exception {
        namespace = (Long) ns;
        aggValue$231 = new org.apache.flink.table.data.GenericRowData(7);
        if (agg0_count1IsNull) { aggValue$231.setField(0, null); } else { aggValue$231.setField(0, agg0_count1); }
        if (agg1_sumIsNull) { aggValue$231.setField(1, null); } else { aggValue$231.setField(1, agg1_sum); }
        if (agg2_maxIsNull) { aggValue$231.setField(2, null); } else { aggValue$231.setField(2, agg2_max); }
        if (agg3_minIsNull) { aggValue$231.setField(3, null); } else { aggValue$231.setField(3, agg3_min); }
        if (agg4_countIsNull) { aggValue$231.setField(4, null); } else { aggValue$231.setField(4, agg4_count); }
        if (false) { aggValue$231.setField(5, null); } else { aggValue$231.setField(5, org.apache.flink.table.data.TimestampData.fromEpochMillis(sliceAssigner$163.getWindowStart(namespace))); }
        if (false) { aggValue$231.setField(6, null); } else { aggValue$231.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace)); }
        return aggValue$231;
    }

    /** Clears the distinct-map state for a finished window namespace. */
    @Override
    public void cleanup(Long ns) throws Exception {
        namespace = (Long) ns;
        distinctAcc_0_dataview.setCurrentNamespace(namespace);
        distinctAcc_0_dataview.clear();
    }

    @Override
    public void close() throws Exception {
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/global_agg/LocalWindowAggsHandler$162.java ================================================ package flink.examples.sql._07.query._04_window_agg._01_tumble_window.global_agg; public final class LocalWindowAggsHandler$162 implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedUnsharedSliceAssigner sliceAssigner$95; long agg0_count1; boolean agg0_count1IsNull; long agg1_sum; boolean agg1_sumIsNull; long agg2_max; boolean agg2_maxIsNull; long agg3_min; boolean
agg3_minIsNull; long agg4_count; boolean agg4_countIsNull; private org.apache.flink.table.api.dataview.MapView distinct_view_0; org.apache.flink.table.data.GenericRowData acc$97 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData acc$99 = new org.apache.flink.table.data.GenericRowData(6); private org.apache.flink.table.api.dataview.MapView otherMapView$151; private transient org.apache.flink.table.data.conversion.RawObjectConverter converter$152; org.apache.flink.table.data.GenericRowData aggValue$161 = new org.apache.flink.table.data.GenericRowData(7); private org.apache.flink.table.runtime.dataview.StateDataViewStore store; private Long namespace; public LocalWindowAggsHandler$162(Object[] references) throws Exception { sliceAssigner$95 = (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedUnsharedSliceAssigner) references[0])); converter$152 = (((org.apache.flink.table.data.conversion.RawObjectConverter) references[1])); } private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { return store.getRuntimeContext(); } @Override public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { this.store = store; converter$152.open(getRuntimeContext().getUserCodeClassLoader()); } @Override public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { boolean isNull$106; long result$107; long field$108; boolean isNull$108; boolean isNull$109; long result$110; boolean isNull$113; boolean result$114; boolean isNull$118; boolean result$119; long field$123; boolean isNull$123; boolean isNull$125; long result$126; isNull$108 = accInput.isNullAt(2); field$108 = -1L; if (!isNull$108) { field$108 = accInput.getLong(2); } isNull$123 = accInput.isNullAt(3); field$123 = -1L; if (!isNull$123) { field$123 = accInput.getLong(3); } isNull$106 = agg0_count1IsNull || false; result$107 = -1L; if (!isNull$106) { result$107 = 
(long) (agg0_count1 + ((long) 1L)); } agg0_count1 = result$107; ; agg0_count1IsNull = isNull$106; long result$112 = -1L; boolean isNull$112; if (isNull$108) { isNull$112 = agg1_sumIsNull; if (!isNull$112) { result$112 = agg1_sum; } } else { long result$111 = -1L; boolean isNull$111; if (agg1_sumIsNull) { isNull$111 = isNull$108; if (!isNull$111) { result$111 = field$108; } } else { isNull$109 = agg1_sumIsNull || isNull$108; result$110 = -1L; if (!isNull$109) { result$110 = (long) (agg1_sum + field$108); } isNull$111 = isNull$109; if (!isNull$111) { result$111 = result$110; } } isNull$112 = isNull$111; if (!isNull$112) { result$112 = result$111; } } agg1_sum = result$112; ; agg1_sumIsNull = isNull$112; long result$117 = -1L; boolean isNull$117; if (isNull$108) { isNull$117 = agg2_maxIsNull; if (!isNull$117) { result$117 = agg2_max; } } else { long result$116 = -1L; boolean isNull$116; if (agg2_maxIsNull) { isNull$116 = isNull$108; if (!isNull$116) { result$116 = field$108; } } else { isNull$113 = isNull$108 || agg2_maxIsNull; result$114 = false; if (!isNull$113) { result$114 = field$108 > agg2_max; } long result$115 = -1L; boolean isNull$115; if (result$114) { isNull$115 = isNull$108; if (!isNull$115) { result$115 = field$108; } } else { isNull$115 = agg2_maxIsNull; if (!isNull$115) { result$115 = agg2_max; } } isNull$116 = isNull$115; if (!isNull$116) { result$116 = result$115; } } isNull$117 = isNull$116; if (!isNull$117) { result$117 = result$116; } } agg2_max = result$117; ; agg2_maxIsNull = isNull$117; long result$122 = -1L; boolean isNull$122; if (isNull$108) { isNull$122 = agg3_minIsNull; if (!isNull$122) { result$122 = agg3_min; } } else { long result$121 = -1L; boolean isNull$121; if (agg3_minIsNull) { isNull$121 = isNull$108; if (!isNull$121) { result$121 = field$108; } } else { isNull$118 = isNull$108 || agg3_minIsNull; result$119 = false; if (!isNull$118) { result$119 = field$108 < agg3_min; } long result$120 = -1L; boolean isNull$120; if (result$119) { 
isNull$120 = isNull$108; if (!isNull$120) { result$120 = field$108; } } else { isNull$120 = agg3_minIsNull; if (!isNull$120) { result$120 = agg3_min; } } isNull$121 = isNull$120; if (!isNull$121) { result$121 = result$120; } } isNull$122 = isNull$121; if (!isNull$122) { result$122 = result$121; } } agg3_min = result$122; ; agg3_minIsNull = isNull$122; Long distinctKey$124 = (Long) field$123; if (isNull$123) { distinctKey$124 = null; } Long value$128 = (Long) distinct_view_0.get(distinctKey$124); if (value$128 == null) { value$128 = 0L; } boolean is_distinct_value_changed_0 = false; long existed$129 = ((long) value$128) & (1L << 0); if (existed$129 == 0) { // not existed value$128 = ((long) value$128) | (1L << 0); is_distinct_value_changed_0 = true; long result$127 = -1L; boolean isNull$127; if (isNull$123) { isNull$127 = agg4_countIsNull; if (!isNull$127) { result$127 = agg4_count; } } else { isNull$125 = agg4_countIsNull || false; result$126 = -1L; if (!isNull$125) { result$126 = (long) (agg4_count + ((long) 1L)); } isNull$127 = isNull$125; if (!isNull$127) { result$127 = result$126; } } agg4_count = result$127; ; agg4_countIsNull = isNull$127; } if (is_distinct_value_changed_0) { distinct_view_0.put(distinctKey$124, value$128); } } @Override public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { throw new RuntimeException( "This function not require retract method, but the retract method is called."); } @Override public void merge(Long ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { namespace = (Long) ns; long field$130; boolean isNull$130; boolean isNull$131; long result$132; long field$133; boolean isNull$133; boolean isNull$134; long result$135; long field$138; boolean isNull$138; boolean isNull$139; boolean result$140; long field$144; boolean isNull$144; boolean isNull$145; boolean result$146; org.apache.flink.table.data.binary.BinaryRawValueData field$150; boolean isNull$150; boolean isNull$156; long 
result$157; isNull$130 = otherAcc.isNullAt(2); field$130 = -1L; if (!isNull$130) { field$130 = otherAcc.getLong(2); } isNull$133 = otherAcc.isNullAt(3); field$133 = -1L; if (!isNull$133) { field$133 = otherAcc.getLong(3); } isNull$150 = otherAcc.isNullAt(7); field$150 = null; if (!isNull$150) { field$150 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(7)); } otherMapView$151 = null; if (!isNull$150) { otherMapView$151 = (org.apache.flink.table.api.dataview.MapView) converter$152 .toExternal((org.apache.flink.table.data.binary.BinaryRawValueData) field$150); } isNull$144 = otherAcc.isNullAt(5); field$144 = -1L; if (!isNull$144) { field$144 = otherAcc.getLong(5); } isNull$138 = otherAcc.isNullAt(4); field$138 = -1L; if (!isNull$138) { field$138 = otherAcc.getLong(4); } isNull$131 = agg0_count1IsNull || isNull$130; result$132 = -1L; if (!isNull$131) { result$132 = (long) (agg0_count1 + field$130); } agg0_count1 = result$132; ; agg0_count1IsNull = isNull$131; long result$137 = -1L; boolean isNull$137; if (isNull$133) { isNull$137 = agg1_sumIsNull; if (!isNull$137) { result$137 = agg1_sum; } } else { long result$136 = -1L; boolean isNull$136; if (agg1_sumIsNull) { isNull$136 = isNull$133; if (!isNull$136) { result$136 = field$133; } } else { isNull$134 = agg1_sumIsNull || isNull$133; result$135 = -1L; if (!isNull$134) { result$135 = (long) (agg1_sum + field$133); } isNull$136 = isNull$134; if (!isNull$136) { result$136 = result$135; } } isNull$137 = isNull$136; if (!isNull$137) { result$137 = result$136; } } agg1_sum = result$137; ; agg1_sumIsNull = isNull$137; long result$143 = -1L; boolean isNull$143; if (isNull$138) { isNull$143 = agg2_maxIsNull; if (!isNull$143) { result$143 = agg2_max; } } else { long result$142 = -1L; boolean isNull$142; if (agg2_maxIsNull) { isNull$142 = isNull$138; if (!isNull$142) { result$142 = field$138; } } else { isNull$139 = isNull$138 || agg2_maxIsNull; result$140 = false; if (!isNull$139) { result$140 = 
field$138 > agg2_max; } long result$141 = -1L; boolean isNull$141; if (result$140) { isNull$141 = isNull$138; if (!isNull$141) { result$141 = field$138; } } else { isNull$141 = agg2_maxIsNull; if (!isNull$141) { result$141 = agg2_max; } } isNull$142 = isNull$141; if (!isNull$142) { result$142 = result$141; } } isNull$143 = isNull$142; if (!isNull$143) { result$143 = result$142; } } agg2_max = result$143; ; agg2_maxIsNull = isNull$143; long result$149 = -1L; boolean isNull$149; if (isNull$144) { isNull$149 = agg3_minIsNull; if (!isNull$149) { result$149 = agg3_min; } } else { long result$148 = -1L; boolean isNull$148; if (agg3_minIsNull) { isNull$148 = isNull$144; if (!isNull$148) { result$148 = field$144; } } else { isNull$145 = isNull$144 || agg3_minIsNull; result$146 = false; if (!isNull$145) { result$146 = field$144 < agg3_min; } long result$147 = -1L; boolean isNull$147; if (result$146) { isNull$147 = isNull$144; if (!isNull$147) { result$147 = field$144; } } else { isNull$147 = agg3_minIsNull; if (!isNull$147) { result$147 = agg3_min; } } isNull$148 = isNull$147; if (!isNull$148) { result$148 = result$147; } } isNull$149 = isNull$148; if (!isNull$149) { result$149 = result$148; } } agg3_min = result$149; ; agg3_minIsNull = isNull$149; Iterable otherEntries$159 = (Iterable) otherMapView$151.entries(); if (otherEntries$159 != null) { for (java.util.Map.Entry entry : otherEntries$159) { Long distinctKey$153 = (Long) entry.getKey(); long field$154 = -1L; boolean isNull$155 = true; if (distinctKey$153 != null) { isNull$155 = false; field$154 = (long) distinctKey$153; } Long otherValue = (Long) entry.getValue(); Long thisValue = (Long) distinct_view_0.get(distinctKey$153); if (thisValue == null) { thisValue = 0L; } boolean is_distinct_value_changed_0 = false; boolean is_distinct_value_empty_0 = false; long existed$160 = ((long) thisValue) & (1L << 0); if (existed$160 == 0) { // not existed long otherExisted = ((long) otherValue) & (1L << 0); if (otherExisted != 0) { 
// existed in other is_distinct_value_changed_0 = true; // do accumulate long result$158 = -1L; boolean isNull$158; if (isNull$155) { isNull$158 = agg4_countIsNull; if (!isNull$158) { result$158 = agg4_count; } } else { isNull$156 = agg4_countIsNull || false; result$157 = -1L; if (!isNull$156) { result$157 = (long) (agg4_count + ((long) 1L)); } isNull$158 = isNull$156; if (!isNull$158) { result$158 = result$157; } } agg4_count = result$158; ; agg4_countIsNull = isNull$158; } } thisValue = ((long) thisValue) | ((long) otherValue); is_distinct_value_empty_0 = false; if (is_distinct_value_empty_0) { distinct_view_0.remove(distinctKey$153); } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update distinct_view_0.put(distinctKey$153, thisValue); } } // end foreach } // end otherEntries != null } @Override public void setAccumulators(Long ns, org.apache.flink.table.data.RowData acc) throws Exception { namespace = (Long) ns; long field$100; boolean isNull$100; long field$101; boolean isNull$101; long field$102; boolean isNull$102; long field$103; boolean isNull$103; long field$104; boolean isNull$104; org.apache.flink.table.data.binary.BinaryRawValueData field$105; boolean isNull$105; isNull$104 = acc.isNullAt(4); field$104 = -1L; if (!isNull$104) { field$104 = acc.getLong(4); } isNull$100 = acc.isNullAt(0); field$100 = -1L; if (!isNull$100) { field$100 = acc.getLong(0); } isNull$101 = acc.isNullAt(1); field$101 = -1L; if (!isNull$101) { field$101 = acc.getLong(1); } isNull$103 = acc.isNullAt(3); field$103 = -1L; if (!isNull$103) { field$103 = acc.getLong(3); } isNull$105 = acc.isNullAt(5); field$105 = null; if (!isNull$105) { field$105 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); } distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$105.getJavaObject(); isNull$102 = acc.isNullAt(2); field$102 = -1L; if (!isNull$102) { field$102 = acc.getLong(2); } agg0_count1 = field$100; ; 
agg0_count1IsNull = isNull$100; agg1_sum = field$101; ; agg1_sumIsNull = isNull$101; agg2_max = field$102; ; agg2_maxIsNull = isNull$102; agg3_min = field$103; ; agg3_minIsNull = isNull$103; agg4_count = field$104; ; agg4_countIsNull = isNull$104; } @Override public org.apache.flink.table.data.RowData getAccumulators() throws Exception { acc$99 = new org.apache.flink.table.data.GenericRowData(6); if (agg0_count1IsNull) { acc$99.setField(0, null); } else { acc$99.setField(0, agg0_count1); } if (agg1_sumIsNull) { acc$99.setField(1, null); } else { acc$99.setField(1, agg1_sum); } if (agg2_maxIsNull) { acc$99.setField(2, null); } else { acc$99.setField(2, agg2_max); } if (agg3_minIsNull) { acc$99.setField(3, null); } else { acc$99.setField(3, agg3_min); } if (agg4_countIsNull) { acc$99.setField(4, null); } else { acc$99.setField(4, agg4_count); } org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$98 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); if (false) { acc$99.setField(5, null); } else { acc$99.setField(5, distinct_acc$98); } return acc$99; } @Override public org.apache.flink.table.data.RowData createAccumulators() throws Exception { acc$97 = new org.apache.flink.table.data.GenericRowData(6); if (false) { acc$97.setField(0, null); } else { acc$97.setField(0, ((long) 0L)); } if (true) { acc$97.setField(1, null); } else { acc$97.setField(1, ((long) -1L)); } if (true) { acc$97.setField(2, null); } else { acc$97.setField(2, ((long) -1L)); } if (true) { acc$97.setField(3, null); } else { acc$97.setField(3, ((long) -1L)); } if (false) { acc$97.setField(4, null); } else { acc$97.setField(4, ((long) 0L)); } org.apache.flink.table.api.dataview.MapView mapview$96 = new org.apache.flink.table.api.dataview.MapView(); org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$96 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$96); if (false) { acc$97.setField(5, null); } else { 
acc$97.setField(5, distinct_acc$96); } return acc$97; }
/* getValue: materializes the 7-column output row for window namespace `ns`:
 * fields 0-4 are the five aggregate results (count(*), sum, max, min, distinct count),
 * field 5 is window_start (derived via sliceAssigner$95.getWindowStart), field 6 is
 * window_end (the namespace itself, an epoch-millis Long). */
@Override public org.apache.flink.table.data.RowData getValue(Long ns) throws Exception { namespace = (Long) ns; aggValue$161 = new org.apache.flink.table.data.GenericRowData(7); if (agg0_count1IsNull) { aggValue$161.setField(0, null); } else { aggValue$161.setField(0, agg0_count1); } if (agg1_sumIsNull) { aggValue$161.setField(1, null); } else { aggValue$161.setField(1, agg1_sum); } if (agg2_maxIsNull) { aggValue$161.setField(2, null); } else { aggValue$161.setField(2, agg2_max); } if (agg3_minIsNull) { aggValue$161.setField(3, null); } else { aggValue$161.setField(3, agg3_min); } if (agg4_countIsNull) { aggValue$161.setField(4, null); } else { aggValue$161.setField(4, agg4_count); } if (false) { aggValue$161.setField(5, null); } else { aggValue$161.setField(5, org.apache.flink.table.data.TimestampData .fromEpochMillis(sliceAssigner$95.getWindowStart(namespace))); } if (false) { aggValue$161.setField(6, null); } else { aggValue$161.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace)); } return aggValue$161; }
/* cleanup: this handler only records the namespace; it holds no per-namespace state views to clear. */
@Override public void cleanup(Long ns) throws Exception { namespace = (Long) ns; } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/global_agg/StateWindowAggsHandler$300.java ================================================ package flink.examples.sql._07.query._04_window_agg._01_tumble_window.global_agg;
/* Code generated by Flink's AggsHandlerCodeGenerator (compiled at runtime by Janino) and
 * checked in here as a reference snapshot of what the planner emits for the global phase of
 * a tumbling-window aggregation. Do not hand-edit the logic: the real class is regenerated
 * on every job submission.
 * Accumulator layout (6 fields): 0=count(*), 1=sum, 2=max, 3=min, 4=count(distinct),
 * 5=raw MapView used as the distinct-value bitmap state.
 * Each aggregate is tracked as a (long value, boolean isNull) pair of fields. */
public final class StateWindowAggsHandler$300 implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedUnsharedSliceAssigner sliceAssigner$233; long agg0_count1; boolean agg0_count1IsNull; long agg1_sum; boolean agg1_sumIsNull; long agg2_max; boolean agg2_maxIsNull; long agg3_min; boolean agg3_minIsNull;
long agg4_count; boolean agg4_countIsNull; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$234; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$235;
/* Primary distinct-state view plus a "backup" view: merge() reads the other accumulator's
 * distinct map through the backup view while writing into the primary one. */
private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview; private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value; private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview_backup; private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_backup_raw_value; private org.apache.flink.table.api.dataview.MapView distinct_view_0; private org.apache.flink.table.api.dataview.MapView distinct_backup_view_0; org.apache.flink.table.data.GenericRowData acc$237 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData acc$239 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData aggValue$299 = new org.apache.flink.table.data.GenericRowData(7); private org.apache.flink.table.runtime.dataview.StateDataViewStore store; private Long namespace;
/* References are injected by the planner: [0] slice assigner, [1]/[2] key/value serializers
 * for the distinct-acc state map. */
public StateWindowAggsHandler$300(Object[] references) throws Exception { sliceAssigner$233 = (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedUnsharedSliceAssigner) references[0])); externalSerializer$234 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1])); externalSerializer$235 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[2])); } private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { return store.getRuntimeContext(); }
/* open: registers the namespaced state map "distinctAcc_0" twice (primary + backup handle)
 * so merge() can iterate one while mutating the other. */
@Override public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { this.store = store; distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("distinctAcc_0", true,
externalSerializer$234, externalSerializer$235); distinctAcc_0_dataview_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview); distinctAcc_0_dataview_backup = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("distinctAcc_0", true, externalSerializer$234, externalSerializer$235); distinctAcc_0_dataview_backup_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview_backup); distinct_view_0 = distinctAcc_0_dataview; distinct_backup_view_0 = distinctAcc_0_dataview_backup; }
/* accumulate: folds one input row into the five aggregates. Input column 2 feeds
 * sum/max/min, column 3 is the distinct-counted key. Each agg follows the generated
 * three-way null pattern: (input null -> keep acc) / (acc null -> take input) /
 * (both present -> combine). The stray `;` after each assignment is generator noise. */
@Override public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { boolean isNull$246; long result$247; long field$248; boolean isNull$248; boolean isNull$249; long result$250; boolean isNull$253; boolean result$254; boolean isNull$258; boolean result$259; long field$263; boolean isNull$263; boolean isNull$265; long result$266; isNull$248 = accInput.isNullAt(2); field$248 = -1L; if (!isNull$248) { field$248 = accInput.getLong(2); } isNull$263 = accInput.isNullAt(3); field$263 = -1L; if (!isNull$263) { field$263 = accInput.getLong(3); } isNull$246 = agg0_count1IsNull || false; result$247 = -1L; if (!isNull$246) { result$247 = (long) (agg0_count1 + ((long) 1L)); } agg0_count1 = result$247; ; agg0_count1IsNull = isNull$246; long result$252 = -1L; boolean isNull$252; if (isNull$248) { isNull$252 = agg1_sumIsNull; if (!isNull$252) { result$252 = agg1_sum; } } else { long result$251 = -1L; boolean isNull$251; if (agg1_sumIsNull) { isNull$251 = isNull$248; if (!isNull$251) { result$251 = field$248; } } else { isNull$249 = agg1_sumIsNull || isNull$248; result$250 = -1L; if (!isNull$249) { result$250 = (long) (agg1_sum + field$248); } isNull$251 = isNull$249; if (!isNull$251) { result$251 = result$250; } } isNull$252 = isNull$251; if (!isNull$252) { result$252 = result$251; } } agg1_sum = result$252; ; agg1_sumIsNull = isNull$252; long result$257 = -1L;
/* (accumulate, continued) max: replace acc when input is larger; min mirrors it below. */
boolean isNull$257; if (isNull$248) { isNull$257 = agg2_maxIsNull; if (!isNull$257) { result$257 = agg2_max; } } else { long result$256 = -1L; boolean isNull$256; if (agg2_maxIsNull) { isNull$256 = isNull$248; if (!isNull$256) { result$256 = field$248; } } else { isNull$253 = isNull$248 || agg2_maxIsNull; result$254 = false; if (!isNull$253) { result$254 = field$248 > agg2_max; } long result$255 = -1L; boolean isNull$255; if (result$254) { isNull$255 = isNull$248; if (!isNull$255) { result$255 = field$248; } } else { isNull$255 = agg2_maxIsNull; if (!isNull$255) { result$255 = agg2_max; } } isNull$256 = isNull$255; if (!isNull$256) { result$256 = result$255; } } isNull$257 = isNull$256; if (!isNull$257) { result$257 = result$256; } } agg2_max = result$257; ; agg2_maxIsNull = isNull$257; long result$262 = -1L; boolean isNull$262; if (isNull$248) { isNull$262 = agg3_minIsNull; if (!isNull$262) { result$262 = agg3_min; } } else { long result$261 = -1L; boolean isNull$261; if (agg3_minIsNull) { isNull$261 = isNull$248; if (!isNull$261) { result$261 = field$248; } } else { isNull$258 = isNull$248 || agg3_minIsNull; result$259 = false; if (!isNull$258) { result$259 = field$248 < agg3_min; } long result$260 = -1L; boolean isNull$260; if (result$259) { isNull$260 = isNull$248; if (!isNull$260) { result$260 = field$248; } } else { isNull$260 = agg3_minIsNull; if (!isNull$260) { result$260 = agg3_min; } } isNull$261 = isNull$260; if (!isNull$261) { result$261 = result$260; } } isNull$262 = isNull$261; if (!isNull$262) { result$262 = result$261; } } agg3_min = result$262; ; agg3_minIsNull = isNull$262;
/* Distinct counting: the map value is a per-key bit mask; bit 0 marks "already seen by the
 * count(distinct) aggregate". Only when the bit flips 0 -> 1 is agg4_count incremented and
 * the map updated. */
Long distinctKey$264 = (Long) field$263; if (isNull$263) { distinctKey$264 = null; } Long value$268 = (Long) distinct_view_0.get(distinctKey$264); if (value$268 == null) { value$268 = 0L; } boolean is_distinct_value_changed_0 = false; long existed$269 = ((long) value$268) & (1L << 0); if (existed$269 == 0) { // not existed
value$268 = ((long) value$268) | (1L << 0);
is_distinct_value_changed_0 = true; long result$267 = -1L; boolean isNull$267; if (isNull$263) { isNull$267 = agg4_countIsNull; if (!isNull$267) { result$267 = agg4_count; } } else { isNull$265 = agg4_countIsNull || false; result$266 = -1L; if (!isNull$265) { result$266 = (long) (agg4_count + ((long) 1L)); } isNull$267 = isNull$265; if (!isNull$267) { result$267 = result$266; } } agg4_count = result$267; ; agg4_countIsNull = isNull$267; } if (is_distinct_value_changed_0) { distinct_view_0.put(distinctKey$264, value$268); } }
/* retract: intentionally unsupported — the planner determined this query never retracts. */
@Override public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { throw new RuntimeException( "This function not require retract method, but the retract method is called."); }
/* merge: combines another accumulator row (e.g. from a pre-aggregated pane) into this one.
 * Columns 0-3 of otherAcc carry count/sum/max/min; column 5 carries the other distinct map. */
@Override public void merge(Long ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { namespace = (Long) ns; long field$270; boolean isNull$270; boolean isNull$271; long result$272; long field$273; boolean isNull$273; boolean isNull$274; long result$275; long field$278; boolean isNull$278; boolean isNull$279; boolean result$280; long field$284; boolean isNull$284; boolean isNull$285; boolean result$286; org.apache.flink.table.data.binary.BinaryRawValueData field$290; boolean isNull$290; boolean isNull$294; long result$295; isNull$278 = otherAcc.isNullAt(2); field$278 = -1L; if (!isNull$278) { field$278 = otherAcc.getLong(2); } isNull$273 = otherAcc.isNullAt(1); field$273 = -1L; if (!isNull$273) { field$273 = otherAcc.getLong(1); } isNull$270 = otherAcc.isNullAt(0); field$270 = -1L; if (!isNull$270) { field$270 = otherAcc.getLong(0); } isNull$284 = otherAcc.isNullAt(3); field$284 = -1L; if (!isNull$284) { field$284 = otherAcc.getLong(3); } // when namespace is null, the dataview is used in heap, no key and namespace set
if (namespace != null) { distinctAcc_0_dataview_backup.setCurrentNamespace(namespace); distinct_backup_view_0 = distinctAcc_0_dataview_backup; } else { isNull$290 = otherAcc.isNullAt(5); field$290 = null; if
(!isNull$290) { field$290 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(5)); } distinct_backup_view_0 = (org.apache.flink.table.api.dataview.MapView) field$290.getJavaObject(); } isNull$271 = agg0_count1IsNull || isNull$270; result$272 = -1L; if (!isNull$271) { result$272 = (long) (agg0_count1 + field$270); } agg0_count1 = result$272; ; agg0_count1IsNull = isNull$271; long result$277 = -1L; boolean isNull$277; if (isNull$273) { isNull$277 = agg1_sumIsNull; if (!isNull$277) { result$277 = agg1_sum; } } else { long result$276 = -1L; boolean isNull$276; if (agg1_sumIsNull) { isNull$276 = isNull$273; if (!isNull$276) { result$276 = field$273; } } else { isNull$274 = agg1_sumIsNull || isNull$273; result$275 = -1L; if (!isNull$274) { result$275 = (long) (agg1_sum + field$273); } isNull$276 = isNull$274; if (!isNull$276) { result$276 = result$275; } } isNull$277 = isNull$276; if (!isNull$277) { result$277 = result$276; } } agg1_sum = result$277; ; agg1_sumIsNull = isNull$277; long result$283 = -1L; boolean isNull$283; if (isNull$278) { isNull$283 = agg2_maxIsNull; if (!isNull$283) { result$283 = agg2_max; } } else { long result$282 = -1L; boolean isNull$282; if (agg2_maxIsNull) { isNull$282 = isNull$278; if (!isNull$282) { result$282 = field$278; } } else { isNull$279 = isNull$278 || agg2_maxIsNull; result$280 = false; if (!isNull$279) { result$280 = field$278 > agg2_max; } long result$281 = -1L; boolean isNull$281; if (result$280) { isNull$281 = isNull$278; if (!isNull$281) { result$281 = field$278; } } else { isNull$281 = agg2_maxIsNull; if (!isNull$281) { result$281 = agg2_max; } } isNull$282 = isNull$281; if (!isNull$282) { result$282 = result$281; } } isNull$283 = isNull$282; if (!isNull$283) { result$283 = result$282; } } agg2_max = result$283; ; agg2_maxIsNull = isNull$283; long result$289 = -1L; boolean isNull$289; if (isNull$284) { isNull$289 = agg3_minIsNull; if (!isNull$289) { result$289 = agg3_min; } } else { long result$288 =
-1L; boolean isNull$288; if (agg3_minIsNull) { isNull$288 = isNull$284; if (!isNull$288) { result$288 = field$284; } } else { isNull$285 = isNull$284 || agg3_minIsNull; result$286 = false; if (!isNull$285) { result$286 = field$284 < agg3_min; } long result$287 = -1L; boolean isNull$287; if (result$286) { isNull$287 = isNull$284; if (!isNull$287) { result$287 = field$284; } } else { isNull$287 = agg3_minIsNull; if (!isNull$287) { result$287 = agg3_min; } } isNull$288 = isNull$287; if (!isNull$288) { result$288 = result$287; } } isNull$289 = isNull$288; if (!isNull$289) { result$289 = result$288; } } agg3_min = result$289; ; agg3_minIsNull = isNull$289;
/* Merge the other side's distinct bitmap: for each key, OR the bit masks together; the
 * distinct count is incremented only when bit 0 was absent here but present in the other
 * accumulator, so each key is counted exactly once across merges. */
Iterable otherEntries$297 = (Iterable) distinct_backup_view_0.entries(); if (otherEntries$297 != null) { for (java.util.Map.Entry entry : otherEntries$297) { Long distinctKey$291 = (Long) entry.getKey(); long field$292 = -1L; boolean isNull$293 = true; if (distinctKey$291 != null) { isNull$293 = false; field$292 = (long) distinctKey$291; } Long otherValue = (Long) entry.getValue(); Long thisValue = (Long) distinct_view_0.get(distinctKey$291); if (thisValue == null) { thisValue = 0L; } boolean is_distinct_value_changed_0 = false; boolean is_distinct_value_empty_0 = false; long existed$298 = ((long) thisValue) & (1L << 0); if (existed$298 == 0) { // not existed
long otherExisted = ((long) otherValue) & (1L << 0); if (otherExisted != 0) { // existed in other
is_distinct_value_changed_0 = true; // do accumulate
long result$296 = -1L; boolean isNull$296; if (isNull$293) { isNull$296 = agg4_countIsNull; if (!isNull$296) { result$296 = agg4_count; } } else { isNull$294 = agg4_countIsNull || false; result$295 = -1L; if (!isNull$294) { result$295 = (long) (agg4_count + ((long) 1L)); } isNull$296 = isNull$294; if (!isNull$296) { result$296 = result$295; } } agg4_count = result$296; ; agg4_countIsNull = isNull$296; } } thisValue = ((long) thisValue) | ((long) otherValue); is_distinct_value_empty_0 = false; if
(is_distinct_value_empty_0) { distinct_view_0.remove(distinctKey$291); } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update
distinct_view_0.put(distinctKey$291, thisValue); } } // end foreach
} // end otherEntries != null
}
/* setAccumulators: loads the flat accumulator row (layout documented on the class) into the
 * working fields, and rebinds the distinct MapView — from keyed state when a namespace is
 * set, otherwise from the raw value embedded in the row (heap/local mode). */
@Override public void setAccumulators(Long ns, org.apache.flink.table.data.RowData acc) throws Exception { namespace = (Long) ns; long field$240; boolean isNull$240; long field$241; boolean isNull$241; long field$242; boolean isNull$242; long field$243; boolean isNull$243; long field$244; boolean isNull$244; org.apache.flink.table.data.binary.BinaryRawValueData field$245; boolean isNull$245; isNull$244 = acc.isNullAt(4); field$244 = -1L; if (!isNull$244) { field$244 = acc.getLong(4); } isNull$240 = acc.isNullAt(0); field$240 = -1L; if (!isNull$240) { field$240 = acc.getLong(0); } isNull$241 = acc.isNullAt(1); field$241 = -1L; if (!isNull$241) { field$241 = acc.getLong(1); } isNull$243 = acc.isNullAt(3); field$243 = -1L; if (!isNull$243) { field$243 = acc.getLong(3); } // when namespace is null, the dataview is used in heap, no key and namespace set
if (namespace != null) { distinctAcc_0_dataview.setCurrentNamespace(namespace); distinct_view_0 = distinctAcc_0_dataview; } else { isNull$245 = acc.isNullAt(5); field$245 = null; if (!isNull$245) { field$245 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); } distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$245.getJavaObject(); } isNull$242 = acc.isNullAt(2); field$242 = -1L; if (!isNull$242) { field$242 = acc.getLong(2); } agg0_count1 = field$240; ; agg0_count1IsNull = isNull$240; agg1_sum = field$241; ; agg1_sumIsNull = isNull$241; agg2_max = field$242; ; agg2_maxIsNull = isNull$242; agg3_min = field$243; ; agg3_minIsNull = isNull$243; agg4_count = field$244; ; agg4_countIsNull = isNull$244; }
/* getAccumulators: snapshots the working fields back into a 6-column row; field 5 wraps the
 * current distinct MapView as a raw value. */
@Override public org.apache.flink.table.data.RowData getAccumulators() throws Exception { acc$239 = new
org.apache.flink.table.data.GenericRowData(6); if (agg0_count1IsNull) { acc$239.setField(0, null); } else { acc$239.setField(0, agg0_count1); } if (agg1_sumIsNull) { acc$239.setField(1, null); } else { acc$239.setField(1, agg1_sum); } if (agg2_maxIsNull) { acc$239.setField(2, null); } else { acc$239.setField(2, agg2_max); } if (agg3_minIsNull) { acc$239.setField(3, null); } else { acc$239.setField(3, agg3_min); } if (agg4_countIsNull) { acc$239.setField(4, null); } else { acc$239.setField(4, agg4_count); } org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$238 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); if (false) { acc$239.setField(5, null); } else { acc$239.setField(5, distinct_acc$238); } return acc$239; }
/* createAccumulators: initial row — counts start at 0, sum/max/min start NULL (the constant
 * `if (true)/if (false)` branches encode each aggregate's initial nullability), and field 5
 * gets a fresh empty MapView. */
@Override public org.apache.flink.table.data.RowData createAccumulators() throws Exception { acc$237 = new org.apache.flink.table.data.GenericRowData(6); if (false) { acc$237.setField(0, null); } else { acc$237.setField(0, ((long) 0L)); } if (true) { acc$237.setField(1, null); } else { acc$237.setField(1, ((long) -1L)); } if (true) { acc$237.setField(2, null); } else { acc$237.setField(2, ((long) -1L)); } if (true) { acc$237.setField(3, null); } else { acc$237.setField(3, ((long) -1L)); } if (false) { acc$237.setField(4, null); } else { acc$237.setField(4, ((long) 0L)); } org.apache.flink.table.api.dataview.MapView mapview$236 = new org.apache.flink.table.api.dataview.MapView(); org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$236 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$236); if (false) { acc$237.setField(5, null); } else { acc$237.setField(5, distinct_acc$236); } return acc$237; }
/* getValue: final 7-column output — five agg results plus window_start/window_end. */
@Override public org.apache.flink.table.data.RowData getValue(Long ns) throws Exception { namespace = (Long) ns; aggValue$299 = new org.apache.flink.table.data.GenericRowData(7); if (agg0_count1IsNull) { aggValue$299.setField(0, null); } else { aggValue$299.setField(0,
agg0_count1); } if (agg1_sumIsNull) { aggValue$299.setField(1, null); } else { aggValue$299.setField(1, agg1_sum); } if (agg2_maxIsNull) { aggValue$299.setField(2, null); } else { aggValue$299.setField(2, agg2_max); } if (agg3_minIsNull) { aggValue$299.setField(3, null); } else { aggValue$299.setField(3, agg3_min); } if (agg4_countIsNull) { aggValue$299.setField(4, null); } else { aggValue$299.setField(4, agg4_count); } if (false) { aggValue$299.setField(5, null); } else { aggValue$299.setField(5, org.apache.flink.table.data.TimestampData .fromEpochMillis(sliceAssigner$233.getWindowStart(namespace))); } if (false) { aggValue$299.setField(6, null); } else { aggValue$299.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace)); } return aggValue$299; }
/* cleanup: unlike the heap-only handler above, this one clears the namespaced distinct
 * state map when the window is disposed. */
@Override public void cleanup(Long ns) throws Exception { namespace = (Long) ns; distinctAcc_0_dataview.setCurrentNamespace(namespace); distinctAcc_0_dataview.clear(); } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/local_agg/KeyProjection$89.java ================================================ package flink.examples.sql._07.query._04_window_agg._01_tumble_window.local_agg;
/* Generated key projection: extracts the 2-field grouping key (a string at index 0 and an
 * int at index 1) from an input row into a reusable BinaryRowData. NOTE: `out`/`outWriter`
 * are shared across calls — apply() returns the same mutated object each time, so callers
 * must copy the result if they retain it. */
public class KeyProjection$89 implements org.apache.flink.table.runtime.generated.Projection { org.apache.flink.table.data.binary.BinaryRowData out = new org.apache.flink.table.data.binary.BinaryRowData(2); org.apache.flink.table.data.writer.BinaryRowWriter outWriter = new org.apache.flink.table.data.writer.BinaryRowWriter(out); public KeyProjection$89(Object[] references) throws Exception { }
/* apply: copy column 0 (string) and column 1 (int) of in1 into `out`, preserving nulls. */
@Override public org.apache.flink.table.data.binary.BinaryRowData apply(org.apache.flink.table.data.RowData in1) { org.apache.flink.table.data.binary.BinaryStringData field$90; boolean isNull$90; int field$91; boolean isNull$91; outWriter.reset(); isNull$90 = in1.isNullAt(0);
field$90 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8; if (!isNull$90) { field$90 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(0)); } if (isNull$90) { outWriter.setNullAt(0); } else { outWriter.writeString(0, field$90); } isNull$91 = in1.isNullAt(1); field$91 = -1; if (!isNull$91) { field$91 = in1.getInt(1); } if (isNull$91) { outWriter.setNullAt(1); } else { outWriter.writeInt(1, field$91); } outWriter.complete(); return out; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_01_tumble_window/local_agg/LocalWindowAggsHandler$88.java ================================================ package flink.examples.sql._07.query._04_window_agg._01_tumble_window.local_agg;
/* Generated local (pre-aggregation) phase of the same tumbling-window query: same five
 * aggregates and 6-field accumulator layout as the global handler, but the distinct MapView
 * lives on the heap (no StateMapView) and getValue emits a 5-field partial row
 * (aggValue$87) — presumably consumed by the global phase's merge(); confirm against the
 * planner output. */
public final class LocalWindowAggsHandler$88 implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.TumblingSliceAssigner sliceAssigner$21; long agg0_count1; boolean agg0_count1IsNull; long agg1_sum; boolean agg1_sumIsNull; long agg2_max; boolean agg2_maxIsNull; long agg3_min; boolean agg3_minIsNull; long agg4_count; boolean agg4_countIsNull; private org.apache.flink.table.api.dataview.MapView distinct_view_0; org.apache.flink.table.data.GenericRowData acc$23 = new org.apache.flink.table.data.GenericRowData(6); org.apache.flink.table.data.GenericRowData acc$25 = new org.apache.flink.table.data.GenericRowData(6); private org.apache.flink.table.api.dataview.MapView otherMapView$77; private transient org.apache.flink.table.data.conversion.RawObjectConverter converter$78; org.apache.flink.table.data.GenericRowData aggValue$87 = new org.apache.flink.table.data.GenericRowData(5); private org.apache.flink.table.runtime.dataview.StateDataViewStore store; private Long namespace; public LocalWindowAggsHandler$88(Object[] references) throws Exception { sliceAssigner$21 =
(((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.TumblingSliceAssigner) references[0])); converter$78 = (((org.apache.flink.table.data.conversion.RawObjectConverter) references[1])); } private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { return store.getRuntimeContext(); } @Override public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { this.store = store; converter$78.open(getRuntimeContext().getUserCodeClassLoader()); } @Override public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { boolean isNull$32; long result$33; long field$34; boolean isNull$34; boolean isNull$35; long result$36; boolean isNull$39; boolean result$40; boolean isNull$44; boolean result$45; long field$49; boolean isNull$49; boolean isNull$51; long result$52; isNull$34 = accInput.isNullAt(2); field$34 = -1L; if (!isNull$34) { field$34 = accInput.getLong(2); } isNull$49 = accInput.isNullAt(3); field$49 = -1L; if (!isNull$49) { field$49 = accInput.getLong(3); } isNull$32 = agg0_count1IsNull || false; result$33 = -1L; if (!isNull$32) { result$33 = (long) (agg0_count1 + ((long) 1L)); } agg0_count1 = result$33; ; agg0_count1IsNull = isNull$32; long result$38 = -1L; boolean isNull$38; if (isNull$34) { isNull$38 = agg1_sumIsNull; if (!isNull$38) { result$38 = agg1_sum; } } else { long result$37 = -1L; boolean isNull$37; if (agg1_sumIsNull) { isNull$37 = isNull$34; if (!isNull$37) { result$37 = field$34; } } else { isNull$35 = agg1_sumIsNull || isNull$34; result$36 = -1L; if (!isNull$35) { result$36 = (long) (agg1_sum + field$34); } isNull$37 = isNull$35; if (!isNull$37) { result$37 = result$36; } } isNull$38 = isNull$37; if (!isNull$38) { result$38 = result$37; } } agg1_sum = result$38; ; agg1_sumIsNull = isNull$38; long result$43 = -1L; boolean isNull$43; if (isNull$34) { isNull$43 = agg2_maxIsNull; if (!isNull$43) { result$43 = agg2_max; } } else { long result$42 = 
-1L; boolean isNull$42; if (agg2_maxIsNull) { isNull$42 = isNull$34; if (!isNull$42) { result$42 = field$34; } } else { isNull$39 = isNull$34 || agg2_maxIsNull; result$40 = false; if (!isNull$39) { result$40 = field$34 > agg2_max; } long result$41 = -1L; boolean isNull$41; if (result$40) { isNull$41 = isNull$34; if (!isNull$41) { result$41 = field$34; } } else { isNull$41 = agg2_maxIsNull; if (!isNull$41) { result$41 = agg2_max; } } isNull$42 = isNull$41; if (!isNull$42) { result$42 = result$41; } } isNull$43 = isNull$42; if (!isNull$43) { result$43 = result$42; } } agg2_max = result$43; ; agg2_maxIsNull = isNull$43; long result$48 = -1L; boolean isNull$48; if (isNull$34) { isNull$48 = agg3_minIsNull; if (!isNull$48) { result$48 = agg3_min; } } else { long result$47 = -1L; boolean isNull$47; if (agg3_minIsNull) { isNull$47 = isNull$34; if (!isNull$47) { result$47 = field$34; } } else { isNull$44 = isNull$34 || agg3_minIsNull; result$45 = false; if (!isNull$44) { result$45 = field$34 < agg3_min; } long result$46 = -1L; boolean isNull$46; if (result$45) { isNull$46 = isNull$34; if (!isNull$46) { result$46 = field$34; } } else { isNull$46 = agg3_minIsNull; if (!isNull$46) { result$46 = agg3_min; } } isNull$47 = isNull$46; if (!isNull$47) { result$47 = result$46; } } isNull$48 = isNull$47; if (!isNull$48) { result$48 = result$47; } } agg3_min = result$48; ; agg3_minIsNull = isNull$48; Long distinctKey$50 = (Long) field$49; if (isNull$49) { distinctKey$50 = null; } Long value$54 = (Long) distinct_view_0.get(distinctKey$50); if (value$54 == null) { value$54 = 0L; } boolean is_distinct_value_changed_0 = false; long existed$55 = ((long) value$54) & (1L << 0); if (existed$55 == 0) { // not existed value$54 = ((long) value$54) | (1L << 0); is_distinct_value_changed_0 = true; long result$53 = -1L; boolean isNull$53; if (isNull$49) { isNull$53 = agg4_countIsNull; if (!isNull$53) { result$53 = agg4_count; } } else { isNull$51 = agg4_countIsNull || false; result$52 = -1L; if 
(!isNull$51) { result$52 = (long) (agg4_count + ((long) 1L)); } isNull$53 = isNull$51; if (!isNull$53) { result$53 = result$52; } } agg4_count = result$53; ; agg4_countIsNull = isNull$53; } if (is_distinct_value_changed_0) { distinct_view_0.put(distinctKey$50, value$54); } } @Override public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { throw new RuntimeException( "This function not require retract method, but the retract method is called."); } @Override public void merge(Long ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { namespace = (Long) ns; long field$56; boolean isNull$56; boolean isNull$57; long result$58; long field$59; boolean isNull$59; boolean isNull$60; long result$61; long field$64; boolean isNull$64; boolean isNull$65; boolean result$66; long field$70; boolean isNull$70; boolean isNull$71; boolean result$72; org.apache.flink.table.data.binary.BinaryRawValueData field$76; boolean isNull$76; boolean isNull$82; long result$83; isNull$64 = otherAcc.isNullAt(2); field$64 = -1L; if (!isNull$64) { field$64 = otherAcc.getLong(2); } isNull$59 = otherAcc.isNullAt(1); field$59 = -1L; if (!isNull$59) { field$59 = otherAcc.getLong(1); } isNull$56 = otherAcc.isNullAt(0); field$56 = -1L; if (!isNull$56) { field$56 = otherAcc.getLong(0); } isNull$70 = otherAcc.isNullAt(3); field$70 = -1L; if (!isNull$70) { field$70 = otherAcc.getLong(3); } isNull$76 = otherAcc.isNullAt(5); field$76 = null; if (!isNull$76) { field$76 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(5)); } otherMapView$77 = null; if (!isNull$76) { otherMapView$77 = (org.apache.flink.table.api.dataview.MapView) converter$78 .toExternal((org.apache.flink.table.data.binary.BinaryRawValueData) field$76); } isNull$57 = agg0_count1IsNull || isNull$56; result$58 = -1L; if (!isNull$57) { result$58 = (long) (agg0_count1 + field$56); } agg0_count1 = result$58; ; agg0_count1IsNull = isNull$57; long result$63 = -1L; boolean 
isNull$63; if (isNull$59) { isNull$63 = agg1_sumIsNull; if (!isNull$63) { result$63 = agg1_sum; } } else { long result$62 = -1L; boolean isNull$62; if (agg1_sumIsNull) { isNull$62 = isNull$59; if (!isNull$62) { result$62 = field$59; } } else { isNull$60 = agg1_sumIsNull || isNull$59; result$61 = -1L; if (!isNull$60) { result$61 = (long) (agg1_sum + field$59); } isNull$62 = isNull$60; if (!isNull$62) { result$62 = result$61; } } isNull$63 = isNull$62; if (!isNull$63) { result$63 = result$62; } } agg1_sum = result$63; ; agg1_sumIsNull = isNull$63; long result$69 = -1L; boolean isNull$69; if (isNull$64) { isNull$69 = agg2_maxIsNull; if (!isNull$69) { result$69 = agg2_max; } } else { long result$68 = -1L; boolean isNull$68; if (agg2_maxIsNull) { isNull$68 = isNull$64; if (!isNull$68) { result$68 = field$64; } } else { isNull$65 = isNull$64 || agg2_maxIsNull; result$66 = false; if (!isNull$65) { result$66 = field$64 > agg2_max; } long result$67 = -1L; boolean isNull$67; if (result$66) { isNull$67 = isNull$64; if (!isNull$67) { result$67 = field$64; } } else { isNull$67 = agg2_maxIsNull; if (!isNull$67) { result$67 = agg2_max; } } isNull$68 = isNull$67; if (!isNull$68) { result$68 = result$67; } } isNull$69 = isNull$68; if (!isNull$69) { result$69 = result$68; } } agg2_max = result$69; ; agg2_maxIsNull = isNull$69; long result$75 = -1L; boolean isNull$75; if (isNull$70) { isNull$75 = agg3_minIsNull; if (!isNull$75) { result$75 = agg3_min; } } else { long result$74 = -1L; boolean isNull$74; if (agg3_minIsNull) { isNull$74 = isNull$70; if (!isNull$74) { result$74 = field$70; } } else { isNull$71 = isNull$70 || agg3_minIsNull; result$72 = false; if (!isNull$71) { result$72 = field$70 < agg3_min; } long result$73 = -1L; boolean isNull$73; if (result$72) { isNull$73 = isNull$70; if (!isNull$73) { result$73 = field$70; } } else { isNull$73 = agg3_minIsNull; if (!isNull$73) { result$73 = agg3_min; } } isNull$74 = isNull$73; if (!isNull$74) { result$74 = result$73; } } isNull$75 
= isNull$74; if (!isNull$75) { result$75 = result$74; } } agg3_min = result$75; ; agg3_minIsNull = isNull$75; Iterable otherEntries$85 = (Iterable) otherMapView$77.entries(); if (otherEntries$85 != null) { for (java.util.Map.Entry entry : otherEntries$85) { Long distinctKey$79 = (Long) entry.getKey(); long field$80 = -1L; boolean isNull$81 = true; if (distinctKey$79 != null) { isNull$81 = false; field$80 = (long) distinctKey$79; } Long otherValue = (Long) entry.getValue(); Long thisValue = (Long) distinct_view_0.get(distinctKey$79); if (thisValue == null) { thisValue = 0L; } boolean is_distinct_value_changed_0 = false; boolean is_distinct_value_empty_0 = false; long existed$86 = ((long) thisValue) & (1L << 0); if (existed$86 == 0) { // not existed long otherExisted = ((long) otherValue) & (1L << 0); if (otherExisted != 0) { // existed in other is_distinct_value_changed_0 = true; // do accumulate long result$84 = -1L; boolean isNull$84; if (isNull$81) { isNull$84 = agg4_countIsNull; if (!isNull$84) { result$84 = agg4_count; } } else { isNull$82 = agg4_countIsNull || false; result$83 = -1L; if (!isNull$82) { result$83 = (long) (agg4_count + ((long) 1L)); } isNull$84 = isNull$82; if (!isNull$84) { result$84 = result$83; } } agg4_count = result$84; ; agg4_countIsNull = isNull$84; } } thisValue = ((long) thisValue) | ((long) otherValue); is_distinct_value_empty_0 = false; if (is_distinct_value_empty_0) { distinct_view_0.remove(distinctKey$79); } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update distinct_view_0.put(distinctKey$79, thisValue); } } // end foreach } // end otherEntries != null } @Override public void setAccumulators(Long ns, org.apache.flink.table.data.RowData acc) throws Exception { namespace = (Long) ns; long field$26; boolean isNull$26; long field$27; boolean isNull$27; long field$28; boolean isNull$28; long field$29; boolean isNull$29; long field$30; boolean isNull$30; 
org.apache.flink.table.data.binary.BinaryRawValueData field$31; boolean isNull$31; isNull$30 = acc.isNullAt(4); field$30 = -1L; if (!isNull$30) { field$30 = acc.getLong(4); } isNull$26 = acc.isNullAt(0); field$26 = -1L; if (!isNull$26) { field$26 = acc.getLong(0); } isNull$27 = acc.isNullAt(1); field$27 = -1L; if (!isNull$27) { field$27 = acc.getLong(1); } isNull$29 = acc.isNullAt(3); field$29 = -1L; if (!isNull$29) { field$29 = acc.getLong(3); } isNull$31 = acc.isNullAt(5); field$31 = null; if (!isNull$31) { field$31 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); } distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$31.getJavaObject(); isNull$28 = acc.isNullAt(2); field$28 = -1L; if (!isNull$28) { field$28 = acc.getLong(2); } agg0_count1 = field$26; ; agg0_count1IsNull = isNull$26; agg1_sum = field$27; ; agg1_sumIsNull = isNull$27; agg2_max = field$28; ; agg2_maxIsNull = isNull$28; agg3_min = field$29; ; agg3_minIsNull = isNull$29; agg4_count = field$30; ; agg4_countIsNull = isNull$30; } @Override public org.apache.flink.table.data.RowData getAccumulators() throws Exception { acc$25 = new org.apache.flink.table.data.GenericRowData(6); if (agg0_count1IsNull) { acc$25.setField(0, null); } else { acc$25.setField(0, agg0_count1); } if (agg1_sumIsNull) { acc$25.setField(1, null); } else { acc$25.setField(1, agg1_sum); } if (agg2_maxIsNull) { acc$25.setField(2, null); } else { acc$25.setField(2, agg2_max); } if (agg3_minIsNull) { acc$25.setField(3, null); } else { acc$25.setField(3, agg3_min); } if (agg4_countIsNull) { acc$25.setField(4, null); } else { acc$25.setField(4, agg4_count); } org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$24 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); if (false) { acc$25.setField(5, null); } else { acc$25.setField(5, distinct_acc$24); } return acc$25; } @Override public org.apache.flink.table.data.RowData createAccumulators() throws 
Exception { acc$23 = new org.apache.flink.table.data.GenericRowData(6); if (false) { acc$23.setField(0, null); } else { acc$23.setField(0, ((long) 0L)); } if (true) { acc$23.setField(1, null); } else { acc$23.setField(1, ((long) -1L)); } if (true) { acc$23.setField(2, null); } else { acc$23.setField(2, ((long) -1L)); } if (true) { acc$23.setField(3, null); } else { acc$23.setField(3, ((long) -1L)); } if (false) { acc$23.setField(4, null); } else { acc$23.setField(4, ((long) 0L)); } org.apache.flink.table.api.dataview.MapView mapview$22 = new org.apache.flink.table.api.dataview.MapView(); org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$22 = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$22); if (false) { acc$23.setField(5, null); } else { acc$23.setField(5, distinct_acc$22); } return acc$23; } @Override public org.apache.flink.table.data.RowData getValue(Long ns) throws Exception { namespace = (Long) ns; aggValue$87 = new org.apache.flink.table.data.GenericRowData(5); if (agg0_count1IsNull) { aggValue$87.setField(0, null); } else { aggValue$87.setField(0, agg0_count1); } if (agg1_sumIsNull) { aggValue$87.setField(1, null); } else { aggValue$87.setField(1, agg1_sum); } if (agg2_maxIsNull) { aggValue$87.setField(2, null); } else { aggValue$87.setField(2, agg2_max); } if (agg3_minIsNull) { aggValue$87.setField(3, null); } else { aggValue$87.setField(3, agg3_min); } if (agg4_countIsNull) { aggValue$87.setField(4, null); } else { aggValue$87.setField(4, agg4_count); } return aggValue$87; } @Override public void cleanup(Long ns) throws Exception { namespace = (Long) ns; } @Override public void close() throws Exception { } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/CumulateWindowGroupingSetsBigintTest.java ================================================ package 
flink.examples.sql._07.query._04_window_agg._02_cumulate_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Cumulate-window UV example with GROUPING SETS where the {@code age} dimension is BIGINT.
 *
 * <p>The script creates a datagen source and a print sink, then runs a two-level query:
 * the inner query counts distinct users per bucket ({@code mod(user_id, 1024)}) inside a
 * CUMULATE window over every grouping set of (age, sex); the outer query sums the bucket
 * UVs per (age, sex, window_end). Because {@code age} is BIGINT, the inner projection
 * casts it to STRING before substituting 'ALL' for the rolled-up rows.
 */
public class CumulateWindowGroupingSetsBigintTest {

    public static void main(String[] args) throws Exception {
        // Build the stream/table environment from CLI args; single parallelism keeps
        // the printed output easy to follow.
        FlinkEnv env = FlinkEnvUtils.getStreamTableEnv(args);
        env.env().setParallelism(1);

        String script = "CREATE TABLE source_table (\n"
                + " age BIGINT,\n"
                + " sex STRING,\n"
                + " user_id BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.age.min' = '1',\n"
                + " 'fields.age.max' = '10',\n"
                + " 'fields.sex.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " age STRING,\n"
                + " sex STRING,\n"
                + " uv BIGINT,\n"
                + " window_end bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "select age,\n"
                + " sex,\n"
                + " sum(bucket_uv) as uv,\n"
                + " max(window_end) as window_end\n"
                + "from (\n"
                + " SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n"
                + " window_start, \n"
                + " if (age is null, 'ALL', cast(age as string)) as age,\n"
                + " if (sex is null, 'ALL', sex) as sex,\n"
                + " count(distinct user_id) as bucket_uv\n"
                + " FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + " GROUP BY window_start, \n"
                + " window_end,\n"
                + " GROUPING SETS ((), (age), (sex), (age, sex)),\n"
                + " mod(user_id, 1024)\n"
                + ")\n"
                + "group by age,\n"
                + " sex,\n"
                + " window_end;";

        // The script holds several statements separated by ';' — submit them one by one.
        for (String statement : script.split(";")) {
            env.streamTEnv().executeSql(statement);
        }
    }
}
================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/CumulateWindowGroupingSetsTest.java ================================================
package flink.examples.sql._07.query._04_window_agg._02_cumulate_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Cumulate-window UV example with GROUPING SETS where the {@code age} dimension is STRING.
 *
 * <p>Inner query: per-bucket ({@code mod(user_id, 1024)}) distinct-user counts inside a
 * CUMULATE window, computed for every grouping set of (age, sex), with NULL roll-up keys
 * replaced by 'ALL'. Outer query: sums the bucket UVs per (age, sex, window_end) and
 * writes to a print sink.
 */
public class CumulateWindowGroupingSetsTest {

    public static void main(String[] args) throws Exception {
        // Environment setup: parallelism 1 keeps printed output readable.
        FlinkEnv env = FlinkEnvUtils.getStreamTableEnv(args);
        env.env().setParallelism(1);

        String script = "CREATE TABLE source_table (\n"
                + " age STRING,\n"
                + " sex STRING,\n"
                + " user_id BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.age.length' = '1',\n"
                + " 'fields.sex.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " age STRING,\n"
                + " sex STRING,\n"
                + " uv BIGINT,\n"
                + " window_end bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "select age,\n"
                + " sex,\n"
                + " sum(bucket_uv) as uv,\n"
                + " max(window_end) as window_end\n"
                + "from (\n"
                + " SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n"
                + " window_start, \n"
                + " if (age is null, 'ALL', age) as age,\n"
                + " if (sex is null, 'ALL', sex) as sex,\n"
                + " count(distinct user_id) as bucket_uv\n"
                + " FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + " GROUP BY window_start, \n"
                + " window_end,\n"
                + " GROUPING SETS ((), (age), (sex), (age, sex)),\n"
                + " mod(user_id, 1024)\n"
                + ")\n"
                + "group by age,\n"
                + " sex,\n"
                + " window_end;";

        // Submit every ';'-separated statement in order (DDLs first, then the INSERT).
        for (String statement : script.split(";")) {
            env.streamTEnv().executeSql(statement);
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/CumulateWindowTest.java ================================================
package flink.examples.sql._07.query._04_window_agg._02_cumulate_window;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Two-stage CUMULATE window aggregation example.
 *
 * <p>Stage one (the {@code tmp} CTE) buckets rows by {@code mod(id, 1000)} and computes
 * per-bucket sum(money) and count(distinct id) inside a CUMULATE window; stage two
 * re-windows the bucket results on the window time and sums them, writing to a print sink.
 */
public class CumulateWindowTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        // FIX(review): the original also assembled a second, dim-bucketed pipeline into a
        // local variable `sql` that was never executed (dead code) — it has been removed.
        // Only the statements below were ever submitted.
        String exampleSql = "CREATE TABLE source_table (\n"
                + " id BIGINT,\n"
                + " money BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp_LTZ(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1000',\n"
                + " 'fields.id.min' = '1',\n"
                + " 'fields.id.max' = '100000',\n"
                + " 'fields.money.min' = '1',\n"
                + " 'fields.money.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " window_end bigint,\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT,\n"
                + " count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "with tmp as (\n"
                + " SELECT \n"
                + " window_time as r_time,\n"
                + " sum(money) as sum_money,\n"
                + " count(distinct id) as count_distinct_id\n"
                + " FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '60' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + " GROUP BY window_start, \n"
                + " window_end,\n"
                + " window_time,\n"
                + " mod(id, 1000)\n"
                + ")\n"
                + "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n"
                + " window_start, \n"
                + " sum(sum_money) as sum_money,\n"
                + " sum(count_distinct_id) as count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE tmp\n"
                + " , DESCRIPTOR(r_time)\n"
                + " , INTERVAL '60' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end";

        // Submit each ';'-separated statement (two DDLs, then the two-stage INSERT).
        for (String innerSql : exampleSql.split(";")) {
            flinkEnv.streamTEnv().executeSql(innerSql);
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/TumbleWindowEarlyFireTest.java ================================================ package
flink.examples.sql._07.query._04_window_agg._02_cumulate_window;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Tumble-window example with early-fire enabled: partial window results are emitted
 * every 60 seconds instead of only when the daily window closes.
 *
 * <p>The query pre-aggregates per bucket ({@code mod(user_id, 1024)}) inside a daily
 * tumble window, then merges the buckets per (dim, window_start) into a print sink.
 */
public class TumbleWindowEarlyFireTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv env = FlinkEnvUtils.getStreamTableEnv(args);

        // Enable early fire with a 60s delay before building and submitting the job.
        env.getStreamTableEnvironment().getConfig().getConfiguration().setString("table.exec.emit.early-fire.enabled", "true");
        env.getStreamTableEnvironment().getConfig().getConfiguration().setString("table.exec.emit.early-fire.delay", "60 s");

        String statements = "CREATE TABLE source_table (\n"
                + " dim BIGINT,\n"
                + " user_id BIGINT,\n"
                + " price BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.dim.min' = '1',\n"
                + " 'fields.dim.max' = '2',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.price.min' = '1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " dim BIGINT,\n"
                + " pv BIGINT,\n"
                + " sum_price BIGINT,\n"
                + " max_price BIGINT,\n"
                + " min_price BIGINT,\n"
                + " uv BIGINT,\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "select dim,\n"
                + " sum(bucket_pv) as pv,\n"
                + " sum(bucket_sum_price) as sum_price,\n"
                + " max(bucket_max_price) as max_price,\n"
                + " min(bucket_min_price) as min_price,\n"
                + " sum(bucket_uv) as uv,\n"
                + " max(window_start) as window_start\n"
                + "from (\n"
                + " select dim,\n"
                + " count(*) as bucket_pv,\n"
                + " sum(price) as bucket_sum_price,\n"
                + " max(price) as bucket_max_price,\n"
                + " min(price) as bucket_min_price,\n"
                + " count(distinct user_id) as bucket_uv,\n"
                + " UNIX_TIMESTAMP(CAST(tumble_start(row_time, interval '1' DAY) AS STRING)) * 1000 as window_start\n"
                + " from source_table\n"
                + " group by\n"
                + " mod(user_id, 1024),\n"
                + " dim,\n"
                + " tumble(row_time, interval '1' DAY)\n"
                + ")\n"
                + "group by dim,\n"
                + " window_start";

        env.getStreamTableEnvironment().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 WINDOW TVF TUMBLE WINDOW EARLY FIRE 案例");

        Arrays.stream(statements.split(";"))
                .forEach(env.getStreamTableEnvironment()::executeSql);

        /**
         * Two-phase aggregation operators involved:
         * local agg: {@link org.apache.flink.table.runtime.operators.aggregate.window.LocalSlicingWindowAggOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.LocalAggCombiner}
         *
         * keyed agg: {@link org.apache.flink.table.runtime.operators.window.slicing.SlicingWindowOperator}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.processors.SliceUnsharedWindowAggProcessor}
         * -> {@link org.apache.flink.table.runtime.operators.aggregate.window.combines.GlobalAggCombiner}
         */
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/cumulate/global_agg/GlobalWindowAggsHandler$232.java ================================================ //package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.cumulate.global_agg; // // //public final class GlobalWindowAggsHandler$232 // implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { // // private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedSharedSliceAssigner // sliceAssigner$163; // long agg0_count1; // boolean agg0_count1IsNull; // long agg1_sum; // boolean agg1_sumIsNull; // long agg2_max; // boolean agg2_maxIsNull; // long agg3_min; // boolean agg3_minIsNull; // long agg4_count; // boolean agg4_countIsNull; // private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$164; // private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$165; // private
org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview; // private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value; // private org.apache.flink.table.api.dataview.MapView distinct_view_0; // org.apache.flink.table.data.GenericRowData acc$167 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData acc$169 = new org.apache.flink.table.data.GenericRowData(6); // private org.apache.flink.table.api.dataview.MapView otherMapView$221; // private transient org.apache.flink.table.data.conversion.RawObjectConverter converter$222; // org.apache.flink.table.data.GenericRowData aggValue$231 = new org.apache.flink.table.data.GenericRowData(7); // // private org.apache.flink.table.runtime.dataview.StateDataViewStore store; // // private java.lang.Long namespace; // // public GlobalWindowAggsHandler$232(Object[] references) throws Exception { // sliceAssigner$163 = // (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedSharedSliceAssigner) references[0])); // externalSerializer$164 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1])); // externalSerializer$165 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[2])); // converter$222 = (((org.apache.flink.table.data.conversion.RawObjectConverter) references[3])); // } // // private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { // return store.getRuntimeContext(); // } // // @Override // public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { // this.store = store; // // distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store // .getStateMapView("distinctAcc_0", true, externalSerializer$164, externalSerializer$165); // distinctAcc_0_dataview_raw_value = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview); // 
// distinct_view_0 = distinctAcc_0_dataview; // // converter$222.open(getRuntimeContext().getUserCodeClassLoader()); // // } // // @Override // public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { // // boolean isNull$176; // long result$177; // long field$178; // boolean isNull$178; // boolean isNull$179; // long result$180; // boolean isNull$183; // boolean result$184; // boolean isNull$188; // boolean result$189; // long field$193; // boolean isNull$193; // boolean isNull$195; // long result$196; // isNull$178 = accInput.isNullAt(2); // field$178 = -1L; // if (!isNull$178) { // field$178 = accInput.getLong(2); // } // isNull$193 = accInput.isNullAt(3); // field$193 = -1L; // if (!isNull$193) { // field$193 = accInput.getLong(3); // } // // // isNull$176 = agg0_count1IsNull || false; // result$177 = -1L; // if (!isNull$176) { // // result$177 = (long) (agg0_count1 + ((long) 1L)); // // } // // agg0_count1 = result$177; // ; // agg0_count1IsNull = isNull$176; // // // long result$182 = -1L; // boolean isNull$182; // if (isNull$178) { // // isNull$182 = agg1_sumIsNull; // if (!isNull$182) { // result$182 = agg1_sum; // } // } else { // long result$181 = -1L; // boolean isNull$181; // if (agg1_sumIsNull) { // // isNull$181 = isNull$178; // if (!isNull$181) { // result$181 = field$178; // } // } else { // // // isNull$179 = agg1_sumIsNull || isNull$178; // result$180 = -1L; // if (!isNull$179) { // // result$180 = (long) (agg1_sum + field$178); // // } // // isNull$181 = isNull$179; // if (!isNull$181) { // result$181 = result$180; // } // } // isNull$182 = isNull$181; // if (!isNull$182) { // result$182 = result$181; // } // } // agg1_sum = result$182; // ; // agg1_sumIsNull = isNull$182; // // // long result$187 = -1L; // boolean isNull$187; // if (isNull$178) { // // isNull$187 = agg2_maxIsNull; // if (!isNull$187) { // result$187 = agg2_max; // } // } else { // long result$186 = -1L; // boolean isNull$186; // if (agg2_maxIsNull) 
{ // // isNull$186 = isNull$178; // if (!isNull$186) { // result$186 = field$178; // } // } else { // isNull$183 = isNull$178 || agg2_maxIsNull; // result$184 = false; // if (!isNull$183) { // // result$184 = field$178 > agg2_max; // // } // // long result$185 = -1L; // boolean isNull$185; // if (result$184) { // // isNull$185 = isNull$178; // if (!isNull$185) { // result$185 = field$178; // } // } else { // // isNull$185 = agg2_maxIsNull; // if (!isNull$185) { // result$185 = agg2_max; // } // } // isNull$186 = isNull$185; // if (!isNull$186) { // result$186 = result$185; // } // } // isNull$187 = isNull$186; // if (!isNull$187) { // result$187 = result$186; // } // } // agg2_max = result$187; // ; // agg2_maxIsNull = isNull$187; // // // long result$192 = -1L; // boolean isNull$192; // if (isNull$178) { // // isNull$192 = agg3_minIsNull; // if (!isNull$192) { // result$192 = agg3_min; // } // } else { // long result$191 = -1L; // boolean isNull$191; // if (agg3_minIsNull) { // // isNull$191 = isNull$178; // if (!isNull$191) { // result$191 = field$178; // } // } else { // isNull$188 = isNull$178 || agg3_minIsNull; // result$189 = false; // if (!isNull$188) { // // result$189 = field$178 < agg3_min; // // } // // long result$190 = -1L; // boolean isNull$190; // if (result$189) { // // isNull$190 = isNull$178; // if (!isNull$190) { // result$190 = field$178; // } // } else { // // isNull$190 = agg3_minIsNull; // if (!isNull$190) { // result$190 = agg3_min; // } // } // isNull$191 = isNull$190; // if (!isNull$191) { // result$191 = result$190; // } // } // isNull$192 = isNull$191; // if (!isNull$192) { // result$192 = result$191; // } // } // agg3_min = result$192; // ; // agg3_minIsNull = isNull$192; // // // java.lang.Long distinctKey$194 = (java.lang.Long) field$193; // if (isNull$193) { // distinctKey$194 = null; // } // // java.lang.Long value$198 = (java.lang.Long) distinct_view_0.get(distinctKey$194); // if (value$198 == null) { // value$198 = 0L; // } // // 
boolean is_distinct_value_changed_0 = false; // // long existed$199 = ((long) value$198) & (1L << 0); // if (existed$199 == 0) { // not existed // value$198 = ((long) value$198) | (1L << 0); // is_distinct_value_changed_0 = true; // // long result$197 = -1L; // boolean isNull$197; // if (isNull$193) { // // isNull$197 = agg4_countIsNull; // if (!isNull$197) { // result$197 = agg4_count; // } // } else { // // // isNull$195 = agg4_countIsNull || false; // result$196 = -1L; // if (!isNull$195) { // // result$196 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$197 = isNull$195; // if (!isNull$197) { // result$197 = result$196; // } // } // agg4_count = result$197; // ; // agg4_countIsNull = isNull$197; // // } // // if (is_distinct_value_changed_0) { // distinct_view_0.put(distinctKey$194, value$198); // } // // // } // // @Override // public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { // // throw new java.lang.RuntimeException( // "This function not require retract method, but the retract method is called."); // // } // // @Override // public void merge(Object ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { // namespace = (java.lang.Long) ns; // // long field$200; // boolean isNull$200; // boolean isNull$201; // long result$202; // long field$203; // boolean isNull$203; // boolean isNull$204; // long result$205; // long field$208; // boolean isNull$208; // boolean isNull$209; // boolean result$210; // long field$214; // boolean isNull$214; // boolean isNull$215; // boolean result$216; // org.apache.flink.table.data.binary.BinaryRawValueData field$220; // boolean isNull$220; // boolean isNull$226; // long result$227; // isNull$208 = otherAcc.isNullAt(2); // field$208 = -1L; // if (!isNull$208) { // field$208 = otherAcc.getLong(2); // } // isNull$203 = otherAcc.isNullAt(1); // field$203 = -1L; // if (!isNull$203) { // field$203 = otherAcc.getLong(1); // } // isNull$200 = otherAcc.isNullAt(0); // 
field$200 = -1L; // if (!isNull$200) { // field$200 = otherAcc.getLong(0); // } // isNull$214 = otherAcc.isNullAt(3); // field$214 = -1L; // if (!isNull$214) { // field$214 = otherAcc.getLong(3); // } // // isNull$220 = otherAcc.isNullAt(5); // field$220 = null; // if (!isNull$220) { // field$220 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(5)); // } // otherMapView$221 = null; // if (!isNull$220) { // otherMapView$221 = // (org.apache.flink.table.api.dataview.MapView) converter$222 // .toExternal((org.apache.flink.table.data.binary.BinaryRawValueData) field$220); // } // // // isNull$201 = agg0_count1IsNull || isNull$200; // result$202 = -1L; // if (!isNull$201) { // // result$202 = (long) (agg0_count1 + field$200); // // } // // agg0_count1 = result$202; // ; // agg0_count1IsNull = isNull$201; // // // long result$207 = -1L; // boolean isNull$207; // if (isNull$203) { // // isNull$207 = agg1_sumIsNull; // if (!isNull$207) { // result$207 = agg1_sum; // } // } else { // long result$206 = -1L; // boolean isNull$206; // if (agg1_sumIsNull) { // // isNull$206 = isNull$203; // if (!isNull$206) { // result$206 = field$203; // } // } else { // // // isNull$204 = agg1_sumIsNull || isNull$203; // result$205 = -1L; // if (!isNull$204) { // // result$205 = (long) (agg1_sum + field$203); // // } // // isNull$206 = isNull$204; // if (!isNull$206) { // result$206 = result$205; // } // } // isNull$207 = isNull$206; // if (!isNull$207) { // result$207 = result$206; // } // } // agg1_sum = result$207; // ; // agg1_sumIsNull = isNull$207; // // // long result$213 = -1L; // boolean isNull$213; // if (isNull$208) { // // isNull$213 = agg2_maxIsNull; // if (!isNull$213) { // result$213 = agg2_max; // } // } else { // long result$212 = -1L; // boolean isNull$212; // if (agg2_maxIsNull) { // // isNull$212 = isNull$208; // if (!isNull$212) { // result$212 = field$208; // } // } else { // isNull$209 = isNull$208 || agg2_maxIsNull; // result$210 = false; 
// if (!isNull$209) { // // result$210 = field$208 > agg2_max; // // } // // long result$211 = -1L; // boolean isNull$211; // if (result$210) { // // isNull$211 = isNull$208; // if (!isNull$211) { // result$211 = field$208; // } // } else { // // isNull$211 = agg2_maxIsNull; // if (!isNull$211) { // result$211 = agg2_max; // } // } // isNull$212 = isNull$211; // if (!isNull$212) { // result$212 = result$211; // } // } // isNull$213 = isNull$212; // if (!isNull$213) { // result$213 = result$212; // } // } // agg2_max = result$213; // ; // agg2_maxIsNull = isNull$213; // // // long result$219 = -1L; // boolean isNull$219; // if (isNull$214) { // // isNull$219 = agg3_minIsNull; // if (!isNull$219) { // result$219 = agg3_min; // } // } else { // long result$218 = -1L; // boolean isNull$218; // if (agg3_minIsNull) { // // isNull$218 = isNull$214; // if (!isNull$218) { // result$218 = field$214; // } // } else { // isNull$215 = isNull$214 || agg3_minIsNull; // result$216 = false; // if (!isNull$215) { // // result$216 = field$214 < agg3_min; // // } // // long result$217 = -1L; // boolean isNull$217; // if (result$216) { // // isNull$217 = isNull$214; // if (!isNull$217) { // result$217 = field$214; // } // } else { // // isNull$217 = agg3_minIsNull; // if (!isNull$217) { // result$217 = agg3_min; // } // } // isNull$218 = isNull$217; // if (!isNull$218) { // result$218 = result$217; // } // } // isNull$219 = isNull$218; // if (!isNull$219) { // result$219 = result$218; // } // } // agg3_min = result$219; // ; // agg3_minIsNull = isNull$219; // // // java.lang.Iterable otherEntries$229 = // (java.lang.Iterable) otherMapView$221.entries(); // if (otherEntries$229 != null) { // for (java.util.Map.Entry entry : otherEntries$229) { // java.lang.Long distinctKey$223 = (java.lang.Long) entry.getKey(); // long field$224 = -1L; // boolean isNull$225 = true; // if (distinctKey$223 != null) { // isNull$225 = false; // field$224 = (long) distinctKey$223; // } // java.lang.Long 
otherValue = (java.lang.Long) entry.getValue(); // java.lang.Long thisValue = (java.lang.Long) distinct_view_0.get(distinctKey$223); // if (thisValue == null) { // thisValue = 0L; // } // boolean is_distinct_value_changed_0 = false; // boolean is_distinct_value_empty_0 = false; // // // long existed$230 = ((long) thisValue) & (1L << 0); // if (existed$230 == 0) { // not existed // long otherExisted = ((long) otherValue) & (1L << 0); // if (otherExisted != 0) { // existed in other // is_distinct_value_changed_0 = true; // // do accumulate // // long result$228 = -1L; // boolean isNull$228; // if (isNull$225) { // // isNull$228 = agg4_countIsNull; // if (!isNull$228) { // result$228 = agg4_count; // } // } else { // // // isNull$226 = agg4_countIsNull || false; // result$227 = -1L; // if (!isNull$226) { // // result$227 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$228 = isNull$226; // if (!isNull$228) { // result$228 = result$227; // } // } // agg4_count = result$228; // ; // agg4_countIsNull = isNull$228; // // } // } // // thisValue = ((long) thisValue) | ((long) otherValue); // is_distinct_value_empty_0 = false; // // if (is_distinct_value_empty_0) { // distinct_view_0.remove(distinctKey$223); // } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update // distinct_view_0.put(distinctKey$223, thisValue); // } // } // end foreach // } // end otherEntries != null // // // } // // @Override // public void setAccumulators(Object ns, org.apache.flink.table.data.RowData acc) // throws Exception { // namespace = (java.lang.Long) ns; // // long field$170; // boolean isNull$170; // long field$171; // boolean isNull$171; // long field$172; // boolean isNull$172; // long field$173; // boolean isNull$173; // long field$174; // boolean isNull$174; // org.apache.flink.table.data.binary.BinaryRawValueData field$175; // boolean isNull$175; // isNull$174 = acc.isNullAt(4); // field$174 = -1L; // if (!isNull$174) { // field$174 = 
acc.getLong(4); // } // isNull$170 = acc.isNullAt(0); // field$170 = -1L; // if (!isNull$170) { // field$170 = acc.getLong(0); // } // isNull$171 = acc.isNullAt(1); // field$171 = -1L; // if (!isNull$171) { // field$171 = acc.getLong(1); // } // isNull$173 = acc.isNullAt(3); // field$173 = -1L; // if (!isNull$173) { // field$173 = acc.getLong(3); // } // // // when namespace is null, the dataview is used in heap, no key and namespace set // if (namespace != null) { // distinctAcc_0_dataview.setCurrentNamespace(namespace); // distinct_view_0 = distinctAcc_0_dataview; // } else { // isNull$175 = acc.isNullAt(5); // field$175 = null; // if (!isNull$175) { // field$175 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); // } // distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$175.getJavaObject(); // } // // isNull$172 = acc.isNullAt(2); // field$172 = -1L; // if (!isNull$172) { // field$172 = acc.getLong(2); // } // // agg0_count1 = field$170; // ; // agg0_count1IsNull = isNull$170; // // // agg1_sum = field$171; // ; // agg1_sumIsNull = isNull$171; // // // agg2_max = field$172; // ; // agg2_maxIsNull = isNull$172; // // // agg3_min = field$173; // ; // agg3_minIsNull = isNull$173; // // // agg4_count = field$174; // ; // agg4_countIsNull = isNull$174; // // // } // // @Override // public org.apache.flink.table.data.RowData getAccumulators() throws Exception { // // // acc$169 = new org.apache.flink.table.data.GenericRowData(6); // // // if (agg0_count1IsNull) { // acc$169.setField(0, null); // } else { // acc$169.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // acc$169.setField(1, null); // } else { // acc$169.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // acc$169.setField(2, null); // } else { // acc$169.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // acc$169.setField(3, null); // } else { // acc$169.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // 
acc$169.setField(4, null); // } else { // acc$169.setField(4, agg4_count); // } // // // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$168 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); // // if (false) { // acc$169.setField(5, null); // } else { // acc$169.setField(5, distinct_acc$168); // } // // // return acc$169; // // } // // @Override // public org.apache.flink.table.data.RowData createAccumulators() throws Exception { // // // acc$167 = new org.apache.flink.table.data.GenericRowData(6); // // // if (false) { // acc$167.setField(0, null); // } else { // acc$167.setField(0, ((long) 0L)); // } // // // if (true) { // acc$167.setField(1, null); // } else { // acc$167.setField(1, ((long) -1L)); // } // // // if (true) { // acc$167.setField(2, null); // } else { // acc$167.setField(2, ((long) -1L)); // } // // // if (true) { // acc$167.setField(3, null); // } else { // acc$167.setField(3, ((long) -1L)); // } // // // if (false) { // acc$167.setField(4, null); // } else { // acc$167.setField(4, ((long) 0L)); // } // // // org.apache.flink.table.api.dataview.MapView mapview$166 = new org.apache.flink.table.api.dataview.MapView(); // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$166 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$166); // // if (false) { // acc$167.setField(5, null); // } else { // acc$167.setField(5, distinct_acc$166); // } // // // return acc$167; // // } // // @Override // public org.apache.flink.table.data.RowData getValue(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // // aggValue$231 = new org.apache.flink.table.data.GenericRowData(7); // // // if (agg0_count1IsNull) { // aggValue$231.setField(0, null); // } else { // aggValue$231.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // aggValue$231.setField(1, null); // } else { // aggValue$231.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) 
{ // aggValue$231.setField(2, null); // } else { // aggValue$231.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // aggValue$231.setField(3, null); // } else { // aggValue$231.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // aggValue$231.setField(4, null); // } else { // aggValue$231.setField(4, agg4_count); // } // // // if (false) { // aggValue$231.setField(5, null); // } else { // aggValue$231.setField(5, org.apache.flink.table.data.TimestampData // .fromEpochMillis(sliceAssigner$163.getWindowStart(namespace))); // } // // // if (false) { // aggValue$231.setField(6, null); // } else { // aggValue$231.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace)); // } // // // return aggValue$231; // // } // // @Override // public void cleanup(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // distinctAcc_0_dataview.setCurrentNamespace(namespace); // distinctAcc_0_dataview.clear(); // // // } // // @Override // public void close() throws Exception { // // } //}
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/cumulate/global_agg/KeyProjection$301.java
================================================
package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.cumulate.global_agg;

/**
 * Planner-generated key projection: copies two fields from an input row
 * (a string at index 0 and an int at index 1) into a reusable
 * {@code BinaryRowData}, preserving nulls per field.
 *
 * <p>NOTE(review): judging by the package name this appears to be the grouping
 * key extractor for the cumulate-window global aggregation — TODO confirm
 * against the operator that instantiates it.
 *
 * <p>The output row {@code out} is a shared instance that is overwritten on
 * every call to {@link #apply}; callers that retain the result must copy it.
 */
public class KeyProjection$301 implements org.apache.flink.table.runtime.generated.Projection {

    // Reused 2-field output row and its writer; not thread-safe by construction.
    org.apache.flink.table.data.binary.BinaryRowData out = new org.apache.flink.table.data.binary.BinaryRowData(2);
    org.apache.flink.table.data.writer.BinaryRowWriter outWriter = new org.apache.flink.table.data.writer.BinaryRowWriter(out);

    /**
     * Generated constructor; this projection captures no runtime references,
     * so the array is unused.
     */
    public KeyProjection$301(Object[] references) throws Exception {
    }

    /**
     * Projects fields 0 (string) and 1 (int) of {@code in1} into the shared
     * binary row and returns it.
     */
    @Override
    public org.apache.flink.table.data.binary.BinaryRowData apply(org.apache.flink.table.data.RowData in1) {
        org.apache.flink.table.data.binary.BinaryStringData field$302;
        boolean isNull$302;
        int field$303;
        boolean isNull$303;
        outWriter.reset();
        // Field 0: null-checked string read; EMPTY_UTF8 is only a placeholder
        // default and is never written when the source field is null.
        isNull$302 = in1.isNullAt(0);
        field$302 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
        if (!isNull$302) {
            field$302 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(0));
        }
        if (isNull$302) {
            outWriter.setNullAt(0);
        } else {
            outWriter.writeString(0, field$302);
        }
        // Field 1: null-checked int read; -1 is likewise just a placeholder.
        isNull$303 = in1.isNullAt(1);
        field$303 = -1;
        if (!isNull$303) {
            field$303 = in1.getInt(1);
        }
        if (isNull$303) {
            outWriter.setNullAt(1);
        } else {
            outWriter.writeInt(1, field$303);
        }
        outWriter.complete();
        return out;
    }
}
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/cumulate/global_agg/LocalWindowAggsHandler$162.java
================================================
//package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.cumulate.global_agg; // // //public final class LocalWindowAggsHandler$162 // implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { // // private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedSharedSliceAssigner // sliceAssigner$95; // long agg0_count1; // boolean agg0_count1IsNull; // long agg1_sum; // boolean agg1_sumIsNull; // long agg2_max; // boolean agg2_maxIsNull; // long agg3_min; // boolean agg3_minIsNull; // long agg4_count; // boolean agg4_countIsNull; // private org.apache.flink.table.api.dataview.MapView distinct_view_0; // org.apache.flink.table.data.GenericRowData acc$97 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData acc$99 = new org.apache.flink.table.data.GenericRowData(6); // private org.apache.flink.table.api.dataview.MapView otherMapView$151; // private transient org.apache.flink.table.data.conversion.RawObjectConverter converter$152; // org.apache.flink.table.data.GenericRowData aggValue$161 = new org.apache.flink.table.data.GenericRowData(7); // //
private org.apache.flink.table.runtime.dataview.StateDataViewStore store; // // private java.lang.Long namespace; // // public LocalWindowAggsHandler$162(Object[] references) throws Exception { // sliceAssigner$95 = // (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedSharedSliceAssigner) references[0])); // converter$152 = (((org.apache.flink.table.data.conversion.RawObjectConverter) references[1])); // } // // private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { // return store.getRuntimeContext(); // } // // @Override // public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { // this.store = store; // // converter$152.open(getRuntimeContext().getUserCodeClassLoader()); // // } // // @Override // public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { // // boolean isNull$106; // long result$107; // long field$108; // boolean isNull$108; // boolean isNull$109; // long result$110; // boolean isNull$113; // boolean result$114; // boolean isNull$118; // boolean result$119; // long field$123; // boolean isNull$123; // boolean isNull$125; // long result$126; // isNull$108 = accInput.isNullAt(2); // field$108 = -1L; // if (!isNull$108) { // field$108 = accInput.getLong(2); // } // isNull$123 = accInput.isNullAt(3); // field$123 = -1L; // if (!isNull$123) { // field$123 = accInput.getLong(3); // } // // // isNull$106 = agg0_count1IsNull || false; // result$107 = -1L; // if (!isNull$106) { // // result$107 = (long) (agg0_count1 + ((long) 1L)); // // } // // agg0_count1 = result$107; // ; // agg0_count1IsNull = isNull$106; // // // long result$112 = -1L; // boolean isNull$112; // if (isNull$108) { // // isNull$112 = agg1_sumIsNull; // if (!isNull$112) { // result$112 = agg1_sum; // } // } else { // long result$111 = -1L; // boolean isNull$111; // if (agg1_sumIsNull) { // // isNull$111 = isNull$108; // if (!isNull$111) { // result$111 = 
field$108; // } // } else { // // // isNull$109 = agg1_sumIsNull || isNull$108; // result$110 = -1L; // if (!isNull$109) { // // result$110 = (long) (agg1_sum + field$108); // // } // // isNull$111 = isNull$109; // if (!isNull$111) { // result$111 = result$110; // } // } // isNull$112 = isNull$111; // if (!isNull$112) { // result$112 = result$111; // } // } // agg1_sum = result$112; // ; // agg1_sumIsNull = isNull$112; // // // long result$117 = -1L; // boolean isNull$117; // if (isNull$108) { // // isNull$117 = agg2_maxIsNull; // if (!isNull$117) { // result$117 = agg2_max; // } // } else { // long result$116 = -1L; // boolean isNull$116; // if (agg2_maxIsNull) { // // isNull$116 = isNull$108; // if (!isNull$116) { // result$116 = field$108; // } // } else { // isNull$113 = isNull$108 || agg2_maxIsNull; // result$114 = false; // if (!isNull$113) { // // result$114 = field$108 > agg2_max; // // } // // long result$115 = -1L; // boolean isNull$115; // if (result$114) { // // isNull$115 = isNull$108; // if (!isNull$115) { // result$115 = field$108; // } // } else { // // isNull$115 = agg2_maxIsNull; // if (!isNull$115) { // result$115 = agg2_max; // } // } // isNull$116 = isNull$115; // if (!isNull$116) { // result$116 = result$115; // } // } // isNull$117 = isNull$116; // if (!isNull$117) { // result$117 = result$116; // } // } // agg2_max = result$117; // ; // agg2_maxIsNull = isNull$117; // // // long result$122 = -1L; // boolean isNull$122; // if (isNull$108) { // // isNull$122 = agg3_minIsNull; // if (!isNull$122) { // result$122 = agg3_min; // } // } else { // long result$121 = -1L; // boolean isNull$121; // if (agg3_minIsNull) { // // isNull$121 = isNull$108; // if (!isNull$121) { // result$121 = field$108; // } // } else { // isNull$118 = isNull$108 || agg3_minIsNull; // result$119 = false; // if (!isNull$118) { // // result$119 = field$108 < agg3_min; // // } // // long result$120 = -1L; // boolean isNull$120; // if (result$119) { // // isNull$120 = 
isNull$108; // if (!isNull$120) { // result$120 = field$108; // } // } else { // // isNull$120 = agg3_minIsNull; // if (!isNull$120) { // result$120 = agg3_min; // } // } // isNull$121 = isNull$120; // if (!isNull$121) { // result$121 = result$120; // } // } // isNull$122 = isNull$121; // if (!isNull$122) { // result$122 = result$121; // } // } // agg3_min = result$122; // ; // agg3_minIsNull = isNull$122; // // // java.lang.Long distinctKey$124 = (java.lang.Long) field$123; // if (isNull$123) { // distinctKey$124 = null; // } // // java.lang.Long value$128 = (java.lang.Long) distinct_view_0.get(distinctKey$124); // if (value$128 == null) { // value$128 = 0L; // } // // boolean is_distinct_value_changed_0 = false; // // long existed$129 = ((long) value$128) & (1L << 0); // if (existed$129 == 0) { // not existed // value$128 = ((long) value$128) | (1L << 0); // is_distinct_value_changed_0 = true; // // long result$127 = -1L; // boolean isNull$127; // if (isNull$123) { // // isNull$127 = agg4_countIsNull; // if (!isNull$127) { // result$127 = agg4_count; // } // } else { // // // isNull$125 = agg4_countIsNull || false; // result$126 = -1L; // if (!isNull$125) { // // result$126 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$127 = isNull$125; // if (!isNull$127) { // result$127 = result$126; // } // } // agg4_count = result$127; // ; // agg4_countIsNull = isNull$127; // // } // // if (is_distinct_value_changed_0) { // distinct_view_0.put(distinctKey$124, value$128); // } // // // } // // @Override // public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { // // throw new java.lang.RuntimeException( // "This function not require retract method, but the retract method is called."); // // } // // @Override // public void merge(Object ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { // namespace = (java.lang.Long) ns; // // long field$130; // boolean isNull$130; // boolean isNull$131; // long result$132; // 
long field$133; // boolean isNull$133; // boolean isNull$134; // long result$135; // long field$138; // boolean isNull$138; // boolean isNull$139; // boolean result$140; // long field$144; // boolean isNull$144; // boolean isNull$145; // boolean result$146; // org.apache.flink.table.data.binary.BinaryRawValueData field$150; // boolean isNull$150; // boolean isNull$156; // long result$157; // isNull$130 = otherAcc.isNullAt(2); // field$130 = -1L; // if (!isNull$130) { // field$130 = otherAcc.getLong(2); // } // isNull$133 = otherAcc.isNullAt(3); // field$133 = -1L; // if (!isNull$133) { // field$133 = otherAcc.getLong(3); // } // // isNull$150 = otherAcc.isNullAt(7); // field$150 = null; // if (!isNull$150) { // field$150 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(7)); // } // otherMapView$151 = null; // if (!isNull$150) { // otherMapView$151 = // (org.apache.flink.table.api.dataview.MapView) converter$152 // .toExternal((org.apache.flink.table.data.binary.BinaryRawValueData) field$150); // } // // isNull$144 = otherAcc.isNullAt(5); // field$144 = -1L; // if (!isNull$144) { // field$144 = otherAcc.getLong(5); // } // isNull$138 = otherAcc.isNullAt(4); // field$138 = -1L; // if (!isNull$138) { // field$138 = otherAcc.getLong(4); // } // // // isNull$131 = agg0_count1IsNull || isNull$130; // result$132 = -1L; // if (!isNull$131) { // // result$132 = (long) (agg0_count1 + field$130); // // } // // agg0_count1 = result$132; // ; // agg0_count1IsNull = isNull$131; // // // long result$137 = -1L; // boolean isNull$137; // if (isNull$133) { // // isNull$137 = agg1_sumIsNull; // if (!isNull$137) { // result$137 = agg1_sum; // } // } else { // long result$136 = -1L; // boolean isNull$136; // if (agg1_sumIsNull) { // // isNull$136 = isNull$133; // if (!isNull$136) { // result$136 = field$133; // } // } else { // // // isNull$134 = agg1_sumIsNull || isNull$133; // result$135 = -1L; // if (!isNull$134) { // // result$135 = (long) (agg1_sum + 
field$133); // // } // // isNull$136 = isNull$134; // if (!isNull$136) { // result$136 = result$135; // } // } // isNull$137 = isNull$136; // if (!isNull$137) { // result$137 = result$136; // } // } // agg1_sum = result$137; // ; // agg1_sumIsNull = isNull$137; // // // long result$143 = -1L; // boolean isNull$143; // if (isNull$138) { // // isNull$143 = agg2_maxIsNull; // if (!isNull$143) { // result$143 = agg2_max; // } // } else { // long result$142 = -1L; // boolean isNull$142; // if (agg2_maxIsNull) { // // isNull$142 = isNull$138; // if (!isNull$142) { // result$142 = field$138; // } // } else { // isNull$139 = isNull$138 || agg2_maxIsNull; // result$140 = false; // if (!isNull$139) { // // result$140 = field$138 > agg2_max; // // } // // long result$141 = -1L; // boolean isNull$141; // if (result$140) { // // isNull$141 = isNull$138; // if (!isNull$141) { // result$141 = field$138; // } // } else { // // isNull$141 = agg2_maxIsNull; // if (!isNull$141) { // result$141 = agg2_max; // } // } // isNull$142 = isNull$141; // if (!isNull$142) { // result$142 = result$141; // } // } // isNull$143 = isNull$142; // if (!isNull$143) { // result$143 = result$142; // } // } // agg2_max = result$143; // ; // agg2_maxIsNull = isNull$143; // // // long result$149 = -1L; // boolean isNull$149; // if (isNull$144) { // // isNull$149 = agg3_minIsNull; // if (!isNull$149) { // result$149 = agg3_min; // } // } else { // long result$148 = -1L; // boolean isNull$148; // if (agg3_minIsNull) { // // isNull$148 = isNull$144; // if (!isNull$148) { // result$148 = field$144; // } // } else { // isNull$145 = isNull$144 || agg3_minIsNull; // result$146 = false; // if (!isNull$145) { // // result$146 = field$144 < agg3_min; // // } // // long result$147 = -1L; // boolean isNull$147; // if (result$146) { // // isNull$147 = isNull$144; // if (!isNull$147) { // result$147 = field$144; // } // } else { // // isNull$147 = agg3_minIsNull; // if (!isNull$147) { // result$147 = agg3_min; // } // 
} // isNull$148 = isNull$147; // if (!isNull$148) { // result$148 = result$147; // } // } // isNull$149 = isNull$148; // if (!isNull$149) { // result$149 = result$148; // } // } // agg3_min = result$149; // ; // agg3_minIsNull = isNull$149; // // // java.lang.Iterable otherEntries$159 = // (java.lang.Iterable) otherMapView$151.entries(); // if (otherEntries$159 != null) { // for (java.util.Map.Entry entry : otherEntries$159) { // java.lang.Long distinctKey$153 = (java.lang.Long) entry.getKey(); // long field$154 = -1L; // boolean isNull$155 = true; // if (distinctKey$153 != null) { // isNull$155 = false; // field$154 = (long) distinctKey$153; // } // java.lang.Long otherValue = (java.lang.Long) entry.getValue(); // java.lang.Long thisValue = (java.lang.Long) distinct_view_0.get(distinctKey$153); // if (thisValue == null) { // thisValue = 0L; // } // boolean is_distinct_value_changed_0 = false; // boolean is_distinct_value_empty_0 = false; // // // long existed$160 = ((long) thisValue) & (1L << 0); // if (existed$160 == 0) { // not existed // long otherExisted = ((long) otherValue) & (1L << 0); // if (otherExisted != 0) { // existed in other // is_distinct_value_changed_0 = true; // // do accumulate // // long result$158 = -1L; // boolean isNull$158; // if (isNull$155) { // // isNull$158 = agg4_countIsNull; // if (!isNull$158) { // result$158 = agg4_count; // } // } else { // // // isNull$156 = agg4_countIsNull || false; // result$157 = -1L; // if (!isNull$156) { // // result$157 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$158 = isNull$156; // if (!isNull$158) { // result$158 = result$157; // } // } // agg4_count = result$158; // ; // agg4_countIsNull = isNull$158; // // } // } // // thisValue = ((long) thisValue) | ((long) otherValue); // is_distinct_value_empty_0 = false; // // if (is_distinct_value_empty_0) { // distinct_view_0.remove(distinctKey$153); // } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update // 
distinct_view_0.put(distinctKey$153, thisValue); // } // } // end foreach // } // end otherEntries != null // // // } // // @Override // public void setAccumulators(Object ns, org.apache.flink.table.data.RowData acc) // throws Exception { // namespace = (java.lang.Long) ns; // // long field$100; // boolean isNull$100; // long field$101; // boolean isNull$101; // long field$102; // boolean isNull$102; // long field$103; // boolean isNull$103; // long field$104; // boolean isNull$104; // org.apache.flink.table.data.binary.BinaryRawValueData field$105; // boolean isNull$105; // isNull$104 = acc.isNullAt(4); // field$104 = -1L; // if (!isNull$104) { // field$104 = acc.getLong(4); // } // isNull$100 = acc.isNullAt(0); // field$100 = -1L; // if (!isNull$100) { // field$100 = acc.getLong(0); // } // isNull$101 = acc.isNullAt(1); // field$101 = -1L; // if (!isNull$101) { // field$101 = acc.getLong(1); // } // isNull$103 = acc.isNullAt(3); // field$103 = -1L; // if (!isNull$103) { // field$103 = acc.getLong(3); // } // // isNull$105 = acc.isNullAt(5); // field$105 = null; // if (!isNull$105) { // field$105 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); // } // distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$105.getJavaObject(); // // isNull$102 = acc.isNullAt(2); // field$102 = -1L; // if (!isNull$102) { // field$102 = acc.getLong(2); // } // // agg0_count1 = field$100; // ; // agg0_count1IsNull = isNull$100; // // // agg1_sum = field$101; // ; // agg1_sumIsNull = isNull$101; // // // agg2_max = field$102; // ; // agg2_maxIsNull = isNull$102; // // // agg3_min = field$103; // ; // agg3_minIsNull = isNull$103; // // // agg4_count = field$104; // ; // agg4_countIsNull = isNull$104; // // // } // // @Override // public org.apache.flink.table.data.RowData getAccumulators() throws Exception { // // // acc$99 = new org.apache.flink.table.data.GenericRowData(6); // // // if (agg0_count1IsNull) { // acc$99.setField(0, null); // 
} else { // acc$99.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // acc$99.setField(1, null); // } else { // acc$99.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // acc$99.setField(2, null); // } else { // acc$99.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // acc$99.setField(3, null); // } else { // acc$99.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // acc$99.setField(4, null); // } else { // acc$99.setField(4, agg4_count); // } // // // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$98 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); // // if (false) { // acc$99.setField(5, null); // } else { // acc$99.setField(5, distinct_acc$98); // } // // // return acc$99; // // } // // @Override // public org.apache.flink.table.data.RowData createAccumulators() throws Exception { // // // acc$97 = new org.apache.flink.table.data.GenericRowData(6); // // // if (false) { // acc$97.setField(0, null); // } else { // acc$97.setField(0, ((long) 0L)); // } // // // if (true) { // acc$97.setField(1, null); // } else { // acc$97.setField(1, ((long) -1L)); // } // // // if (true) { // acc$97.setField(2, null); // } else { // acc$97.setField(2, ((long) -1L)); // } // // // if (true) { // acc$97.setField(3, null); // } else { // acc$97.setField(3, ((long) -1L)); // } // // // if (false) { // acc$97.setField(4, null); // } else { // acc$97.setField(4, ((long) 0L)); // } // // // org.apache.flink.table.api.dataview.MapView mapview$96 = new org.apache.flink.table.api.dataview.MapView(); // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$96 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$96); // // if (false) { // acc$97.setField(5, null); // } else { // acc$97.setField(5, distinct_acc$96); // } // // // return acc$97; // // } // // @Override // public org.apache.flink.table.data.RowData getValue(Object ns) throws Exception { 
// namespace = (java.lang.Long) ns; // // // aggValue$161 = new org.apache.flink.table.data.GenericRowData(7); // // // if (agg0_count1IsNull) { // aggValue$161.setField(0, null); // } else { // aggValue$161.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // aggValue$161.setField(1, null); // } else { // aggValue$161.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // aggValue$161.setField(2, null); // } else { // aggValue$161.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // aggValue$161.setField(3, null); // } else { // aggValue$161.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // aggValue$161.setField(4, null); // } else { // aggValue$161.setField(4, agg4_count); // } // // // if (false) { // aggValue$161.setField(5, null); // } else { // aggValue$161.setField(5, org.apache.flink.table.data.TimestampData // .fromEpochMillis(sliceAssigner$95.getWindowStart(namespace))); // } // // // if (false) { // aggValue$161.setField(6, null); // } else { // aggValue$161.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace)); // } // // // return aggValue$161; // // } // // @Override // public void cleanup(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // // } // // @Override // public void close() throws Exception { // // } //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/cumulate/global_agg/StateWindowAggsHandler$300.java ================================================ //package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.cumulate.global_agg; // // //public final class StateWindowAggsHandler$300 // implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { // // private transient org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedSharedSliceAssigner // sliceAssigner$233; // long agg0_count1; // 
boolean agg0_count1IsNull; // long agg1_sum; // boolean agg1_sumIsNull; // long agg2_max; // boolean agg2_maxIsNull; // long agg3_min; // boolean agg3_minIsNull; // long agg4_count; // boolean agg4_countIsNull; // private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$234; // private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$235; // private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview; // private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value; // private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview_backup; // private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_backup_raw_value; // private org.apache.flink.table.api.dataview.MapView distinct_view_0; // private org.apache.flink.table.api.dataview.MapView distinct_backup_view_0; // org.apache.flink.table.data.GenericRowData acc$237 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData acc$239 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData aggValue$299 = new org.apache.flink.table.data.GenericRowData(7); // // private org.apache.flink.table.runtime.dataview.StateDataViewStore store; // // private java.lang.Long namespace; // // public StateWindowAggsHandler$300(Object[] references) throws Exception { // sliceAssigner$233 = // (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.SlicedSharedSliceAssigner) references[0])); // externalSerializer$234 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1])); // externalSerializer$235 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[2])); // } // // private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { // return store.getRuntimeContext(); // } // // @Override 
// public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { // this.store = store; // // distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store // .getStateMapView("distinctAcc_0", true, externalSerializer$234, externalSerializer$235); // distinctAcc_0_dataview_raw_value = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview); // // // distinctAcc_0_dataview_backup = (org.apache.flink.table.runtime.dataview.StateMapView) store // .getStateMapView("distinctAcc_0", true, externalSerializer$234, externalSerializer$235); // distinctAcc_0_dataview_backup_raw_value = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview_backup); // // distinct_view_0 = distinctAcc_0_dataview; // distinct_backup_view_0 = distinctAcc_0_dataview_backup; // } // // @Override // public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { // // boolean isNull$246; // long result$247; // long field$248; // boolean isNull$248; // boolean isNull$249; // long result$250; // boolean isNull$253; // boolean result$254; // boolean isNull$258; // boolean result$259; // long field$263; // boolean isNull$263; // boolean isNull$265; // long result$266; // isNull$248 = accInput.isNullAt(2); // field$248 = -1L; // if (!isNull$248) { // field$248 = accInput.getLong(2); // } // isNull$263 = accInput.isNullAt(3); // field$263 = -1L; // if (!isNull$263) { // field$263 = accInput.getLong(3); // } // // // isNull$246 = agg0_count1IsNull || false; // result$247 = -1L; // if (!isNull$246) { // // result$247 = (long) (agg0_count1 + ((long) 1L)); // // } // // agg0_count1 = result$247; // ; // agg0_count1IsNull = isNull$246; // // // long result$252 = -1L; // boolean isNull$252; // if (isNull$248) { // // isNull$252 = agg1_sumIsNull; // if (!isNull$252) { // result$252 = agg1_sum; // } // } else { // long result$251 = -1L; // boolean 
isNull$251; // if (agg1_sumIsNull) { // // isNull$251 = isNull$248; // if (!isNull$251) { // result$251 = field$248; // } // } else { // // // isNull$249 = agg1_sumIsNull || isNull$248; // result$250 = -1L; // if (!isNull$249) { // // result$250 = (long) (agg1_sum + field$248); // // } // // isNull$251 = isNull$249; // if (!isNull$251) { // result$251 = result$250; // } // } // isNull$252 = isNull$251; // if (!isNull$252) { // result$252 = result$251; // } // } // agg1_sum = result$252; // ; // agg1_sumIsNull = isNull$252; // // // long result$257 = -1L; // boolean isNull$257; // if (isNull$248) { // // isNull$257 = agg2_maxIsNull; // if (!isNull$257) { // result$257 = agg2_max; // } // } else { // long result$256 = -1L; // boolean isNull$256; // if (agg2_maxIsNull) { // // isNull$256 = isNull$248; // if (!isNull$256) { // result$256 = field$248; // } // } else { // isNull$253 = isNull$248 || agg2_maxIsNull; // result$254 = false; // if (!isNull$253) { // // result$254 = field$248 > agg2_max; // // } // // long result$255 = -1L; // boolean isNull$255; // if (result$254) { // // isNull$255 = isNull$248; // if (!isNull$255) { // result$255 = field$248; // } // } else { // // isNull$255 = agg2_maxIsNull; // if (!isNull$255) { // result$255 = agg2_max; // } // } // isNull$256 = isNull$255; // if (!isNull$256) { // result$256 = result$255; // } // } // isNull$257 = isNull$256; // if (!isNull$257) { // result$257 = result$256; // } // } // agg2_max = result$257; // ; // agg2_maxIsNull = isNull$257; // // // long result$262 = -1L; // boolean isNull$262; // if (isNull$248) { // // isNull$262 = agg3_minIsNull; // if (!isNull$262) { // result$262 = agg3_min; // } // } else { // long result$261 = -1L; // boolean isNull$261; // if (agg3_minIsNull) { // // isNull$261 = isNull$248; // if (!isNull$261) { // result$261 = field$248; // } // } else { // isNull$258 = isNull$248 || agg3_minIsNull; // result$259 = false; // if (!isNull$258) { // // result$259 = field$248 < agg3_min; // 
// } // // long result$260 = -1L; // boolean isNull$260; // if (result$259) { // // isNull$260 = isNull$248; // if (!isNull$260) { // result$260 = field$248; // } // } else { // // isNull$260 = agg3_minIsNull; // if (!isNull$260) { // result$260 = agg3_min; // } // } // isNull$261 = isNull$260; // if (!isNull$261) { // result$261 = result$260; // } // } // isNull$262 = isNull$261; // if (!isNull$262) { // result$262 = result$261; // } // } // agg3_min = result$262; // ; // agg3_minIsNull = isNull$262; // // // java.lang.Long distinctKey$264 = (java.lang.Long) field$263; // if (isNull$263) { // distinctKey$264 = null; // } // // java.lang.Long value$268 = (java.lang.Long) distinct_view_0.get(distinctKey$264); // if (value$268 == null) { // value$268 = 0L; // } // // boolean is_distinct_value_changed_0 = false; // // long existed$269 = ((long) value$268) & (1L << 0); // if (existed$269 == 0) { // not existed // value$268 = ((long) value$268) | (1L << 0); // is_distinct_value_changed_0 = true; // // long result$267 = -1L; // boolean isNull$267; // if (isNull$263) { // // isNull$267 = agg4_countIsNull; // if (!isNull$267) { // result$267 = agg4_count; // } // } else { // // // isNull$265 = agg4_countIsNull || false; // result$266 = -1L; // if (!isNull$265) { // // result$266 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$267 = isNull$265; // if (!isNull$267) { // result$267 = result$266; // } // } // agg4_count = result$267; // ; // agg4_countIsNull = isNull$267; // // } // // if (is_distinct_value_changed_0) { // distinct_view_0.put(distinctKey$264, value$268); // } // // // } // // @Override // public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { // // throw new java.lang.RuntimeException( // "This function not require retract method, but the retract method is called."); // // } // // @Override // public void merge(Object ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { // namespace = (java.lang.Long) 
ns; // // long field$270; // boolean isNull$270; // boolean isNull$271; // long result$272; // long field$273; // boolean isNull$273; // boolean isNull$274; // long result$275; // long field$278; // boolean isNull$278; // boolean isNull$279; // boolean result$280; // long field$284; // boolean isNull$284; // boolean isNull$285; // boolean result$286; // org.apache.flink.table.data.binary.BinaryRawValueData field$290; // boolean isNull$290; // boolean isNull$294; // long result$295; // isNull$278 = otherAcc.isNullAt(2); // field$278 = -1L; // if (!isNull$278) { // field$278 = otherAcc.getLong(2); // } // isNull$273 = otherAcc.isNullAt(1); // field$273 = -1L; // if (!isNull$273) { // field$273 = otherAcc.getLong(1); // } // isNull$270 = otherAcc.isNullAt(0); // field$270 = -1L; // if (!isNull$270) { // field$270 = otherAcc.getLong(0); // } // isNull$284 = otherAcc.isNullAt(3); // field$284 = -1L; // if (!isNull$284) { // field$284 = otherAcc.getLong(3); // } // // // when namespace is null, the dataview is used in heap, no key and namespace set // if (namespace != null) { // distinctAcc_0_dataview_backup.setCurrentNamespace(namespace); // distinct_backup_view_0 = distinctAcc_0_dataview_backup; // } else { // isNull$290 = otherAcc.isNullAt(5); // field$290 = null; // if (!isNull$290) { // field$290 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(5)); // } // distinct_backup_view_0 = (org.apache.flink.table.api.dataview.MapView) field$290.getJavaObject(); // } // // // isNull$271 = agg0_count1IsNull || isNull$270; // result$272 = -1L; // if (!isNull$271) { // // result$272 = (long) (agg0_count1 + field$270); // // } // // agg0_count1 = result$272; // ; // agg0_count1IsNull = isNull$271; // // // long result$277 = -1L; // boolean isNull$277; // if (isNull$273) { // // isNull$277 = agg1_sumIsNull; // if (!isNull$277) { // result$277 = agg1_sum; // } // } else { // long result$276 = -1L; // boolean isNull$276; // if (agg1_sumIsNull) { // // 
isNull$276 = isNull$273; // if (!isNull$276) { // result$276 = field$273; // } // } else { // // // isNull$274 = agg1_sumIsNull || isNull$273; // result$275 = -1L; // if (!isNull$274) { // // result$275 = (long) (agg1_sum + field$273); // // } // // isNull$276 = isNull$274; // if (!isNull$276) { // result$276 = result$275; // } // } // isNull$277 = isNull$276; // if (!isNull$277) { // result$277 = result$276; // } // } // agg1_sum = result$277; // ; // agg1_sumIsNull = isNull$277; // // // long result$283 = -1L; // boolean isNull$283; // if (isNull$278) { // // isNull$283 = agg2_maxIsNull; // if (!isNull$283) { // result$283 = agg2_max; // } // } else { // long result$282 = -1L; // boolean isNull$282; // if (agg2_maxIsNull) { // // isNull$282 = isNull$278; // if (!isNull$282) { // result$282 = field$278; // } // } else { // isNull$279 = isNull$278 || agg2_maxIsNull; // result$280 = false; // if (!isNull$279) { // // result$280 = field$278 > agg2_max; // // } // // long result$281 = -1L; // boolean isNull$281; // if (result$280) { // // isNull$281 = isNull$278; // if (!isNull$281) { // result$281 = field$278; // } // } else { // // isNull$281 = agg2_maxIsNull; // if (!isNull$281) { // result$281 = agg2_max; // } // } // isNull$282 = isNull$281; // if (!isNull$282) { // result$282 = result$281; // } // } // isNull$283 = isNull$282; // if (!isNull$283) { // result$283 = result$282; // } // } // agg2_max = result$283; // ; // agg2_maxIsNull = isNull$283; // // // long result$289 = -1L; // boolean isNull$289; // if (isNull$284) { // // isNull$289 = agg3_minIsNull; // if (!isNull$289) { // result$289 = agg3_min; // } // } else { // long result$288 = -1L; // boolean isNull$288; // if (agg3_minIsNull) { // // isNull$288 = isNull$284; // if (!isNull$288) { // result$288 = field$284; // } // } else { // isNull$285 = isNull$284 || agg3_minIsNull; // result$286 = false; // if (!isNull$285) { // // result$286 = field$284 < agg3_min; // // } // // long result$287 = -1L; // 
boolean isNull$287; // if (result$286) { // // isNull$287 = isNull$284; // if (!isNull$287) { // result$287 = field$284; // } // } else { // // isNull$287 = agg3_minIsNull; // if (!isNull$287) { // result$287 = agg3_min; // } // } // isNull$288 = isNull$287; // if (!isNull$288) { // result$288 = result$287; // } // } // isNull$289 = isNull$288; // if (!isNull$289) { // result$289 = result$288; // } // } // agg3_min = result$289; // ; // agg3_minIsNull = isNull$289; // // // java.lang.Iterable otherEntries$297 = // (java.lang.Iterable) distinct_backup_view_0.entries(); // if (otherEntries$297 != null) { // for (java.util.Map.Entry entry : otherEntries$297) { // java.lang.Long distinctKey$291 = (java.lang.Long) entry.getKey(); // long field$292 = -1L; // boolean isNull$293 = true; // if (distinctKey$291 != null) { // isNull$293 = false; // field$292 = (long) distinctKey$291; // } // java.lang.Long otherValue = (java.lang.Long) entry.getValue(); // java.lang.Long thisValue = (java.lang.Long) distinct_view_0.get(distinctKey$291); // if (thisValue == null) { // thisValue = 0L; // } // boolean is_distinct_value_changed_0 = false; // boolean is_distinct_value_empty_0 = false; // // // long existed$298 = ((long) thisValue) & (1L << 0); // if (existed$298 == 0) { // not existed // long otherExisted = ((long) otherValue) & (1L << 0); // if (otherExisted != 0) { // existed in other // is_distinct_value_changed_0 = true; // // do accumulate // // long result$296 = -1L; // boolean isNull$296; // if (isNull$293) { // // isNull$296 = agg4_countIsNull; // if (!isNull$296) { // result$296 = agg4_count; // } // } else { // // // isNull$294 = agg4_countIsNull || false; // result$295 = -1L; // if (!isNull$294) { // // result$295 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$296 = isNull$294; // if (!isNull$296) { // result$296 = result$295; // } // } // agg4_count = result$296; // ; // agg4_countIsNull = isNull$296; // // } // } // // thisValue = ((long) thisValue) | 
((long) otherValue); // is_distinct_value_empty_0 = false; // // if (is_distinct_value_empty_0) { // distinct_view_0.remove(distinctKey$291); // } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update // distinct_view_0.put(distinctKey$291, thisValue); // } // } // end foreach // } // end otherEntries != null // // // } // // @Override // public void setAccumulators(Object ns, org.apache.flink.table.data.RowData acc) // throws Exception { // namespace = (java.lang.Long) ns; // // long field$240; // boolean isNull$240; // long field$241; // boolean isNull$241; // long field$242; // boolean isNull$242; // long field$243; // boolean isNull$243; // long field$244; // boolean isNull$244; // org.apache.flink.table.data.binary.BinaryRawValueData field$245; // boolean isNull$245; // isNull$244 = acc.isNullAt(4); // field$244 = -1L; // if (!isNull$244) { // field$244 = acc.getLong(4); // } // isNull$240 = acc.isNullAt(0); // field$240 = -1L; // if (!isNull$240) { // field$240 = acc.getLong(0); // } // isNull$241 = acc.isNullAt(1); // field$241 = -1L; // if (!isNull$241) { // field$241 = acc.getLong(1); // } // isNull$243 = acc.isNullAt(3); // field$243 = -1L; // if (!isNull$243) { // field$243 = acc.getLong(3); // } // // // when namespace is null, the dataview is used in heap, no key and namespace set // if (namespace != null) { // distinctAcc_0_dataview.setCurrentNamespace(namespace); // distinct_view_0 = distinctAcc_0_dataview; // } else { // isNull$245 = acc.isNullAt(5); // field$245 = null; // if (!isNull$245) { // field$245 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); // } // distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$245.getJavaObject(); // } // // isNull$242 = acc.isNullAt(2); // field$242 = -1L; // if (!isNull$242) { // field$242 = acc.getLong(2); // } // // agg0_count1 = field$240; // ; // agg0_count1IsNull = isNull$240; // // // agg1_sum = field$241; // ; // 
agg1_sumIsNull = isNull$241; // // // agg2_max = field$242; // ; // agg2_maxIsNull = isNull$242; // // // agg3_min = field$243; // ; // agg3_minIsNull = isNull$243; // // // agg4_count = field$244; // ; // agg4_countIsNull = isNull$244; // // // } // // @Override // public org.apache.flink.table.data.RowData getAccumulators() throws Exception { // // // acc$239 = new org.apache.flink.table.data.GenericRowData(6); // // // if (agg0_count1IsNull) { // acc$239.setField(0, null); // } else { // acc$239.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // acc$239.setField(1, null); // } else { // acc$239.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // acc$239.setField(2, null); // } else { // acc$239.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // acc$239.setField(3, null); // } else { // acc$239.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // acc$239.setField(4, null); // } else { // acc$239.setField(4, agg4_count); // } // // // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$238 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); // // if (false) { // acc$239.setField(5, null); // } else { // acc$239.setField(5, distinct_acc$238); // } // // // return acc$239; // // } // // @Override // public org.apache.flink.table.data.RowData createAccumulators() throws Exception { // // // acc$237 = new org.apache.flink.table.data.GenericRowData(6); // // // if (false) { // acc$237.setField(0, null); // } else { // acc$237.setField(0, ((long) 0L)); // } // // // if (true) { // acc$237.setField(1, null); // } else { // acc$237.setField(1, ((long) -1L)); // } // // // if (true) { // acc$237.setField(2, null); // } else { // acc$237.setField(2, ((long) -1L)); // } // // // if (true) { // acc$237.setField(3, null); // } else { // acc$237.setField(3, ((long) -1L)); // } // // // if (false) { // acc$237.setField(4, null); // } else { // acc$237.setField(4, ((long) 0L)); // } 
// // // org.apache.flink.table.api.dataview.MapView mapview$236 = new org.apache.flink.table.api.dataview.MapView(); // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$236 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$236); // // if (false) { // acc$237.setField(5, null); // } else { // acc$237.setField(5, distinct_acc$236); // } // // // return acc$237; // // } // // @Override // public org.apache.flink.table.data.RowData getValue(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // // aggValue$299 = new org.apache.flink.table.data.GenericRowData(7); // // // if (agg0_count1IsNull) { // aggValue$299.setField(0, null); // } else { // aggValue$299.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // aggValue$299.setField(1, null); // } else { // aggValue$299.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // aggValue$299.setField(2, null); // } else { // aggValue$299.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // aggValue$299.setField(3, null); // } else { // aggValue$299.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // aggValue$299.setField(4, null); // } else { // aggValue$299.setField(4, agg4_count); // } // // // if (false) { // aggValue$299.setField(5, null); // } else { // aggValue$299.setField(5, org.apache.flink.table.data.TimestampData // .fromEpochMillis(sliceAssigner$233.getWindowStart(namespace))); // } // // // if (false) { // aggValue$299.setField(6, null); // } else { // aggValue$299.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace)); // } // // // return aggValue$299; // // } // // @Override // public void cleanup(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // distinctAcc_0_dataview.setCurrentNamespace(namespace); // distinctAcc_0_dataview.clear(); // // // } // // @Override // public void close() throws Exception { // // } //} 
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/cumulate/local_agg/KeyProjection$89.java
================================================
package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.cumulate.local_agg;

/**
 * Code-generated projection that extracts the 2-field grouping key
 * (field 0: string, field 1: int) from an input row into a reusable
 * {@code BinaryRowData}.
 *
 * <p>NOTE(review): the returned row is a shared, mutable buffer — each call to
 * {@link #apply} overwrites the previous result, as is conventional for
 * Flink-generated projections.
 */
public class KeyProjection$89 implements org.apache.flink.table.runtime.generated.Projection {

    // Reused binary output row holding the 2 projected key fields.
    org.apache.flink.table.data.binary.BinaryRowData out =
            new org.apache.flink.table.data.binary.BinaryRowData(2);

    // Writer bound to the reused output row above.
    org.apache.flink.table.data.writer.BinaryRowWriter outWriter =
            new org.apache.flink.table.data.writer.BinaryRowWriter(out);

    /**
     * @param references runtime references injected by the code generator
     *                   (unused by this projection)
     */
    public KeyProjection$89(Object[] references) throws Exception {
    }

    /**
     * Copies key fields 0 (string) and 1 (int) of {@code in1} into the shared
     * binary row, preserving nulls.
     *
     * @param in1 source row; fields 0 and 1 are read
     * @return the shared {@code BinaryRowData} containing the projected key
     */
    @Override
    public org.apache.flink.table.data.binary.BinaryRowData apply(org.apache.flink.table.data.RowData in1) {
        outWriter.reset();

        // Key field 0: string (null-preserving copy).
        if (in1.isNullAt(0)) {
            outWriter.setNullAt(0);
        } else {
            org.apache.flink.table.data.binary.BinaryStringData stringKey =
                    (org.apache.flink.table.data.binary.BinaryStringData) in1.getString(0);
            outWriter.writeString(0, stringKey);
        }

        // Key field 1: int (null-preserving copy).
        if (in1.isNullAt(1)) {
            outWriter.setNullAt(1);
        } else {
            outWriter.writeInt(1, in1.getInt(1));
        }

        outWriter.complete();
        return out;
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/cumulate/local_agg/LocalWindowAggsHandler$88.java
================================================
//package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.cumulate.local_agg; // // //public final class LocalWindowAggsHandler$88 // implements org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { // // private transient 
org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.CumulativeSliceAssigner // sliceAssigner$21; // long agg0_count1; // boolean agg0_count1IsNull; // long agg1_sum; // boolean agg1_sumIsNull; // long agg2_max; // boolean agg2_maxIsNull; // long agg3_min; // boolean agg3_minIsNull; // long agg4_count; // boolean agg4_countIsNull; // private org.apache.flink.table.api.dataview.MapView distinct_view_0; // org.apache.flink.table.data.GenericRowData acc$23 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData acc$25 = new org.apache.flink.table.data.GenericRowData(6); // private org.apache.flink.table.api.dataview.MapView otherMapView$77; // private transient org.apache.flink.table.data.conversion.RawObjectConverter converter$78; // org.apache.flink.table.data.GenericRowData aggValue$87 = new org.apache.flink.table.data.GenericRowData(5); // // private org.apache.flink.table.runtime.dataview.StateDataViewStore store; // // private java.lang.Long namespace; // // public LocalWindowAggsHandler$88(Object[] references) throws Exception { // sliceAssigner$21 = // (((org.apache.flink.table.runtime.operators.window.slicing.SliceAssigners.CumulativeSliceAssigner) references[0])); // converter$78 = (((org.apache.flink.table.data.conversion.RawObjectConverter) references[1])); // } // // private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { // return store.getRuntimeContext(); // } // // @Override // public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { // this.store = store; // // converter$78.open(getRuntimeContext().getUserCodeClassLoader()); // // } // // @Override // public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { // // boolean isNull$32; // long result$33; // long field$34; // boolean isNull$34; // boolean isNull$35; // long result$36; // boolean isNull$39; // boolean result$40; // boolean 
isNull$44; // boolean result$45; // long field$49; // boolean isNull$49; // boolean isNull$51; // long result$52; // isNull$34 = accInput.isNullAt(2); // field$34 = -1L; // if (!isNull$34) { // field$34 = accInput.getLong(2); // } // isNull$49 = accInput.isNullAt(3); // field$49 = -1L; // if (!isNull$49) { // field$49 = accInput.getLong(3); // } // // // isNull$32 = agg0_count1IsNull || false; // result$33 = -1L; // if (!isNull$32) { // // result$33 = (long) (agg0_count1 + ((long) 1L)); // // } // // agg0_count1 = result$33; // ; // agg0_count1IsNull = isNull$32; // // // long result$38 = -1L; // boolean isNull$38; // if (isNull$34) { // // isNull$38 = agg1_sumIsNull; // if (!isNull$38) { // result$38 = agg1_sum; // } // } else { // long result$37 = -1L; // boolean isNull$37; // if (agg1_sumIsNull) { // // isNull$37 = isNull$34; // if (!isNull$37) { // result$37 = field$34; // } // } else { // // // isNull$35 = agg1_sumIsNull || isNull$34; // result$36 = -1L; // if (!isNull$35) { // // result$36 = (long) (agg1_sum + field$34); // // } // // isNull$37 = isNull$35; // if (!isNull$37) { // result$37 = result$36; // } // } // isNull$38 = isNull$37; // if (!isNull$38) { // result$38 = result$37; // } // } // agg1_sum = result$38; // ; // agg1_sumIsNull = isNull$38; // // // long result$43 = -1L; // boolean isNull$43; // if (isNull$34) { // // isNull$43 = agg2_maxIsNull; // if (!isNull$43) { // result$43 = agg2_max; // } // } else { // long result$42 = -1L; // boolean isNull$42; // if (agg2_maxIsNull) { // // isNull$42 = isNull$34; // if (!isNull$42) { // result$42 = field$34; // } // } else { // isNull$39 = isNull$34 || agg2_maxIsNull; // result$40 = false; // if (!isNull$39) { // // result$40 = field$34 > agg2_max; // // } // // long result$41 = -1L; // boolean isNull$41; // if (result$40) { // // isNull$41 = isNull$34; // if (!isNull$41) { // result$41 = field$34; // } // } else { // // isNull$41 = agg2_maxIsNull; // if (!isNull$41) { // result$41 = agg2_max; // } // 
} // isNull$42 = isNull$41; // if (!isNull$42) { // result$42 = result$41; // } // } // isNull$43 = isNull$42; // if (!isNull$43) { // result$43 = result$42; // } // } // agg2_max = result$43; // ; // agg2_maxIsNull = isNull$43; // // // long result$48 = -1L; // boolean isNull$48; // if (isNull$34) { // // isNull$48 = agg3_minIsNull; // if (!isNull$48) { // result$48 = agg3_min; // } // } else { // long result$47 = -1L; // boolean isNull$47; // if (agg3_minIsNull) { // // isNull$47 = isNull$34; // if (!isNull$47) { // result$47 = field$34; // } // } else { // isNull$44 = isNull$34 || agg3_minIsNull; // result$45 = false; // if (!isNull$44) { // // result$45 = field$34 < agg3_min; // // } // // long result$46 = -1L; // boolean isNull$46; // if (result$45) { // // isNull$46 = isNull$34; // if (!isNull$46) { // result$46 = field$34; // } // } else { // // isNull$46 = agg3_minIsNull; // if (!isNull$46) { // result$46 = agg3_min; // } // } // isNull$47 = isNull$46; // if (!isNull$47) { // result$47 = result$46; // } // } // isNull$48 = isNull$47; // if (!isNull$48) { // result$48 = result$47; // } // } // agg3_min = result$48; // ; // agg3_minIsNull = isNull$48; // // // java.lang.Long distinctKey$50 = (java.lang.Long) field$49; // if (isNull$49) { // distinctKey$50 = null; // } // // java.lang.Long value$54 = (java.lang.Long) distinct_view_0.get(distinctKey$50); // if (value$54 == null) { // value$54 = 0L; // } // // boolean is_distinct_value_changed_0 = false; // // long existed$55 = ((long) value$54) & (1L << 0); // if (existed$55 == 0) { // not existed // value$54 = ((long) value$54) | (1L << 0); // is_distinct_value_changed_0 = true; // // long result$53 = -1L; // boolean isNull$53; // if (isNull$49) { // // isNull$53 = agg4_countIsNull; // if (!isNull$53) { // result$53 = agg4_count; // } // } else { // // // isNull$51 = agg4_countIsNull || false; // result$52 = -1L; // if (!isNull$51) { // // result$52 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$53 
= isNull$51; // if (!isNull$53) { // result$53 = result$52; // } // } // agg4_count = result$53; // ; // agg4_countIsNull = isNull$53; // // } // // if (is_distinct_value_changed_0) { // distinct_view_0.put(distinctKey$50, value$54); // } // // // } // // @Override // public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { // // throw new java.lang.RuntimeException( // "This function not require retract method, but the retract method is called."); // // } // // @Override // public void merge(Object ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { // namespace = (java.lang.Long) ns; // // long field$56; // boolean isNull$56; // boolean isNull$57; // long result$58; // long field$59; // boolean isNull$59; // boolean isNull$60; // long result$61; // long field$64; // boolean isNull$64; // boolean isNull$65; // boolean result$66; // long field$70; // boolean isNull$70; // boolean isNull$71; // boolean result$72; // org.apache.flink.table.data.binary.BinaryRawValueData field$76; // boolean isNull$76; // boolean isNull$82; // long result$83; // isNull$64 = otherAcc.isNullAt(2); // field$64 = -1L; // if (!isNull$64) { // field$64 = otherAcc.getLong(2); // } // isNull$59 = otherAcc.isNullAt(1); // field$59 = -1L; // if (!isNull$59) { // field$59 = otherAcc.getLong(1); // } // isNull$56 = otherAcc.isNullAt(0); // field$56 = -1L; // if (!isNull$56) { // field$56 = otherAcc.getLong(0); // } // isNull$70 = otherAcc.isNullAt(3); // field$70 = -1L; // if (!isNull$70) { // field$70 = otherAcc.getLong(3); // } // // isNull$76 = otherAcc.isNullAt(5); // field$76 = null; // if (!isNull$76) { // field$76 = ((org.apache.flink.table.data.binary.BinaryRawValueData) otherAcc.getRawValue(5)); // } // otherMapView$77 = null; // if (!isNull$76) { // otherMapView$77 = // (org.apache.flink.table.api.dataview.MapView) converter$78 // .toExternal((org.apache.flink.table.data.binary.BinaryRawValueData) field$76); // } // // // isNull$57 = 
agg0_count1IsNull || isNull$56; // result$58 = -1L; // if (!isNull$57) { // // result$58 = (long) (agg0_count1 + field$56); // // } // // agg0_count1 = result$58; // ; // agg0_count1IsNull = isNull$57; // // // long result$63 = -1L; // boolean isNull$63; // if (isNull$59) { // // isNull$63 = agg1_sumIsNull; // if (!isNull$63) { // result$63 = agg1_sum; // } // } else { // long result$62 = -1L; // boolean isNull$62; // if (agg1_sumIsNull) { // // isNull$62 = isNull$59; // if (!isNull$62) { // result$62 = field$59; // } // } else { // // // isNull$60 = agg1_sumIsNull || isNull$59; // result$61 = -1L; // if (!isNull$60) { // // result$61 = (long) (agg1_sum + field$59); // // } // // isNull$62 = isNull$60; // if (!isNull$62) { // result$62 = result$61; // } // } // isNull$63 = isNull$62; // if (!isNull$63) { // result$63 = result$62; // } // } // agg1_sum = result$63; // ; // agg1_sumIsNull = isNull$63; // // // long result$69 = -1L; // boolean isNull$69; // if (isNull$64) { // // isNull$69 = agg2_maxIsNull; // if (!isNull$69) { // result$69 = agg2_max; // } // } else { // long result$68 = -1L; // boolean isNull$68; // if (agg2_maxIsNull) { // // isNull$68 = isNull$64; // if (!isNull$68) { // result$68 = field$64; // } // } else { // isNull$65 = isNull$64 || agg2_maxIsNull; // result$66 = false; // if (!isNull$65) { // // result$66 = field$64 > agg2_max; // // } // // long result$67 = -1L; // boolean isNull$67; // if (result$66) { // // isNull$67 = isNull$64; // if (!isNull$67) { // result$67 = field$64; // } // } else { // // isNull$67 = agg2_maxIsNull; // if (!isNull$67) { // result$67 = agg2_max; // } // } // isNull$68 = isNull$67; // if (!isNull$68) { // result$68 = result$67; // } // } // isNull$69 = isNull$68; // if (!isNull$69) { // result$69 = result$68; // } // } // agg2_max = result$69; // ; // agg2_maxIsNull = isNull$69; // // // long result$75 = -1L; // boolean isNull$75; // if (isNull$70) { // // isNull$75 = agg3_minIsNull; // if (!isNull$75) { // 
result$75 = agg3_min; // } // } else { // long result$74 = -1L; // boolean isNull$74; // if (agg3_minIsNull) { // // isNull$74 = isNull$70; // if (!isNull$74) { // result$74 = field$70; // } // } else { // isNull$71 = isNull$70 || agg3_minIsNull; // result$72 = false; // if (!isNull$71) { // // result$72 = field$70 < agg3_min; // // } // // long result$73 = -1L; // boolean isNull$73; // if (result$72) { // // isNull$73 = isNull$70; // if (!isNull$73) { // result$73 = field$70; // } // } else { // // isNull$73 = agg3_minIsNull; // if (!isNull$73) { // result$73 = agg3_min; // } // } // isNull$74 = isNull$73; // if (!isNull$74) { // result$74 = result$73; // } // } // isNull$75 = isNull$74; // if (!isNull$75) { // result$75 = result$74; // } // } // agg3_min = result$75; // ; // agg3_minIsNull = isNull$75; // // // java.lang.Iterable otherEntries$85 = // (java.lang.Iterable) otherMapView$77.entries(); // if (otherEntries$85 != null) { // for (java.util.Map.Entry entry : otherEntries$85) { // java.lang.Long distinctKey$79 = (java.lang.Long) entry.getKey(); // long field$80 = -1L; // boolean isNull$81 = true; // if (distinctKey$79 != null) { // isNull$81 = false; // field$80 = (long) distinctKey$79; // } // java.lang.Long otherValue = (java.lang.Long) entry.getValue(); // java.lang.Long thisValue = (java.lang.Long) distinct_view_0.get(distinctKey$79); // if (thisValue == null) { // thisValue = 0L; // } // boolean is_distinct_value_changed_0 = false; // boolean is_distinct_value_empty_0 = false; // // // long existed$86 = ((long) thisValue) & (1L << 0); // if (existed$86 == 0) { // not existed // long otherExisted = ((long) otherValue) & (1L << 0); // if (otherExisted != 0) { // existed in other // is_distinct_value_changed_0 = true; // // do accumulate // // long result$84 = -1L; // boolean isNull$84; // if (isNull$81) { // // isNull$84 = agg4_countIsNull; // if (!isNull$84) { // result$84 = agg4_count; // } // } else { // // // isNull$82 = agg4_countIsNull || false; 
// result$83 = -1L; // if (!isNull$82) { // // result$83 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$84 = isNull$82; // if (!isNull$84) { // result$84 = result$83; // } // } // agg4_count = result$84; // ; // agg4_countIsNull = isNull$84; // // } // } // // thisValue = ((long) thisValue) | ((long) otherValue); // is_distinct_value_empty_0 = false; // // if (is_distinct_value_empty_0) { // distinct_view_0.remove(distinctKey$79); // } else if (is_distinct_value_changed_0) { // value is not empty and is changed, do update // distinct_view_0.put(distinctKey$79, thisValue); // } // } // end foreach // } // end otherEntries != null // // // } // // @Override // public void setAccumulators(Object ns, org.apache.flink.table.data.RowData acc) // throws Exception { // namespace = (java.lang.Long) ns; // // long field$26; // boolean isNull$26; // long field$27; // boolean isNull$27; // long field$28; // boolean isNull$28; // long field$29; // boolean isNull$29; // long field$30; // boolean isNull$30; // org.apache.flink.table.data.binary.BinaryRawValueData field$31; // boolean isNull$31; // isNull$30 = acc.isNullAt(4); // field$30 = -1L; // if (!isNull$30) { // field$30 = acc.getLong(4); // } // isNull$26 = acc.isNullAt(0); // field$26 = -1L; // if (!isNull$26) { // field$26 = acc.getLong(0); // } // isNull$27 = acc.isNullAt(1); // field$27 = -1L; // if (!isNull$27) { // field$27 = acc.getLong(1); // } // isNull$29 = acc.isNullAt(3); // field$29 = -1L; // if (!isNull$29) { // field$29 = acc.getLong(3); // } // // isNull$31 = acc.isNullAt(5); // field$31 = null; // if (!isNull$31) { // field$31 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); // } // distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$31.getJavaObject(); // // isNull$28 = acc.isNullAt(2); // field$28 = -1L; // if (!isNull$28) { // field$28 = acc.getLong(2); // } // // agg0_count1 = field$26; // ; // agg0_count1IsNull = isNull$26; // // // agg1_sum 
= field$27; // ; // agg1_sumIsNull = isNull$27; // // // agg2_max = field$28; // ; // agg2_maxIsNull = isNull$28; // // // agg3_min = field$29; // ; // agg3_minIsNull = isNull$29; // // // agg4_count = field$30; // ; // agg4_countIsNull = isNull$30; // // // } // // @Override // public org.apache.flink.table.data.RowData getAccumulators() throws Exception { // // // acc$25 = new org.apache.flink.table.data.GenericRowData(6); // // // if (agg0_count1IsNull) { // acc$25.setField(0, null); // } else { // acc$25.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // acc$25.setField(1, null); // } else { // acc$25.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // acc$25.setField(2, null); // } else { // acc$25.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // acc$25.setField(3, null); // } else { // acc$25.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // acc$25.setField(4, null); // } else { // acc$25.setField(4, agg4_count); // } // // // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$24 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); // // if (false) { // acc$25.setField(5, null); // } else { // acc$25.setField(5, distinct_acc$24); // } // // // return acc$25; // // } // // @Override // public org.apache.flink.table.data.RowData createAccumulators() throws Exception { // // // acc$23 = new org.apache.flink.table.data.GenericRowData(6); // // // if (false) { // acc$23.setField(0, null); // } else { // acc$23.setField(0, ((long) 0L)); // } // // // if (true) { // acc$23.setField(1, null); // } else { // acc$23.setField(1, ((long) -1L)); // } // // // if (true) { // acc$23.setField(2, null); // } else { // acc$23.setField(2, ((long) -1L)); // } // // // if (true) { // acc$23.setField(3, null); // } else { // acc$23.setField(3, ((long) -1L)); // } // // // if (false) { // acc$23.setField(4, null); // } else { // acc$23.setField(4, ((long) 0L)); // } // // // 
org.apache.flink.table.api.dataview.MapView mapview$22 = new org.apache.flink.table.api.dataview.MapView(); // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$22 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$22); // // if (false) { // acc$23.setField(5, null); // } else { // acc$23.setField(5, distinct_acc$22); // } // // // return acc$23; // // } // // @Override // public org.apache.flink.table.data.RowData getValue(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // // aggValue$87 = new org.apache.flink.table.data.GenericRowData(5); // // // if (agg0_count1IsNull) { // aggValue$87.setField(0, null); // } else { // aggValue$87.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // aggValue$87.setField(1, null); // } else { // aggValue$87.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // aggValue$87.setField(2, null); // } else { // aggValue$87.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // aggValue$87.setField(3, null); // } else { // aggValue$87.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // aggValue$87.setField(4, null); // } else { // aggValue$87.setField(4, agg4_count); // } // // // return aggValue$87; // // } // // @Override // public void cleanup(Object ns) throws Exception { // namespace = (java.lang.Long) ns; // // // } // // @Override // public void close() throws Exception { // // } //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/earlyfire/GroupAggsHandler$210.java ================================================ package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.earlyfire; /** * {@link org.apache.flink.streaming.api.operators.KeyedProcessOperator} */ public final class GroupAggsHandler$210 implements org.apache.flink.table.runtime.generated.AggsHandleFunction { long agg0_sum; boolean agg0_sumIsNull; 
long agg0_count; boolean agg0_countIsNull; long agg1_sum; boolean agg1_sumIsNull; long agg1_count; boolean agg1_countIsNull; private transient org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$105; private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$106; private org.apache.flink.table.runtime.dataview.StateMapView agg2$map_dataview; private org.apache.flink.table.data.binary.BinaryRawValueData agg2$map_dataview_raw_value; private org.apache.flink.table.runtime.dataview.StateMapView agg2$map_dataview_backup; private org.apache.flink.table.data.binary.BinaryRawValueData agg2$map_dataview_backup_raw_value; private transient org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396; private org.apache.flink.table.runtime.dataview.StateMapView agg3$map_dataview; private org.apache.flink.table.data.binary.BinaryRawValueData agg3$map_dataview_raw_value; private org.apache.flink.table.runtime.dataview.StateMapView agg3$map_dataview_backup; private org.apache.flink.table.data.binary.BinaryRawValueData agg3$map_dataview_backup_raw_value; long agg4_sum; boolean agg4_sumIsNull; long agg4_count; boolean agg4_countIsNull; private org.apache.flink.table.runtime.dataview.StateMapView agg5$map_dataview; private org.apache.flink.table.data.binary.BinaryRawValueData agg5$map_dataview_raw_value; private org.apache.flink.table.runtime.dataview.StateMapView agg5$map_dataview_backup; private org.apache.flink.table.data.binary.BinaryRawValueData agg5$map_dataview_backup_raw_value; long agg6_count1; boolean agg6_count1IsNull; private transient 
org.apache.flink.table.data.conversion.StructuredObjectConverter converter$107; private transient org.apache.flink.table.data.conversion.StructuredObjectConverter converter$109; org.apache.flink.table.data.GenericRowData acc$112 = new org.apache.flink.table.data.GenericRowData(10); org.apache.flink.table.data.GenericRowData acc$113 = new org.apache.flink.table.data.GenericRowData(10); org.apache.flink.table.data.UpdatableRowData field$119; private org.apache.flink.table.data.RowData agg2_acc_internal; private org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator agg2_acc_external; org.apache.flink.table.data.UpdatableRowData field$121; private org.apache.flink.table.data.RowData agg3_acc_internal; private org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction.MinWithRetractAccumulator agg3_acc_external; org.apache.flink.table.data.UpdatableRowData field$125; private org.apache.flink.table.data.RowData agg5_acc_internal; private org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator agg5_acc_external; org.apache.flink.table.data.GenericRowData aggValue$209 = new org.apache.flink.table.data.GenericRowData(6); private org.apache.flink.table.runtime.dataview.StateDataViewStore store; public GroupAggsHandler$210(java.lang.Object[] references) throws Exception { function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 = (((org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction) references[0])); externalSerializer$105 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1])); externalSerializer$106 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[2])); function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 = 
(((org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction) references[3])); function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 = (((org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction) references[4])); converter$107 = (((org.apache.flink.table.data.conversion.StructuredObjectConverter) references[5])); converter$109 = (((org.apache.flink.table.data.conversion.StructuredObjectConverter) references[6])); } private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { return store.getRuntimeContext(); } @Override public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { this.store = store; function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .open(new org.apache.flink.table.functions.FunctionContext(store.getRuntimeContext())); agg2$map_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("agg2$map", false, externalSerializer$105, externalSerializer$106); agg2$map_dataview_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(agg2$map_dataview); agg2$map_dataview_backup = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("agg2$map", false, externalSerializer$105, externalSerializer$106); agg2$map_dataview_backup_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(agg2$map_dataview_backup); function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .open(new org.apache.flink.table.functions.FunctionContext(store.getRuntimeContext())); agg3$map_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("agg3$map", false, externalSerializer$105, externalSerializer$106); agg3$map_dataview_raw_value = 
org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(agg3$map_dataview); agg3$map_dataview_backup = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("agg3$map", false, externalSerializer$105, externalSerializer$106); agg3$map_dataview_backup_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(agg3$map_dataview_backup); agg5$map_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("agg5$map", false, externalSerializer$105, externalSerializer$106); agg5$map_dataview_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(agg5$map_dataview); agg5$map_dataview_backup = (org.apache.flink.table.runtime.dataview.StateMapView) store .getStateMapView("agg5$map", false, externalSerializer$105, externalSerializer$106); agg5$map_dataview_backup_raw_value = org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(agg5$map_dataview_backup); converter$107.open(getRuntimeContext().getUserCodeClassLoader()); converter$109.open(getRuntimeContext().getUserCodeClassLoader()); } @Override public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { long field$127; boolean isNull$127; boolean isNull$128; long result$129; boolean isNull$132; long result$133; long field$135; boolean isNull$135; boolean isNull$136; long result$137; boolean isNull$140; long result$141; long field$143; boolean isNull$143; long field$144; boolean isNull$144; long field$145; boolean isNull$145; boolean isNull$146; long result$147; boolean isNull$150; long result$151; long field$153; boolean isNull$153; boolean isNull$154; long result$155; isNull$144 = accInput.isNullAt(5); field$144 = -1L; if (!isNull$144) { field$144 = accInput.getLong(5); } isNull$127 = accInput.isNullAt(2); field$127 = -1L; if (!isNull$127) { field$127 = accInput.getLong(2); } isNull$143 = accInput.isNullAt(4); field$143 = -1L; if (!isNull$143) { field$143 = accInput.getLong(4); } 
isNull$135 = accInput.isNullAt(3); field$135 = -1L; if (!isNull$135) { field$135 = accInput.getLong(3); } isNull$145 = accInput.isNullAt(6); field$145 = -1L; if (!isNull$145) { field$145 = accInput.getLong(6); } isNull$153 = accInput.isNullAt(1); field$153 = -1L; if (!isNull$153) { field$153 = accInput.getLong(1); } long result$131 = -1L; boolean isNull$131; if (isNull$127) { isNull$131 = agg0_sumIsNull; if (!isNull$131) { result$131 = agg0_sum; } } else { long result$130 = -1L; boolean isNull$130; if (agg0_sumIsNull) { isNull$130 = isNull$127; if (!isNull$130) { result$130 = field$127; } } else { isNull$128 = agg0_sumIsNull || isNull$127; result$129 = -1L; if (!isNull$128) { result$129 = (long) (agg0_sum + field$127); } isNull$130 = isNull$128; if (!isNull$130) { result$130 = result$129; } } isNull$131 = isNull$130; if (!isNull$131) { result$131 = result$130; } } agg0_sum = result$131; ; agg0_sumIsNull = isNull$131; long result$134 = -1L; boolean isNull$134; if (isNull$127) { isNull$134 = agg0_countIsNull; if (!isNull$134) { result$134 = agg0_count; } } else { isNull$132 = agg0_countIsNull || false; result$133 = -1L; if (!isNull$132) { result$133 = (long) (agg0_count + ((long) 1L)); } isNull$134 = isNull$132; if (!isNull$134) { result$134 = result$133; } } agg0_count = result$134; ; agg0_countIsNull = isNull$134; long result$139 = -1L; boolean isNull$139; if (isNull$135) { isNull$139 = agg1_sumIsNull; if (!isNull$139) { result$139 = agg1_sum; } } else { long result$138 = -1L; boolean isNull$138; if (agg1_sumIsNull) { isNull$138 = isNull$135; if (!isNull$138) { result$138 = field$135; } } else { isNull$136 = agg1_sumIsNull || isNull$135; result$137 = -1L; if (!isNull$136) { result$137 = (long) (agg1_sum + field$135); } isNull$138 = isNull$136; if (!isNull$138) { result$138 = result$137; } } isNull$139 = isNull$138; if (!isNull$139) { result$139 = result$138; } } agg1_sum = result$139; ; agg1_sumIsNull = isNull$139; long result$142 = -1L; boolean isNull$142; if 
(isNull$135) { isNull$142 = agg1_countIsNull; if (!isNull$142) { result$142 = agg1_count; } } else { isNull$140 = agg1_countIsNull || false; result$141 = -1L; if (!isNull$140) { result$141 = (long) (agg1_count + ((long) 1L)); } isNull$142 = isNull$140; if (!isNull$142) { result$142 = result$141; } } agg1_count = result$142; ; agg1_countIsNull = isNull$142; function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .accumulate(agg2_acc_external, isNull$143 ? null : ((java.lang.Long) field$143)); function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .accumulate(agg3_acc_external, isNull$144 ? null : ((java.lang.Long) field$144)); long result$149 = -1L; boolean isNull$149; if (isNull$145) { isNull$149 = agg4_sumIsNull; if (!isNull$149) { result$149 = agg4_sum; } } else { long result$148 = -1L; boolean isNull$148; if (agg4_sumIsNull) { isNull$148 = isNull$145; if (!isNull$148) { result$148 = field$145; } } else { isNull$146 = agg4_sumIsNull || isNull$145; result$147 = -1L; if (!isNull$146) { result$147 = (long) (agg4_sum + field$145); } isNull$148 = isNull$146; if (!isNull$148) { result$148 = result$147; } } isNull$149 = isNull$148; if (!isNull$149) { result$149 = result$148; } } agg4_sum = result$149; ; agg4_sumIsNull = isNull$149; long result$152 = -1L; boolean isNull$152; if (isNull$145) { isNull$152 = agg4_countIsNull; if (!isNull$152) { result$152 = agg4_count; } } else { isNull$150 = agg4_countIsNull || false; result$151 = -1L; if (!isNull$150) { result$151 = (long) (agg4_count + ((long) 1L)); } isNull$152 = isNull$150; if (!isNull$152) { result$152 = result$151; } } agg4_count = result$152; ; agg4_countIsNull = isNull$152; function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .accumulate(agg5_acc_external, isNull$153 ? 
null : ((java.lang.Long) field$153)); isNull$154 = agg6_count1IsNull || false; result$155 = -1L; if (!isNull$154) { result$155 = (long) (agg6_count1 + ((long) 1L)); } agg6_count1 = result$155; ; agg6_count1IsNull = isNull$154; } @Override public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { long field$156; boolean isNull$156; boolean isNull$157; long result$158; boolean isNull$159; long result$160; boolean isNull$163; long result$164; long field$166; boolean isNull$166; boolean isNull$167; long result$168; boolean isNull$169; long result$170; boolean isNull$173; long result$174; long field$176; boolean isNull$176; long field$177; boolean isNull$177; long field$178; boolean isNull$178; boolean isNull$179; long result$180; boolean isNull$181; long result$182; boolean isNull$185; long result$186; long field$188; boolean isNull$188; boolean isNull$189; long result$190; isNull$166 = retractInput.isNullAt(3); field$166 = -1L; if (!isNull$166) { field$166 = retractInput.getLong(3); } isNull$156 = retractInput.isNullAt(2); field$156 = -1L; if (!isNull$156) { field$156 = retractInput.getLong(2); } isNull$178 = retractInput.isNullAt(6); field$178 = -1L; if (!isNull$178) { field$178 = retractInput.getLong(6); } isNull$176 = retractInput.isNullAt(4); field$176 = -1L; if (!isNull$176) { field$176 = retractInput.getLong(4); } isNull$177 = retractInput.isNullAt(5); field$177 = -1L; if (!isNull$177) { field$177 = retractInput.getLong(5); } isNull$188 = retractInput.isNullAt(1); field$188 = -1L; if (!isNull$188) { field$188 = retractInput.getLong(1); } long result$162 = -1L; boolean isNull$162; if (isNull$156) { isNull$162 = agg0_sumIsNull; if (!isNull$162) { result$162 = agg0_sum; } } else { long result$161 = -1L; boolean isNull$161; if (agg0_sumIsNull) { isNull$157 = false || isNull$156; result$158 = -1L; if (!isNull$157) { result$158 = (long) (((long) 0L) - field$156); } isNull$161 = isNull$157; if (!isNull$161) { result$161 = result$158; } } 
else { isNull$159 = agg0_sumIsNull || isNull$156; result$160 = -1L; if (!isNull$159) { result$160 = (long) (agg0_sum - field$156); } isNull$161 = isNull$159; if (!isNull$161) { result$161 = result$160; } } isNull$162 = isNull$161; if (!isNull$162) { result$162 = result$161; } } agg0_sum = result$162; ; agg0_sumIsNull = isNull$162; long result$165 = -1L; boolean isNull$165; if (isNull$156) { isNull$165 = agg0_countIsNull; if (!isNull$165) { result$165 = agg0_count; } } else { isNull$163 = agg0_countIsNull || false; result$164 = -1L; if (!isNull$163) { result$164 = (long) (agg0_count - ((long) 1L)); } isNull$165 = isNull$163; if (!isNull$165) { result$165 = result$164; } } agg0_count = result$165; ; agg0_countIsNull = isNull$165; long result$172 = -1L; boolean isNull$172; if (isNull$166) { isNull$172 = agg1_sumIsNull; if (!isNull$172) { result$172 = agg1_sum; } } else { long result$171 = -1L; boolean isNull$171; if (agg1_sumIsNull) { isNull$167 = false || isNull$166; result$168 = -1L; if (!isNull$167) { result$168 = (long) (((long) 0L) - field$166); } isNull$171 = isNull$167; if (!isNull$171) { result$171 = result$168; } } else { isNull$169 = agg1_sumIsNull || isNull$166; result$170 = -1L; if (!isNull$169) { result$170 = (long) (agg1_sum - field$166); } isNull$171 = isNull$169; if (!isNull$171) { result$171 = result$170; } } isNull$172 = isNull$171; if (!isNull$172) { result$172 = result$171; } } agg1_sum = result$172; ; agg1_sumIsNull = isNull$172; long result$175 = -1L; boolean isNull$175; if (isNull$166) { isNull$175 = agg1_countIsNull; if (!isNull$175) { result$175 = agg1_count; } } else { isNull$173 = agg1_countIsNull || false; result$174 = -1L; if (!isNull$173) { result$174 = (long) (agg1_count - ((long) 1L)); } isNull$175 = isNull$173; if (!isNull$175) { result$175 = result$174; } } agg1_count = result$175; ; agg1_countIsNull = isNull$175; function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 
.retract(agg2_acc_external, isNull$176 ? null : ((java.lang.Long) field$176)); function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .retract(agg3_acc_external, isNull$177 ? null : ((java.lang.Long) field$177)); long result$184 = -1L; boolean isNull$184; if (isNull$178) { isNull$184 = agg4_sumIsNull; if (!isNull$184) { result$184 = agg4_sum; } } else { long result$183 = -1L; boolean isNull$183; if (agg4_sumIsNull) { isNull$179 = false || isNull$178; result$180 = -1L; if (!isNull$179) { result$180 = (long) (((long) 0L) - field$178); } isNull$183 = isNull$179; if (!isNull$183) { result$183 = result$180; } } else { isNull$181 = agg4_sumIsNull || isNull$178; result$182 = -1L; if (!isNull$181) { result$182 = (long) (agg4_sum - field$178); } isNull$183 = isNull$181; if (!isNull$183) { result$183 = result$182; } } isNull$184 = isNull$183; if (!isNull$184) { result$184 = result$183; } } agg4_sum = result$184; ; agg4_sumIsNull = isNull$184; long result$187 = -1L; boolean isNull$187; if (isNull$178) { isNull$187 = agg4_countIsNull; if (!isNull$187) { result$187 = agg4_count; } } else { isNull$185 = agg4_countIsNull || false; result$186 = -1L; if (!isNull$185) { result$186 = (long) (agg4_count - ((long) 1L)); } isNull$187 = isNull$185; if (!isNull$187) { result$187 = result$186; } } agg4_count = result$187; ; agg4_countIsNull = isNull$187; function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .retract(agg5_acc_external, isNull$188 ? 
null : ((java.lang.Long) field$188)); isNull$189 = agg6_count1IsNull || false; result$190 = -1L; if (!isNull$189) { result$190 = (long) (agg6_count1 - ((long) 1L)); } agg6_count1 = result$190; ; agg6_count1IsNull = isNull$189; } @Override public void merge(org.apache.flink.table.data.RowData otherAcc) throws Exception { throw new java.lang.RuntimeException("This function not require merge method, but the merge method is called."); } @Override public void setAccumulators(org.apache.flink.table.data.RowData acc) throws Exception { long field$114; boolean isNull$114; long field$115; boolean isNull$115; long field$116; boolean isNull$116; long field$117; boolean isNull$117; org.apache.flink.table.data.RowData field$118; boolean isNull$118; org.apache.flink.table.data.RowData field$120; boolean isNull$120; long field$122; boolean isNull$122; long field$123; boolean isNull$123; org.apache.flink.table.data.RowData field$124; boolean isNull$124; long field$126; boolean isNull$126; isNull$124 = acc.isNullAt(8); field$124 = null; if (!isNull$124) { field$124 = acc.getRow(8, 3); } field$125 = null; if (!isNull$124) { field$125 = new org.apache.flink.table.data.UpdatableRowData( field$124, 3); agg5$map_dataview_raw_value.setJavaObject(agg5$map_dataview); field$125.setField(2, agg5$map_dataview_raw_value); } isNull$126 = acc.isNullAt(9); field$126 = -1L; if (!isNull$126) { field$126 = acc.getLong(9); } isNull$122 = acc.isNullAt(6); field$122 = -1L; if (!isNull$122) { field$122 = acc.getLong(6); } isNull$118 = acc.isNullAt(4); field$118 = null; if (!isNull$118) { field$118 = acc.getRow(4, 3); } field$119 = null; if (!isNull$118) { field$119 = new org.apache.flink.table.data.UpdatableRowData( field$118, 3); agg2$map_dataview_raw_value.setJavaObject(agg2$map_dataview); field$119.setField(2, agg2$map_dataview_raw_value); } isNull$123 = acc.isNullAt(7); field$123 = -1L; if (!isNull$123) { field$123 = acc.getLong(7); } isNull$114 = acc.isNullAt(0); field$114 = -1L; if (!isNull$114) { 
field$114 = acc.getLong(0); } isNull$115 = acc.isNullAt(1); field$115 = -1L; if (!isNull$115) { field$115 = acc.getLong(1); } isNull$117 = acc.isNullAt(3); field$117 = -1L; if (!isNull$117) { field$117 = acc.getLong(3); } isNull$120 = acc.isNullAt(5); field$120 = null; if (!isNull$120) { field$120 = acc.getRow(5, 3); } field$121 = null; if (!isNull$120) { field$121 = new org.apache.flink.table.data.UpdatableRowData( field$120, 3); agg3$map_dataview_raw_value.setJavaObject(agg3$map_dataview); field$121.setField(2, agg3$map_dataview_raw_value); } isNull$116 = acc.isNullAt(2); field$116 = -1L; if (!isNull$116) { field$116 = acc.getLong(2); } agg0_sum = field$114; ; agg0_sumIsNull = isNull$114; agg0_count = field$115; ; agg0_countIsNull = isNull$115; agg1_sum = field$116; ; agg1_sumIsNull = isNull$116; agg1_count = field$117; ; agg1_countIsNull = isNull$117; agg2_acc_internal = field$119; agg2_acc_external = (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) converter$107 .toExternal((org.apache.flink.table.data.RowData) agg2_acc_internal); agg3_acc_internal = field$121; agg3_acc_external = (org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction.MinWithRetractAccumulator) converter$109 .toExternal((org.apache.flink.table.data.RowData) agg3_acc_internal); agg4_sum = field$122; ; agg4_sumIsNull = isNull$122; agg4_count = field$123; ; agg4_countIsNull = isNull$123; agg5_acc_internal = field$125; agg5_acc_external = (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) converter$107 .toExternal((org.apache.flink.table.data.RowData) agg5_acc_internal); agg6_count1 = field$126; ; agg6_count1IsNull = isNull$126; } @Override public void resetAccumulators() throws Exception { agg0_sum = ((long) -1L); agg0_sumIsNull = true; agg0_count = ((long) 0L); agg0_countIsNull = false; agg1_sum = ((long) -1L); agg1_sumIsNull = true; agg1_count = ((long) 
0L); agg1_countIsNull = false; agg2_acc_external = (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .createAccumulator(); agg2_acc_internal = (org.apache.flink.table.data.RowData) converter$107.toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) agg2_acc_external); agg3_acc_external = (org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction.MinWithRetractAccumulator) function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .createAccumulator(); agg3_acc_internal = (org.apache.flink.table.data.RowData) converter$109.toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction.MinWithRetractAccumulator) agg3_acc_external); agg4_sum = ((long) -1L); agg4_sumIsNull = true; agg4_count = ((long) 0L); agg4_countIsNull = false; agg5_acc_external = (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .createAccumulator(); agg5_acc_internal = (org.apache.flink.table.data.RowData) converter$107.toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) agg5_acc_external); agg6_count1 = ((long) 0L); agg6_count1IsNull = false; } @Override public org.apache.flink.table.data.RowData getAccumulators() throws Exception { acc$113 = new org.apache.flink.table.data.GenericRowData(10); if (agg0_sumIsNull) { acc$113.setField(0, null); } else { acc$113.setField(0, agg0_sum); } if (agg0_countIsNull) { acc$113.setField(1, null); } else { acc$113.setField(1, agg0_count); } if 
(agg1_sumIsNull) { acc$113.setField(2, null); } else { acc$113.setField(2, agg1_sum); } if (agg1_countIsNull) { acc$113.setField(3, null); } else { acc$113.setField(3, agg1_count); } agg2_acc_internal = (org.apache.flink.table.data.RowData) converter$107.toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) agg2_acc_external); if (false) { acc$113.setField(4, null); } else { acc$113.setField(4, agg2_acc_internal); } agg3_acc_internal = (org.apache.flink.table.data.RowData) converter$109.toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction.MinWithRetractAccumulator) agg3_acc_external); if (false) { acc$113.setField(5, null); } else { acc$113.setField(5, agg3_acc_internal); } if (agg4_sumIsNull) { acc$113.setField(6, null); } else { acc$113.setField(6, agg4_sum); } if (agg4_countIsNull) { acc$113.setField(7, null); } else { acc$113.setField(7, agg4_count); } agg5_acc_internal = (org.apache.flink.table.data.RowData) converter$107.toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) agg5_acc_external); if (false) { acc$113.setField(8, null); } else { acc$113.setField(8, agg5_acc_internal); } if (agg6_count1IsNull) { acc$113.setField(9, null); } else { acc$113.setField(9, agg6_count1); } return acc$113; } @Override public org.apache.flink.table.data.RowData createAccumulators() throws Exception { acc$112 = new org.apache.flink.table.data.GenericRowData(10); if (true) { acc$112.setField(0, null); } else { acc$112.setField(0, ((long) -1L)); } if (false) { acc$112.setField(1, null); } else { acc$112.setField(1, ((long) 0L)); } if (true) { acc$112.setField(2, null); } else { acc$112.setField(2, ((long) -1L)); } if (false) { acc$112.setField(3, null); } else { acc$112.setField(3, ((long) 0L)); } org.apache.flink.table.data.RowData acc_internal$108 = (org.apache.flink.table.data.RowData) 
(org.apache.flink.table.data.RowData) converter$107 .toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .createAccumulator()); if (false) { acc$112.setField(4, null); } else { acc$112.setField(4, acc_internal$108); } org.apache.flink.table.data.RowData acc_internal$110 = (org.apache.flink.table.data.RowData) (org.apache.flink.table.data.RowData) converter$109 .toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MinWithRetractAggFunction.MinWithRetractAccumulator) function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .createAccumulator()); if (false) { acc$112.setField(5, null); } else { acc$112.setField(5, acc_internal$110); } if (true) { acc$112.setField(6, null); } else { acc$112.setField(6, ((long) -1L)); } if (false) { acc$112.setField(7, null); } else { acc$112.setField(7, ((long) 0L)); } org.apache.flink.table.data.RowData acc_internal$111 = (org.apache.flink.table.data.RowData) (org.apache.flink.table.data.RowData) converter$107 .toInternalOrNull( (org.apache.flink.table.planner.functions.aggfunctions.MaxWithRetractAggFunction.MaxWithRetractAccumulator) function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .createAccumulator()); if (false) { acc$112.setField(8, null); } else { acc$112.setField(8, acc_internal$111); } if (false) { acc$112.setField(9, null); } else { acc$112.setField(9, ((long) 0L)); } return acc$112; } @Override public org.apache.flink.table.data.RowData getValue() throws Exception { boolean isNull$191; boolean result$192; boolean isNull$194; boolean result$195; boolean isNull$203; boolean result$204; aggValue$209 = new org.apache.flink.table.data.GenericRowData(6); isNull$191 = agg0_countIsNull 
|| false; result$192 = false; if (!isNull$191) { result$192 = agg0_count == ((long) 0L); } long result$193 = -1L; boolean isNull$193; if (result$192) { isNull$193 = true; if (!isNull$193) { result$193 = ((long) -1L); } } else { isNull$193 = agg0_sumIsNull; if (!isNull$193) { result$193 = agg0_sum; } } if (isNull$193) { aggValue$209.setField(0, null); } else { aggValue$209.setField(0, result$193); } isNull$194 = agg1_countIsNull || false; result$195 = false; if (!isNull$194) { result$195 = agg1_count == ((long) 0L); } long result$196 = -1L; boolean isNull$196; if (result$195) { isNull$196 = true; if (!isNull$196) { result$196 = ((long) -1L); } } else { isNull$196 = agg1_sumIsNull; if (!isNull$196) { result$196 = agg1_sum; } } if (isNull$196) { aggValue$209.setField(1, null); } else { aggValue$209.setField(1, result$196); } java.lang.Long value_external$197 = (java.lang.Long) function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .getValue(agg2_acc_external); java.lang.Long value_internal$198 = value_external$197; boolean valueIsNull$199 = value_internal$198 == null; if (valueIsNull$199) { aggValue$209.setField(2, null); } else { aggValue$209.setField(2, value_internal$198); } java.lang.Long value_external$200 = (java.lang.Long) function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .getValue(agg3_acc_external); java.lang.Long value_internal$201 = value_external$200; boolean valueIsNull$202 = value_internal$201 == null; if (valueIsNull$202) { aggValue$209.setField(3, null); } else { aggValue$209.setField(3, value_internal$201); } isNull$203 = agg4_countIsNull || false; result$204 = false; if (!isNull$203) { result$204 = agg4_count == ((long) 0L); } long result$205 = -1L; boolean isNull$205; if (result$204) { isNull$205 = true; if (!isNull$205) { result$205 = ((long) -1L); } } else { isNull$205 = agg4_sumIsNull; if (!isNull$205) { 
result$205 = agg4_sum; } } if (isNull$205) { aggValue$209.setField(4, null); } else { aggValue$209.setField(4, result$205); } java.lang.Long value_external$206 = (java.lang.Long) function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .getValue(agg5_acc_external); java.lang.Long value_internal$207 = value_external$206; boolean valueIsNull$208 = value_internal$207 == null; if (valueIsNull$208) { aggValue$209.setField(5, null); } else { aggValue$209.setField(5, value_internal$207); } return aggValue$209; } @Override public void cleanup() throws Exception { agg2$map_dataview.clear(); agg3$map_dataview.clear(); agg5$map_dataview.clear(); } @Override public void close() throws Exception { function_org$apache$flink$table$planner$functions$aggfunctions$MaxWithRetractAggFunction$d78f624eeff2a86742b5f64899608448 .close(); function_org$apache$flink$table$planner$functions$aggfunctions$MinWithRetractAggFunction$00780063e1d540e25ad535dd2f326396 .close(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_02_cumulate_window/earlyfire/GroupingWindowAggsHandler$57.java ================================================ //package flink.examples.sql._07.query._04_window_agg._02_cumulate_window.earlyfire; // ///** // * {@link org.apache.flink.table.runtime.operators.window.AggregateWindowOperator} // */ //public final class GroupingWindowAggsHandler$57 // implements // org.apache.flink.table.runtime.generated.NamespaceAggsHandleFunction { // // long agg0_count1; // boolean agg0_count1IsNull; // long agg1_sum; // boolean agg1_sumIsNull; // long agg2_max; // boolean agg2_maxIsNull; // long agg3_min; // boolean agg3_minIsNull; // long agg4_count; // boolean agg4_countIsNull; // private transient org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$20; // private transient 
org.apache.flink.table.runtime.typeutils.ExternalSerializer externalSerializer$21; // private org.apache.flink.table.runtime.dataview.StateMapView distinctAcc_0_dataview; // private org.apache.flink.table.data.binary.BinaryRawValueData distinctAcc_0_dataview_raw_value; // private org.apache.flink.table.api.dataview.MapView distinct_view_0; // org.apache.flink.table.data.GenericRowData acc$23 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData acc$25 = new org.apache.flink.table.data.GenericRowData(6); // org.apache.flink.table.data.GenericRowData aggValue$56 = new org.apache.flink.table.data.GenericRowData(9); // // private org.apache.flink.table.runtime.dataview.StateDataViewStore store; // // private org.apache.flink.table.runtime.operators.window.TimeWindow namespace; // // public GroupingWindowAggsHandler$57(Object[] references) throws Exception { // externalSerializer$20 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[0])); // externalSerializer$21 = (((org.apache.flink.table.runtime.typeutils.ExternalSerializer) references[1])); // } // // private org.apache.flink.api.common.functions.RuntimeContext getRuntimeContext() { // return store.getRuntimeContext(); // } // // @Override // public void open(org.apache.flink.table.runtime.dataview.StateDataViewStore store) throws Exception { // this.store = store; // // distinctAcc_0_dataview = (org.apache.flink.table.runtime.dataview.StateMapView) store // .getStateMapView("distinctAcc_0", true, externalSerializer$20, externalSerializer$21); // distinctAcc_0_dataview_raw_value = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinctAcc_0_dataview); // // distinct_view_0 = distinctAcc_0_dataview; // } // // @Override // public void accumulate(org.apache.flink.table.data.RowData accInput) throws Exception { // // boolean isNull$32; // long result$33; // long field$34; // boolean isNull$34; // boolean isNull$35; // long 
result$36; // boolean isNull$39; // boolean result$40; // boolean isNull$44; // boolean result$45; // long field$49; // boolean isNull$49; // boolean isNull$51; // long result$52; // isNull$49 = accInput.isNullAt(4); // field$49 = -1L; // if (!isNull$49) { // field$49 = accInput.getLong(4); // } // isNull$34 = accInput.isNullAt(3); // field$34 = -1L; // if (!isNull$34) { // field$34 = accInput.getLong(3); // } // // // isNull$32 = agg0_count1IsNull || false; // result$33 = -1L; // if (!isNull$32) { // // result$33 = (long) (agg0_count1 + ((long) 1L)); // // } // // agg0_count1 = result$33; // ; // agg0_count1IsNull = isNull$32; // // // long result$38 = -1L; // boolean isNull$38; // if (isNull$34) { // // isNull$38 = agg1_sumIsNull; // if (!isNull$38) { // result$38 = agg1_sum; // } // } else { // long result$37 = -1L; // boolean isNull$37; // if (agg1_sumIsNull) { // // isNull$37 = isNull$34; // if (!isNull$37) { // result$37 = field$34; // } // } else { // // // isNull$35 = agg1_sumIsNull || isNull$34; // result$36 = -1L; // if (!isNull$35) { // // result$36 = (long) (agg1_sum + field$34); // // } // // isNull$37 = isNull$35; // if (!isNull$37) { // result$37 = result$36; // } // } // isNull$38 = isNull$37; // if (!isNull$38) { // result$38 = result$37; // } // } // agg1_sum = result$38; // ; // agg1_sumIsNull = isNull$38; // // // long result$43 = -1L; // boolean isNull$43; // if (isNull$34) { // // isNull$43 = agg2_maxIsNull; // if (!isNull$43) { // result$43 = agg2_max; // } // } else { // long result$42 = -1L; // boolean isNull$42; // if (agg2_maxIsNull) { // // isNull$42 = isNull$34; // if (!isNull$42) { // result$42 = field$34; // } // } else { // isNull$39 = isNull$34 || agg2_maxIsNull; // result$40 = false; // if (!isNull$39) { // // result$40 = field$34 > agg2_max; // // } // // long result$41 = -1L; // boolean isNull$41; // if (result$40) { // // isNull$41 = isNull$34; // if (!isNull$41) { // result$41 = field$34; // } // } else { // // isNull$41 = 
agg2_maxIsNull; // if (!isNull$41) { // result$41 = agg2_max; // } // } // isNull$42 = isNull$41; // if (!isNull$42) { // result$42 = result$41; // } // } // isNull$43 = isNull$42; // if (!isNull$43) { // result$43 = result$42; // } // } // agg2_max = result$43; // ; // agg2_maxIsNull = isNull$43; // // // long result$48 = -1L; // boolean isNull$48; // if (isNull$34) { // // isNull$48 = agg3_minIsNull; // if (!isNull$48) { // result$48 = agg3_min; // } // } else { // long result$47 = -1L; // boolean isNull$47; // if (agg3_minIsNull) { // // isNull$47 = isNull$34; // if (!isNull$47) { // result$47 = field$34; // } // } else { // isNull$44 = isNull$34 || agg3_minIsNull; // result$45 = false; // if (!isNull$44) { // // result$45 = field$34 < agg3_min; // // } // // long result$46 = -1L; // boolean isNull$46; // if (result$45) { // // isNull$46 = isNull$34; // if (!isNull$46) { // result$46 = field$34; // } // } else { // // isNull$46 = agg3_minIsNull; // if (!isNull$46) { // result$46 = agg3_min; // } // } // isNull$47 = isNull$46; // if (!isNull$47) { // result$47 = result$46; // } // } // isNull$48 = isNull$47; // if (!isNull$48) { // result$48 = result$47; // } // } // agg3_min = result$48; // ; // agg3_minIsNull = isNull$48; // // // java.lang.Long distinctKey$50 = (java.lang.Long) field$49; // if (isNull$49) { // distinctKey$50 = null; // } // // java.lang.Long value$54 = (java.lang.Long) distinct_view_0.get(distinctKey$50); // if (value$54 == null) { // value$54 = 0L; // } // // boolean is_distinct_value_changed_0 = false; // // long existed$55 = ((long) value$54) & (1L << 0); // if (existed$55 == 0) { // not existed // value$54 = ((long) value$54) | (1L << 0); // is_distinct_value_changed_0 = true; // // long result$53 = -1L; // boolean isNull$53; // if (isNull$49) { // // isNull$53 = agg4_countIsNull; // if (!isNull$53) { // result$53 = agg4_count; // } // } else { // // // isNull$51 = agg4_countIsNull || false; // result$52 = -1L; // if (!isNull$51) { // // 
result$52 = (long) (agg4_count + ((long) 1L)); // // } // // isNull$53 = isNull$51; // if (!isNull$53) { // result$53 = result$52; // } // } // agg4_count = result$53; // ; // agg4_countIsNull = isNull$53; // // } // // if (is_distinct_value_changed_0) { // distinct_view_0.put(distinctKey$50, value$54); // } // // // } // // @Override // public void retract(org.apache.flink.table.data.RowData retractInput) throws Exception { // // throw new java.lang.RuntimeException( // "This function not require retract method, but the retract method is called."); // // } // // @Override // public void merge(Object ns, org.apache.flink.table.data.RowData otherAcc) throws Exception { // namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; // // throw new java.lang.RuntimeException("This function not require merge method, but the merge method is called."); // // } // // @Override // public void setAccumulators(Object ns, org.apache.flink.table.data.RowData acc) // throws Exception { // namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; // // long field$26; // boolean isNull$26; // long field$27; // boolean isNull$27; // long field$28; // boolean isNull$28; // long field$29; // boolean isNull$29; // long field$30; // boolean isNull$30; // org.apache.flink.table.data.binary.BinaryRawValueData field$31; // boolean isNull$31; // isNull$30 = acc.isNullAt(4); // field$30 = -1L; // if (!isNull$30) { // field$30 = acc.getLong(4); // } // isNull$26 = acc.isNullAt(0); // field$26 = -1L; // if (!isNull$26) { // field$26 = acc.getLong(0); // } // isNull$27 = acc.isNullAt(1); // field$27 = -1L; // if (!isNull$27) { // field$27 = acc.getLong(1); // } // isNull$29 = acc.isNullAt(3); // field$29 = -1L; // if (!isNull$29) { // field$29 = acc.getLong(3); // } // // // when namespace is null, the dataview is used in heap, no key and namespace set // if (namespace != null) { // distinctAcc_0_dataview.setCurrentNamespace(namespace); // distinct_view_0 = 
distinctAcc_0_dataview; // } else { // isNull$31 = acc.isNullAt(5); // field$31 = null; // if (!isNull$31) { // field$31 = ((org.apache.flink.table.data.binary.BinaryRawValueData) acc.getRawValue(5)); // } // distinct_view_0 = (org.apache.flink.table.api.dataview.MapView) field$31.getJavaObject(); // } // // isNull$28 = acc.isNullAt(2); // field$28 = -1L; // if (!isNull$28) { // field$28 = acc.getLong(2); // } // // agg0_count1 = field$26; // ; // agg0_count1IsNull = isNull$26; // // // agg1_sum = field$27; // ; // agg1_sumIsNull = isNull$27; // // // agg2_max = field$28; // ; // agg2_maxIsNull = isNull$28; // // // agg3_min = field$29; // ; // agg3_minIsNull = isNull$29; // // // agg4_count = field$30; // ; // agg4_countIsNull = isNull$30; // // // } // // @Override // public org.apache.flink.table.data.RowData getAccumulators() throws Exception { // // // acc$25 = new org.apache.flink.table.data.GenericRowData(6); // // // if (agg0_count1IsNull) { // acc$25.setField(0, null); // } else { // acc$25.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // acc$25.setField(1, null); // } else { // acc$25.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // acc$25.setField(2, null); // } else { // acc$25.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // acc$25.setField(3, null); // } else { // acc$25.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // acc$25.setField(4, null); // } else { // acc$25.setField(4, agg4_count); // } // // // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$24 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(distinct_view_0); // // if (false) { // acc$25.setField(5, null); // } else { // acc$25.setField(5, distinct_acc$24); // } // // // return acc$25; // // } // // @Override // public org.apache.flink.table.data.RowData createAccumulators() throws Exception { // // // acc$23 = new org.apache.flink.table.data.GenericRowData(6); // // // if (false) { // 
acc$23.setField(0, null); // } else { // acc$23.setField(0, ((long) 0L)); // } // // // if (true) { // acc$23.setField(1, null); // } else { // acc$23.setField(1, ((long) -1L)); // } // // // if (true) { // acc$23.setField(2, null); // } else { // acc$23.setField(2, ((long) -1L)); // } // // // if (true) { // acc$23.setField(3, null); // } else { // acc$23.setField(3, ((long) -1L)); // } // // // if (false) { // acc$23.setField(4, null); // } else { // acc$23.setField(4, ((long) 0L)); // } // // // org.apache.flink.table.api.dataview.MapView mapview$22 = new org.apache.flink.table.api.dataview.MapView(); // org.apache.flink.table.data.binary.BinaryRawValueData distinct_acc$22 = // org.apache.flink.table.data.binary.BinaryRawValueData.fromObject(mapview$22); // // if (false) { // acc$23.setField(5, null); // } else { // acc$23.setField(5, distinct_acc$22); // } // // // return acc$23; // // } // // @Override // public org.apache.flink.table.data.RowData getValue(Object ns) throws Exception { // namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; // // // aggValue$56 = new org.apache.flink.table.data.GenericRowData(9); // // // if (agg0_count1IsNull) { // aggValue$56.setField(0, null); // } else { // aggValue$56.setField(0, agg0_count1); // } // // // if (agg1_sumIsNull) { // aggValue$56.setField(1, null); // } else { // aggValue$56.setField(1, agg1_sum); // } // // // if (agg2_maxIsNull) { // aggValue$56.setField(2, null); // } else { // aggValue$56.setField(2, agg2_max); // } // // // if (agg3_minIsNull) { // aggValue$56.setField(3, null); // } else { // aggValue$56.setField(3, agg3_min); // } // // // if (agg4_countIsNull) { // aggValue$56.setField(4, null); // } else { // aggValue$56.setField(4, agg4_count); // } // // // if (false) { // aggValue$56.setField(5, null); // } else { // aggValue$56.setField(5, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace.getStart())); // } // // // if (false) { // 
aggValue$56.setField(6, null); // } else { // aggValue$56.setField(6, org.apache.flink.table.data.TimestampData.fromEpochMillis(namespace.getEnd())); // } // // // if (false) { // aggValue$56.setField(7, null); // } else { // aggValue$56.setField(7, // org.apache.flink.table.data.TimestampData.fromEpochMillis( // namespace.getEnd() - 1) // ); // } // // // if (true) { // aggValue$56.setField(8, null); // } else { // aggValue$56.setField(8, org.apache.flink.table.data.TimestampData.fromEpochMillis(-1L)); // } // // // return aggValue$56; // // } // // @Override // public void cleanup(Object ns) throws Exception { // namespace = (org.apache.flink.table.runtime.operators.window.TimeWindow) ns; // // distinctAcc_0_dataview.setCurrentNamespace(namespace); // distinctAcc_0_dataview.clear(); // // // } // // @Override // public void close() throws Exception { // // } //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_04_window_agg/_03_hop_window/HopWindowGroupWindowAggTest.java ================================================ package flink.examples.sql._07.query._04_window_agg._03_hop_window; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class HopWindowGroupWindowAggTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "-- 数据源表,用户购买行为记录表\n" + "CREATE TABLE source_table (\n" + " -- 维度数据\n" + " dim STRING,\n" + " -- 用户 id\n" + " user_id BIGINT,\n" + " -- 用户\n" + " price BIGINT,\n" + " -- 事件时间戳\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " -- watermark 设置\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '5',\n" + " 'fields.dim.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000',\n" + " 'fields.price.min' = 
// ---- tail of HopWindowGroupWindowAggTest.main(): remainder of the SQL script string ----
// NOTE(review): despite the "hop window" class/file name, the query below uses
// session(row_time, interval '1' second) windows, not HOP windows — presumably
// copied from a session-window example; confirm the intent.
'1',\n"
                + " 'fields.price.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "-- 数据汇表\n"
                + "CREATE TABLE sink_table (\n"
                + " dim STRING,\n"
                + " pv BIGINT, -- 购买商品数量\n"
                + " window_start bigint\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "-- 数据处理逻辑\n"
                // Two-level aggregation: inner query buckets by mod(user_id, 1024) to
                // spread hot keys, outer query merges the per-bucket counts.
                + "insert into sink_table\n"
                + "select dim,\n"
                + "\t sum(bucket_pv) as pv,\n"
                + "\t window_start\n"
                + "from (\n"
                + "\t SELECT dim,\n"
                + "\t \t UNIX_TIMESTAMP(CAST(session_start(row_time, interval '1' second) AS STRING)) * 1000 as "
                + "window_start, \n"
                + "\t count(1) as bucket_pv\n"
                + "\t FROM source_table\n"
                + "\t GROUP BY dim\n"
                + "\t\t\t , mod(user_id, 1024)\n"
                + " , session(row_time, interval '1' second)\n"
                + ")\n"
                + "group by dim,\n"
                + "\t\t window_start";

        // The script holds several statements; split on ';' and submit each one.
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number/RowNumberOrderByBigintTest.java ================================================

package flink.examples.sql._07.query._05_over._01_row_number;

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Deduplication example: keeps the first row per user_id, ordered by a BIGINT
 * column (server_timestamp), via ROW_NUMBER() ... WHERE rn = 1.
 */
public class RowNumberOrderByBigintTest {

    public static void main(String[] args) throws Exception {
        // Local environment with the Flink web UI enabled.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Restart up to 6 times within a 10-minute window, 5s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);

        // checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);
        // NOTE(review): pipeline name says "TUMBLE WINDOW" but this job is a
        // row_number deduplication — looks like a copy/paste leftover.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 TUMBLE WINDOW 案例");
        tEnv.getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " rn BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " name,\n"
                + " rn\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " row_number() over(partition by user_id order by server_timestamp) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1";

        /**
         * join operator: {@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator}
         */
        Arrays.stream(sql.split(";"))
                .forEach(tEnv::executeSql);
    }
}

================================================ FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number/RowNumberOrderByStringTest.java ================================================

package flink.examples.sql._07.query._05_over._01_row_number;

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Deduplication example: keeps the first row per user_id ordered by a STRING
 * column (lexicographic order), via ROW_NUMBER() ... WHERE rn = 1.
 */
public class RowNumberOrderByStringTest {

    public static void main(String[] args) throws Exception {
        // Local environment with the Flink web UI enabled.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Restart up to 6 times within a 10-minute window, 5s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);

        // checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);
        // NOTE(review): pipeline name says "TUMBLE WINDOW" but this job is a
        // row_number deduplication — looks like a copy/paste leftover.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 TUMBLE WINDOW 案例");
        tEnv.getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " server_timestamp STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.server_timestamp.length' = '1'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " server_timestamp STRING,\n"
                + " rn BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " name,\n"
                + " server_timestamp,\n"
                + " rn\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " server_timestamp,\n"
                + " row_number() over(partition by user_id order by server_timestamp) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1";

        /**
         * join operator: {@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator}
         */
        Arrays.stream(sql.split(";"))
                .forEach(tEnv::executeSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number/RowNumberOrderByUnixTimestampTest.java ================================================

package flink.examples.sql._07.query._05_over._01_row_number;

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class
// Deduplication example ordered by a computed column: server_timestamp is
// UNIX_TIMESTAMP(), i.e. second-granularity wall-clock time.
// NOTE(review): second granularity means many rows share the same ORDER BY
// value — tie-breaking among them is up to the dedup operator; confirm
// acceptable before relying on "first row wins" semantics.
RowNumberOrderByUnixTimestampTest {

    public static void main(String[] args) throws Exception {
        // Local environment with the Flink web UI enabled.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Restart up to 6 times within a 10-minute window, 5s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);

        // checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);
        // NOTE(review): pipeline name says "TUMBLE WINDOW" but this job is a
        // row_number deduplication — looks like a copy/paste leftover.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 TUMBLE WINDOW 案例");
        tEnv.getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " server_timestamp as UNIX_TIMESTAMP()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " rn BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " name,\n"
                + " rn\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " row_number() over(partition by user_id order by server_timestamp) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1";

        /** join operator: {@link
org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator} */
        Arrays.stream(sql.split(";"))
                .forEach(tEnv::executeSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number/RowNumberWithoutPartitionKeyTest.java ================================================

package flink.examples.sql._07.query._05_over._01_row_number;

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * row_number() with NO partition key — a single global ordering.
 * NOTE(review): without PARTITION BY the dedup state is keyed on a constant,
 * so all traffic funnels through one task regardless of setParallelism(10);
 * confirm this is the intended demonstration.
 */
public class RowNumberWithoutPartitionKeyTest {

    public static void main(String[] args) throws Exception {
        // Local environment with the Flink web UI enabled.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Restart up to 6 times within a 10-minute window, 5s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);

        // checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);
        // NOTE(review): pipeline name says "TUMBLE WINDOW" but this job is a
        // row_number query — looks like a copy/paste leftover.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 TUMBLE WINDOW 案例");
        tEnv.getConfig().getConfiguration().setString("state.backend", "rocksdb");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " rn BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " name,\n"
                + " rn\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                // No PARTITION BY: a single global row_number ordering.
                + " row_number() over(order by server_timestamp) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1";

        /**
         * join operator: {@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator}
         */
        Arrays.stream(sql.split(";"))
                .forEach(tEnv::executeSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number/RowNumberWithoutRowNumberEqual1Test.java ================================================

package flink.examples.sql._07.query._05_over._01_row_number;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * row_number() over proctime WITHOUT the outer "where rn = 1" filter — every
 * input row is emitted with its running rank instead of being deduplicated.
 * Also registers a catalog UDF (mod_udf) backed by {@code Scalar_UDF} below.
 */
public class RowNumberWithoutRowNumberEqual1Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " server_timestamp BIGINT,\n"
                + " proctime AS PROCTIME()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '100000',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " rn BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "CREATE FUNCTION mod_udf as 'flink.examples.sql._07.query._05_over._01_row_number.Scalar_UDF';\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select mod_udf(user_id, 1024) as user_id,\n"
                + " name,\n"
                + " rn\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " row_number() over(partition by user_id order by proctime) as rn\n"
                + " FROM source_table\n"
                + ")\n";

        /**
         * join operator: {@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator}
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_01_row_number/Scalar_UDF.java ================================================

package flink.examples.sql._07.query._05_over._01_row_number;

import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.ScalarFunction;

/**
 * Scalar UDF registered as "mod_udf": returns id % remainder as an int.
 * NOTE(review): "remainder" is really the divisor/modulus; also eval() NPEs if
 * SQL passes a NULL user_id (Long auto-unboxing) — confirm nulls cannot reach it.
 */
public class Scalar_UDF extends ScalarFunction {

    @Override
    public void open(FunctionContext context) throws Exception {
        super.open(context);
    }

    // Computes id modulo `remainder`, narrowed to int (safe: result < remainder).
    public int eval(Long id, int remainder) {
        return (int) (id % remainder);
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_02_agg/RangeIntervalProctimeTest.java ================================================

package flink.examples.sql._07.query._05_over._02_agg;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * OVER aggregation with a RANGE INTERVAL bound on processing time:
 * per-product rolling SUM(amount) over the last hour.
 */
public class RangeIntervalProctimeTest {

    public static void main(String[]
args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " order_id BIGINT,\n" + " product BIGINT,\n" + " amount BIGINT,\n" + " order_time as PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.min' = '1',\n" + " 'fields.order_id.max' = '2',\n" + " 'fields.amount.min' = '1',\n" + " 'fields.amount.max' = '10',\n" + " 'fields.product.min' = '1',\n" + " 'fields.product.max' = '2'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " product BIGINT,\n" + " order_time TIMESTAMP(3),\n" + " amount BIGINT,\n" + " one_hour_prod_amount_sum BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT product, order_time, amount,\n" + " SUM(amount) OVER (\n" + " PARTITION BY product\n" + " ORDER BY order_time\n" + " RANGE BETWEEN INTERVAL '1' HOUR PRECEDING AND CURRENT ROW\n" + " ) AS one_hour_prod_amount_sum\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_02_agg/RangeIntervalRowtimeAscendingTest.java ================================================ package flink.examples.sql._07.query._05_over._02_agg; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class RangeIntervalRowtimeAscendingTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " order_id BIGINT,\n" + " product BIGINT,\n" + " amount BIGINT,\n" + " order_time as cast(CURRENT_TIMESTAMP as TIMESTAMP(3)),\n" + " WATERMARK 
FOR order_time AS order_time - INTERVAL '0.001' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.min' = '1',\n" + " 'fields.order_id.max' = '2',\n" + " 'fields.amount.min' = '1',\n" + " 'fields.amount.max' = '10',\n" + " 'fields.product.min' = '1',\n" + " 'fields.product.max' = '2'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " product BIGINT,\n" + " order_time TIMESTAMP(3),\n" + " amount BIGINT,\n" + " one_hour_prod_amount_sum BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT product, order_time, amount,\n" + " SUM(amount) OVER (\n" + " PARTITION BY product\n" + " ORDER BY order_time\n" + " RANGE BETWEEN INTERVAL '1' HOUR PRECEDING AND CURRENT ROW\n" + " ) AS one_hour_prod_amount_sum\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_02_agg/RangeIntervalRowtimeBoundedOutOfOrdernessTest.java ================================================ package flink.examples.sql._07.query._05_over._02_agg; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class RangeIntervalRowtimeBoundedOutOfOrdernessTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " order_id BIGINT,\n" + " product BIGINT,\n" + " amount BIGINT,\n" + " order_time as cast(CURRENT_TIMESTAMP as TIMESTAMP(3)),\n" + " WATERMARK FOR order_time AS order_time - INTERVAL '10' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.min' = '1',\n" + " 'fields.order_id.max' = '2',\n" + " 
'fields.amount.min' = '1',\n" + " 'fields.amount.max' = '10',\n" + " 'fields.product.min' = '1',\n" + " 'fields.product.max' = '2'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " product BIGINT,\n" + " order_time TIMESTAMP(3),\n" + " amount BIGINT,\n" + " one_hour_prod_amount_sum BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT product, order_time, amount,\n" + " SUM(amount) OVER (\n" + " PARTITION BY product\n" + " ORDER BY order_time\n" + " RANGE BETWEEN INTERVAL '1' HOUR PRECEDING AND CURRENT ROW\n" + " ) AS one_hour_prod_amount_sum\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_02_agg/RangeIntervalRowtimeStrictlyAscendingTest.java ================================================ package flink.examples.sql._07.query._05_over._02_agg; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class RangeIntervalRowtimeStrictlyAscendingTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " order_id BIGINT,\n" + " product BIGINT,\n" + " amount BIGINT,\n" + " order_time as cast(CURRENT_TIMESTAMP as TIMESTAMP(3)),\n" + " WATERMARK FOR order_time AS order_time\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.min' = '1',\n" + " 'fields.order_id.max' = '2',\n" + " 'fields.amount.min' = '1',\n" + " 'fields.amount.max' = '10',\n" + " 'fields.product.min' = '1',\n" + " 'fields.product.max' = '2'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " product BIGINT,\n" + " order_time TIMESTAMP(3),\n" + " amount BIGINT,\n" + " 
one_hour_prod_amount_sum BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT product, order_time, amount,\n" + " SUM(amount) OVER (\n" + " PARTITION BY product\n" + " ORDER BY order_time\n" + " RANGE BETWEEN INTERVAL '1' HOUR PRECEDING AND CURRENT ROW\n" + " ) AS one_hour_prod_amount_sum\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_05_over/_02_agg/RowIntervalTest.java ================================================ package flink.examples.sql._07.query._05_over._02_agg; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class RowIntervalTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " order_id BIGINT,\n" + " product BIGINT,\n" + " amount BIGINT,\n" + " order_time as cast(CURRENT_TIMESTAMP as TIMESTAMP(3)),\n" + " WATERMARK FOR order_time AS order_time - INTERVAL '0.001' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.order_id.min' = '1',\n" + " 'fields.order_id.max' = '2',\n" + " 'fields.amount.min' = '1',\n" + " 'fields.amount.max' = '2',\n" + " 'fields.product.min' = '1',\n" + " 'fields.product.max' = '2'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " product BIGINT,\n" + " order_time TIMESTAMP(3),\n" + " amount BIGINT,\n" + " one_hour_prod_amount_sum BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT product, order_time, amount,\n" + " SUM(amount) OVER (\n" + " PARTITION BY product\n" + " ORDER BY order_time\n" + " ROWS BETWEEN 5 PRECEDING AND CURRENT 
ROW\n" + " ) AS one_hour_prod_amount_sum\n" + "FROM source_table"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins/_01_inner_join/ConditionFunction$4.java ================================================ package flink.examples.sql._07.query._06_joins._01_regular_joins._01_inner_join; public class ConditionFunction$4 extends org.apache.flink.api.common.functions.AbstractRichFunction implements org.apache.flink.table.runtime.generated.JoinCondition { public ConditionFunction$4(Object[] references) throws Exception { } @Override public void open(org.apache.flink.configuration.Configuration parameters) throws Exception { } @Override public boolean apply(org.apache.flink.table.data.RowData in1, org.apache.flink.table.data.RowData in2) { return true; } @Override public void close() throws Exception { super.close(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins/_01_inner_join/_01_InnerJoinsTest.java ================================================ package flink.examples.sql._07.query._06_joins._01_regular_joins._01_inner_join; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _01_InnerJoinsTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '100'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING\n" + 
")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table\n" + "INNER JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id;"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator} */ Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins/_01_inner_join/_02_InnerJoinsOnNotEqualTest.java ================================================ package flink.examples.sql._07.query._06_joins._01_regular_joins._01_inner_join; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _02_InnerJoinsOnNotEqualTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.click_params.length' = '1',\n" 
+ " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table\n" + "INNER JOIN click_log_table ON show_log_table.log_id > click_log_table.log_id;"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator} */ Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins/_02_outer_join/_01_LeftJoinsTest.java ================================================ package flink.examples.sql._07.query._06_joins._01_regular_joins._02_outer_join; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _01_LeftJoinsTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.show_params.length' = '3',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.click_params.length' = '3',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table 
(\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table\n" + "LEFT JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id;"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator} */ Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins/_02_outer_join/_02_RightJoinsTest.java ================================================ package flink.examples.sql._07.query._06_joins._01_regular_joins._02_outer_join; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _02_RightJoinsTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 
'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table\n" + "RIGHT JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id;"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator} */ Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_01_regular_joins/_02_outer_join/_03_FullJoinsTest.java ================================================ package flink.examples.sql._07.query._06_joins._01_regular_joins._02_outer_join; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class _03_FullJoinsTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '2',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as 
s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table\n" + "FULL JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id;"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator} */ Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_01_proctime/Interval_Full_Joins_ProcesingTime_Test.java ================================================ package flink.examples.sql._07.query._06_joins._02_interval_joins._01_proctime; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Interval_Full_Joins_ProcesingTime_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Join 处理时间案例"); flinkEnv.env().setParallelism(1); String exampleSql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING,\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING,\n" + " proctime AS PROCTIME()\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 
'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table FULL JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n" + "AND show_log_table.proctime BETWEEN click_log_table.proctime - INTERVAL '4' HOUR AND click_log_table.proctime;"; /** * join 算子:{@link org.apache.flink.streaming.api.operators.co.KeyedCoProcessOperator} * -> {@link org.apache.flink.table.runtime.operators.join.interval.ProcTimeIntervalJoin} * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction} */ Arrays.stream(exampleSql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_01_proctime/Interval_Inner_Joins_ProcesingTime_Test.java ================================================ package flink.examples.sql._07.query._06_joins._02_interval_joins._01_proctime; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Interval_Inner_Joins_ProcesingTime_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Join 处理时间案例"); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE dim_table (\n" + " user_id BIGINT,\n" + " platform STRING,\n" + " 
proctime AS PROCTIME()\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.platform.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " platform STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " s.user_id as user_id,\n" + " s.name as name,\n" + " d.platform as platform\n" + "FROM source_table s, dim_table as d\n" + "WHERE s.user_id = d.user_id\n" + "AND s.proctime BETWEEN d.proctime - INTERVAL '4' HOUR AND d.proctime;"; String exampleSql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING,\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING,\n" + " proctime AS PROCTIME()\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table INNER JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n" + "AND show_log_table.proctime BETWEEN click_log_table.proctime - INTERVAL '4' HOUR AND click_log_table.proctime;"; /** * join 算子:{@link 
org.apache.flink.streaming.api.operators.co.KeyedCoProcessOperator} * -> {@link org.apache.flink.table.runtime.operators.join.interval.ProcTimeIntervalJoin} * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction} */ Arrays.stream(exampleSql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_01_proctime/Interval_Left_Joins_ProcesingTime_Test.java ================================================ package flink.examples.sql._07.query._06_joins._02_interval_joins._01_proctime; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Interval_Left_Joins_ProcesingTime_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Join 处理时间案例"); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE dim_table (\n" + " user_id BIGINT,\n" + " platform STRING,\n" + " proctime AS PROCTIME()\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.platform.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " platform STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " s.user_id as user_id,\n" + " s.name as name,\n" + " 
d.platform as platform\n" + "FROM source_table s, dim_table as d\n" + "WHERE s.user_id = d.user_id\n" + "AND s.proctime BETWEEN d.proctime - INTERVAL '4' HOUR AND d.proctime;"; String exampleSql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING,\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING,\n" + " proctime AS PROCTIME()\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table LEFT JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n" + "AND show_log_table.proctime BETWEEN click_log_table.proctime - INTERVAL '4' HOUR AND click_log_table.proctime;"; /** * join 算子:{@link org.apache.flink.streaming.api.operators.co.KeyedCoProcessOperator} * -> {@link org.apache.flink.table.runtime.operators.join.interval.ProcTimeIntervalJoin} * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction} */ Arrays.stream(exampleSql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_01_proctime/Interval_Right_Joins_ProcesingTime_Test.java ================================================ package flink.examples.sql._07.query._06_joins._02_interval_joins._01_proctime; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Interval_Right_Joins_ProcesingTime_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Join 处理时间案例"); flinkEnv.env().setParallelism(1); String exampleSql = "CREATE TABLE show_log_table (\n" + " log_id BIGINT,\n" + " show_params STRING,\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.show_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE click_log_table (\n" + " log_id BIGINT,\n" + " click_params STRING,\n" + " proctime AS PROCTIME()\n" + ")\n" + "WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.click_params.length' = '1',\n" + " 'fields.log_id.min' = '1',\n" + " 'fields.log_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " s_id BIGINT,\n" + " s_params STRING,\n" + " c_id BIGINT,\n" + " c_params STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " show_log_table.log_id as s_id,\n" + " show_log_table.show_params as s_params,\n" + " click_log_table.log_id as c_id,\n" + " click_log_table.click_params as c_params\n" + "FROM show_log_table RIGHT JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n" + "AND show_log_table.proctime BETWEEN click_log_table.proctime - INTERVAL '4' HOUR AND click_log_table.proctime;"; /** * join 
算子:{@link org.apache.flink.streaming.api.operators.co.KeyedCoProcessOperator}
 * -> {@link org.apache.flink.table.runtime.operators.join.interval.ProcTimeIntervalJoin}
 * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction}
 */
        Arrays.stream(exampleSql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_02_row_time/Interval_Full_JoinsOnNotEqual_EventTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._02_interval_joins._02_row_time;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Event-time interval FULL OUTER JOIN example whose join predicate is a
 * non-equi condition (show.log_id > click.log_id) combined with a BETWEEN
 * window of [click.row_time - 4h, click.row_time].
 */
public class Interval_Full_JoinsOnNotEqual_EventTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");

        // Script: two datagen sources with row_time watermarks, a print sink,
        // and the interval full-join INSERT, separated by ';'.
        String sql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log_table.log_id as s_id,\n"
                + " show_log_table.show_params as s_params,\n"
                + " click_log_table.log_id as c_id,\n"
                + " click_log_table.click_params as c_params\n"
                + "FROM show_log_table FULL JOIN click_log_table ON show_log_table.log_id > click_log_table.log_id\n"
                + "AND show_log_table.row_time BETWEEN click_log_table.row_time - INTERVAL '4' HOUR AND click_log_table.row_time;";

        /**
         * Join operator chain: {@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.RowTimeIntervalJoin}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction}
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_02_row_time/Interval_Full_Joins_EventTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._02_interval_joins._02_row_time;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Event-time interval FULL OUTER JOIN example: equi-join on log_id with a
 * BETWEEN window of [click.row_time - 5s, click.row_time].
 */
public class Interval_Full_Joins_EventTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");

        String sql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log_table.log_id as s_id,\n"
                + " show_log_table.show_params as s_params,\n"
                + " click_log_table.log_id as c_id,\n"
                + " click_log_table.click_params as c_params\n"
                + "FROM show_log_table FULL JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n"
                + "AND show_log_table.row_time BETWEEN click_log_table.row_time - INTERVAL '5' SECOND AND click_log_table.row_time;";

        /**
         * Join operator chain: {@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.RowTimeIntervalJoin}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction}
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_02_row_time/Interval_Inner_Joins_EventTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._02_interval_joins._02_row_time;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// https://developer.aliyun.com/article/679659
/**
 * Event-time interval INNER JOIN example: equi-join on log_id with a BETWEEN
 * window of [click.row_time - 4h, click.row_time].
 */
public class Interval_Inner_Joins_EventTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI. (Says "Outer Join" although this
        // example runs an inner join — copied from the sibling examples.)
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");

        String sql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log_table.log_id as s_id,\n"
                + " show_log_table.show_params as s_params,\n"
                + " click_log_table.log_id as c_id,\n"
                + " click_log_table.click_params as c_params\n"
                + "FROM show_log_table INNER JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n"
                + "AND show_log_table.row_time BETWEEN click_log_table.row_time - INTERVAL '4' HOUR AND click_log_table.row_time;";

        /**
         * Join operator chain: {@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.RowTimeIntervalJoin}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction}
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_02_row_time/Interval_Left_Joins_EventTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._02_interval_joins._02_row_time;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Event-time interval LEFT JOIN example: equi-join on log_id within a window of
 * [click.row_time - 5s, click.row_time + 5s]. The id ranges only partially
 * overlap (show: 5-15, click: 1-10), so some left rows stay unmatched.
 */
public class Interval_Left_Joins_EventTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");

        String sql = "CREATE TABLE show_log (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '5',\n"
                + " 'fields.log_id.max' = '15'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log.log_id as s_id,\n"
                + " show_log.show_params as s_params,\n"
                + " click_log.log_id as c_id,\n"
                + " click_log.click_params as c_params\n"
                + "FROM show_log LEFT JOIN click_log ON show_log.log_id = click_log.log_id\n"
                + "AND show_log.row_time BETWEEN click_log.row_time - INTERVAL '5' SECOND AND click_log.row_time + INTERVAL '5' SECOND;";

        /**
         * Join operator chain: {@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.RowTimeIntervalJoin}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction}
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_02_interval_joins/_02_row_time/Interval_Right_Joins_EventTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._02_interval_joins._02_row_time;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Event-time interval RIGHT JOIN example: equi-join on log_id with a BETWEEN
 * window of [click.row_time - 4h, click.row_time].
 */
public class Interval_Right_Joins_EventTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");

        String sql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log_table.log_id as s_id,\n"
                + " show_log_table.show_params as s_params,\n"
                + " click_log_table.log_id as c_id,\n"
                + " click_log_table.click_params as c_params\n"
                + "FROM show_log_table RIGHT JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n"
                + "AND show_log_table.row_time BETWEEN click_log_table.row_time - INTERVAL '4' HOUR AND click_log_table.row_time;";

        /**
         * Join operator chain: {@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.RowTimeIntervalJoin}
         * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction}
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_03_temporal_join/_01_proctime/Temporal_Join_ProcesingTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._03_temporal_join._01_proctime;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// https://developer.aliyun.com/article/679659
// NOTE(review): despite the package/class name, the SQL below is a processing-time
// interval join (FULL JOIN ... BETWEEN), not a FOR SYSTEM_TIME temporal join — confirm intent.
public class Temporal_Join_ProcesingTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Temporal Join 处理时间案例");

        flinkEnv.env().setParallelism(1);

        String exampleSql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log_table.log_id as s_id,\n"
                + " show_log_table.show_params as s_params,\n"
                + " click_log_table.log_id as c_id,\n"
                + " click_log_table.click_params as c_params\n"
                + "FROM show_log_table FULL JOIN click_log_table ON show_log_table.log_id = click_log_table.log_id\n"
                + "AND show_log_table.proctime BETWEEN click_log_table.proctime - INTERVAL '4' HOUR AND click_log_table.proctime;";

        Arrays.stream(exampleSql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_03_temporal_join/_02_row_time/Temporal_Join_EventTime_Test.java
================================================
package flink.examples.sql._07.query._06_joins._03_temporal_join._02_row_time;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// NOTE(review): despite the "_02_row_time"/"EventTime" naming, both tables use
// PROCTIME() and the query is a processing-time interval join, not a temporal join — confirm intent.
public class Temporal_Join_EventTime_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // Job name shown in the Flink web UI.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Join 处理时间案例");

        flinkEnv.env().setParallelism(1);

        String exampleSql = "CREATE TABLE show_log (\n"
                + " log_id BIGINT,\n"
                + " show_params STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.show_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE click_log (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " s_id BIGINT,\n"
                + " s_params STRING,\n"
                + " c_id BIGINT,\n"
                + " c_params STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " show_log.log_id as s_id,\n"
                + " show_log.show_params as s_params,\n"
                + " click_log.log_id as c_id,\n"
                + " click_log.click_params as c_params\n"
                + "FROM show_log FULL JOIN click_log ON show_log.log_id = click_log.log_id\n"
                + "AND show_log.proctime BETWEEN click_log.proctime - INTERVAL '4' HOUR AND click_log.proctime;";

        Arrays.stream(exampleSql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/RedisBatchLookupTest2.java
================================================
package flink.examples.sql._07.query._06_joins._04_lookup_join._01_redis;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Redis installation: https://blog.csdn.net/realize_dream/article/details/106227622
 * Redis java client: https://www.cnblogs.com/chenyanbin/p/12088796.html
 *
 * Lookup-join example against a Redis dimension table with batch mode enabled
 * via the custom "is.dim.batch.mode" flag.
 */
public class RedisBatchLookupTest2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils
                .getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        // Custom switch read by the project's Redis connector to batch lookups.
        flinkEnv.streamTEnv().getConfig()
                .getConfiguration()
                .setBoolean("is.dim.batch.mode", true);

        // NOTE(review): the sink declares (..., age, sex) but the SELECT emits
        // (..., sex, age); INSERT matches by position, so the two columns are
        // swapped in the printed output — verify intended order.
        String exampleSql = "CREATE TABLE show_log (\n"
                + " log_id BIGINT,\n"
                + " `timestamp` as cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " user_id STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10000000',\n"
                + " 'fields.user_id.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE user_profile (\n"
                + " user_id STRING,\n"
                + " age STRING,\n"
                + " sex STRING\n"
                + " ) WITH (\n"
                + " 'connector' = 'redis',\n"
                + " 'hostname' = '127.0.0.1',\n"
                + " 'port' = '6379',\n"
                + " 'format' = 'json',\n"
                + " 'lookup.cache.max-rows' = '500',\n"
                + " 'lookup.cache.ttl' = '3600',\n"
                + " 'lookup.max-retries' = '1'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " log_id BIGINT,\n"
                + " `timestamp` TIMESTAMP(3),\n"
                + " user_id STRING,\n"
                + " proctime TIMESTAMP(3),\n"
                + " age STRING,\n"
                + " sex STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT \n"
                + " s.log_id as log_id\n"
                + " , s.`timestamp` as `timestamp`\n"
                + " , s.user_id as user_id\n"
                + " , s.proctime as proctime\n"
                + " , u.sex as sex\n"
                + " , u.age as age\n"
                + "FROM show_log AS s\n"
                + "LEFT JOIN user_profile FOR SYSTEM_TIME AS OF s.proctime AS u\n"
                + "ON s.user_id = u.user_id";

        Arrays.stream(exampleSql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/RedisDemo.java
================================================
package flink.examples.sql._07.query._06_joins._04_lookup_join._01_redis;

import java.util.HashMap;
import java.util.List;

import com.google.gson.Gson;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.Pipeline;

/**
 * Redis installation: https://blog.csdn.net/realize_dream/article/details/106227622
 * Redis java client: https://www.cnblogs.com/chenyanbin/p/12088796.html
 */
public class RedisDemo {

    // Entry point: runs the single-connection demo; the pool and pipeline
    // demos are left commented out.
    public static void main(String[] args) {
        singleConnect();
        // poolConnect();
        // pipeline();
    }

    // Single-instance Jedis connection: reads key "a", writes a JSON map to "c".
    public static void singleConnect() {
        // single-instance jedis connection
        Jedis jedis = new Jedis("127.0.0.1", 6379);
        String result = jedis.get("a");
        HashMap h = new HashMap<>();
        h.put("sex", "男");
        h.put("age", "18-24");
        String s = new Gson().toJson(h);
        jedis.set("c", s);
        System.out.println(result);
        jedis.close();
    }

    // Connection-pool variant of the read demo.
    public static void poolConnect() {
        // jedis connection pool
        JedisPool pool = new JedisPool("127.0.0.1", 6379);
        Jedis jedis = pool.getResource();
        String result = jedis.get("a");
        System.out.println(result);
        jedis.close();
        pool.close();
    }

    // Benchmarks 10000 plain SETs against 10000 pipelined SETs and prints both timings.
    public static void pipeline() {
        // jedis connection pool
        JedisPool pool = new JedisPool("127.0.0.1", 6379);
        Jedis jedis = pool.getResource();
        Pipeline pipeline = jedis.pipelined();
        long setStart = System.currentTimeMillis();
        for (int i = 0; i < 10000; i++) {
            jedis.set("key_" + i, String.valueOf(i));
        }
        long setEnd = System.currentTimeMillis();
        System.out.println("非pipeline操作10000次字符串数据类型set写入,耗时:" + (setEnd - setStart) + "毫秒");
        long pipelineStart = System.currentTimeMillis();
        for (int i = 0; i < 10000; i++) {
            pipeline.set("key_" + i, String.valueOf(i));
        }
        List l = pipeline.syncAndReturnAll();
        long pipelineEnd = System.currentTimeMillis();
        System.out.println("pipeline操作10000次字符串数据类型set写入,耗时:" + (pipelineEnd - pipelineStart) + "毫秒");
        String result = jedis.get("a");
        System.out.println(result);
        jedis.close();
        pool.close();
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/RedisLookupTest.java
================================================
package flink.examples.sql._07.query._06_joins._04_lookup_join._01_redis;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * Redis installation: https://blog.csdn.net/realize_dream/article/details/106227622
 * Redis java client: https://www.cnblogs.com/chenyanbin/p/12088796.html
 *
 * Lookup join driven from a DataStream source: a Table is created from a custom
 * SourceFunction, registered as "leftTable", and joined against a Redis-backed
 * dimension table with FOR SYSTEM_TIME AS OF proctime.
 */
public class RedisLookupTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        env.setParallelism(1);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inStreamingMode().build();
        env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // Stream of Row("a", "b", 1L) every 10ms, enriched with a proctime column.
        DataStream r = env.addSource(new UserDefinedSource());
        Table sourceTable = tEnv.fromDataStream(r, Schema.newBuilder()
                .columnByExpression("proctime", "PROCTIME()")
                .build());
        tEnv.createTemporaryView("leftTable", sourceTable);

        String sql = "CREATE TABLE dimTable (\n"
                + " name STRING,\n"
                + " name1 STRING,\n"
                + " score BIGINT"
                + ") WITH (\n"
                + " 'connector' = 'redis',\n"
                + " 'hostname' = '127.0.0.1',\n"
                + " 'port' = '6379',\n"
                + " 'format' = 'json',\n"
                + " 'lookup.cache.max-rows' = '500',\n"
                + " 'lookup.cache.ttl' = '3600',\n"
                + " 'lookup.max-retries' = '1'\n"
                + ")";

        String joinSql = "SELECT o.f0, o.f1, c.name, c.name1, c.score\n"
                + "FROM leftTable AS o\n"
                + "LEFT JOIN dimTable FOR SYSTEM_TIME AS OF o.proctime AS c\n"
                + "ON o.f0 = c.name";

        TableResult dimTable = tEnv.executeSql(sql);
        Table t = tEnv.sqlQuery(joinSql);
        // Table t = tEnv.sqlQuery("select * from leftTable");
        tEnv.toAppendStream(t, Row.class).print();
        env.execute();
    }

    // Infinite source emitting Row("a", "b", 1L) every 10ms until cancelled.
    private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable {

        // Cancellation flag set by cancel(); volatile so run() observes it.
        private volatile boolean isCancel;

        @Override
        public void run(SourceContext sourceContext) throws Exception {
            while (!this.isCancel) {
                sourceContext.collect(Row.of("a", "b", 1L));
                Thread.sleep(10L);
            }
        }

        @Override
        public void cancel() {
            this.isCancel = true;
        }

        @Override
        public TypeInformation getProducedType() {
            return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class),
                    TypeInformation.of(Long.class));
        }
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/RedisLookupTest2.java
================================================
package flink.examples.sql._07.query._06_joins._04_lookup_join._01_redis;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Redis installation: https://blog.csdn.net/realize_dream/article/details/106227622
 * Redis java client: https://www.cnblogs.com/chenyanbin/p/12088796.html
 *
 * Pure-SQL lookup-join example against a Redis dimension table with batch mode
 * explicitly disabled.
 */
public class RedisLookupTest2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        // Custom switch read by the project's Redis connector; batching off here.
        flinkEnv.streamTEnv().getConfig().getConfiguration().setBoolean("is.dim.batch.mode", false);

        // NOTE(review): this `sql` script is built but never executed — only
        // `exampleSql` below is submitted. Dead code; keep or remove deliberately.
        String sql = "CREATE TABLE left_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " name STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.click_params.length' = '1',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE dim_table (\n"
                + " name STRING,\n"
                + " age BIGINT) WITH (\n"
                + " 'connector' = 'redis',\n"
                + " 'hostname' = '127.0.0.1',\n"
                + " 'port' = '6379',\n"
                + " 'format' = 'json',\n"
                + " 'lookup.cache.max-rows' = '500',\n"
                + " 'lookup.cache.ttl' = '3600',\n"
                + " 'lookup.max-retries' = '1'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " log_id BIGINT,\n"
                + " click_params STRING,\n"
                + " name STRING,\n"
                + " proctime TIMESTAMP(3),\n"
                + " d_name STRING,\n"
                + " age BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT l.log_id as log_id, l.click_params as click_params, l.name as name, l.proctime as proctime,"
                + " d.name as d_name, d.age as age\n"
                + "FROM left_table AS l\n"
                + "LEFT JOIN dim_table FOR SYSTEM_TIME AS OF l.proctime AS d\n"
                + "ON l.name = d.name";

        // NOTE(review): sink declares (..., age, sex) while the SELECT emits
        // (..., sex, age) — positional INSERT swaps the two columns.
        String exampleSql = "CREATE TABLE show_log (\n"
                + " log_id BIGINT,\n"
                + " `timestamp` as cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " user_id STRING,\n"
                + " proctime AS PROCTIME()\n"
                + ")\n"
                + "WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.user_id.length' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE user_profile (\n"
                + " user_id STRING,\n"
                + " age STRING,\n"
                + " sex STRING\n"
                + " ) WITH (\n"
                + " 'connector' = 'redis',\n"
                + " 'hostname' = '127.0.0.1',\n"
                + " 'port' = '6379',\n"
                + " 'format' = 'json',\n"
                + " 'lookup.cache.max-rows' = '500',\n"
                + " 'lookup.cache.ttl' = '3600',\n"
                + " 'lookup.max-retries' = '1'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " log_id BIGINT,\n"
                + " `timestamp` TIMESTAMP(3),\n"
                + " user_id STRING,\n"
                + " proctime TIMESTAMP(3),\n"
                + " age STRING,\n"
                + " sex STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT \n"
                + " s.log_id as log_id\n"
                + " , s.`timestamp` as `timestamp`\n"
                + " , s.user_id as user_id\n"
                + " , s.proctime as proctime\n"
                + " , u.sex as sex\n"
                + " , u.age as age\n"
                + "FROM show_log AS s\n"
                + "LEFT JOIN user_profile FOR SYSTEM_TIME AS OF s.proctime AS u\n"
                + "ON s.user_id = u.user_id";

        Arrays.stream(exampleSql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/BatchJoinTableFuncCollector$8.java
================================================
// NOTE(review): commented-out Janino-generated collector kept for reference;
// joins each left row with the batch-lookup result row at the same list index.
////package flink.examples.sql._07.query._06_joins._04_lookup_join._01_redis.pipeline;
////
////
//import java.util.List;
//
//public class BatchJoinTableFuncCollector$8 extends org.apache.flink.table.runtime.collector.TableFunctionCollector {
//
//    org.apache.flink.table.data.GenericRowData out = new org.apache.flink.table.data.GenericRowData(2);
//    org.apache.flink.table.data.utils.JoinedRowData joinedRow$7 = new org.apache.flink.table.data.utils.JoinedRowData();
//
//    public BatchJoinTableFuncCollector$8(Object[] references) throws Exception {
//    }
//
//    @Override
//    public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//    }
//
//    @Override
//    public void collect(Object record) throws Exception {
//        List l = (List) getInput();
//        List r = (List) record;
//
//        for (int i = 0; i < l.size(); i++) {
//            org.apache.flink.table.data.RowData in1 = l.get(i);
//            org.apache.flink.table.data.RowData in2 = r.get(i);
//
//            org.apache.flink.table.data.binary.BinaryStringData field$5;
//            boolean isNull$5;
//            long field$6;
//            boolean isNull$6;
//            isNull$6 = in2.isNullAt(1);
//            field$6 = -1L;
//            if (!isNull$6) {
//                field$6 = in2.getLong(1);
//            }
//            isNull$5 = in2.isNullAt(0);
//            field$5 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//            if (!isNull$5) {
//                field$5 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.getString(0));
//            }
//            if (isNull$5) {
//                out.setField(0, null);
//            } else {
//                out.setField(0, field$5);
//            }
//            if (isNull$6) {
//                out.setField(1, null);
//            } else {
//                out.setField(1, field$6);
//            }
//            joinedRow$7.replace(in1, out);
//            joinedRow$7.setRowKind(in1.getRowKind());
//            outputResult(joinedRow$7);
//        }
//    }
//
//    @Override
//    public void close() throws Exception {
//    }
//}

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/BatchLookupFunction$4.java
================================================
// NOTE(review): commented-out generated code; the class inside is named
// PipelineLookupFunction$4, which does not match this file name.
//package flink.examples.sql._07.query._06_joins._04_lookup_join._01_redis.pipeline;
//
//
//import java.util.LinkedList;
//import java.util.List;
//
//public class PipelineLookupFunction$4
//        extends org.apache.flink.api.common.functions.RichFlatMapFunction {
//
//    private transient flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction
//            function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc;
//    private TableFunctionResultConverterCollector$2 resultConverterCollector$3 = null;
//
//    public PipelineLookupFunction$4(Object[] references) throws Exception {
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc =
//                (((flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction) references[0]));
//    }
//
//    @Override
//    public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc
//                .open(new org.apache.flink.table.functions.FunctionContext(getRuntimeContext()));
//
//        resultConverterCollector$3 = new TableFunctionResultConverterCollector$2();
//        resultConverterCollector$3.setRuntimeContext(getRuntimeContext());
//        resultConverterCollector$3.open(new org.apache.flink.configuration.Configuration());
//
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc
//                .setCollector(resultConverterCollector$3);
//    }
//
//    @Override
//    public void flatMap(Object _in1, org.apache.flink.util.Collector c) throws Exception {
//        // change #1: the input is a whole batch (List of rows) instead of one row
//        List in1 = (List) _in1;
//
//        List list = new LinkedList<>();
//
//        for (int i = 0; i < in1.size(); i++) {
//            org.apache.flink.table.data.binary.BinaryStringData field$0;
//            boolean isNull$0;
//            isNull$0 = in1.get(i).isNullAt(2);
//            field$0 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//            if (!isNull$0) {
//                field$0 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.get(i).getString(2));
//            }
//            list.add(field$0);
//        }
//
//        resultConverterCollector$3.setCollector(c);
//
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc
//                .eval(((List) list));
//    }
//
//    @Override
//    public void close() throws Exception {
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc
//                .close();
//    }
//
//    public class TableFunctionResultConverterCollector$2
//            extends org.apache.flink.table.runtime.collector.WrappingCollector {
//
//        public TableFunctionResultConverterCollector$2() throws Exception {
//        }
//
//        @Override
//        public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//        }
//
//        @Override
//        public void collect(Object record) throws Exception {
//            List externalResult$1 = (List) record;
//
//            if (externalResult$1 != null) {
//                outputResult(externalResult$1);
//            }
//        }
//
//        @Override
//        public void close() {
//            try {
//
//            } catch (Exception e) {
//                throw new RuntimeException(e);
//            }
//        }
//    }
//}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/JoinTableFuncCollector$8.java
================================================
// NOTE(review): commented-out generated code; the class inside is named
// JoinTableFuncCollector$9, which does not match this file name.
//
//import java.util.List;
//
//public class JoinTableFuncCollector$9 extends org.apache.flink.table.runtime.collector.TableFunctionCollector {
//
//    org.apache.flink.table.data.GenericRowData out = new org.apache.flink.table.data.GenericRowData(3);
//    org.apache.flink.table.data.utils.JoinedRowData joinedRow$8 = new org.apache.flink.table.data.utils.JoinedRowData();
//
//    public JoinTableFuncCollector$9(Object[] references) throws Exception {
//    }
//
//    @Override
//    public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//    }
//
//    @Override
//    public void collect(Object record) throws Exception {
//        List l = (List) getInput();
//        List r = (List) record;
//        for (int i = 0; i < l.size(); i++) {
//            org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) l.get(i);
//            org.apache.flink.table.data.RowData in2 = (org.apache.flink.table.data.RowData) r.get(i);
//            org.apache.flink.table.data.binary.BinaryStringData field$5;
//            boolean isNull$5;
//            org.apache.flink.table.data.binary.BinaryStringData field$6;
//            boolean isNull$6;
//            org.apache.flink.table.data.binary.BinaryStringData field$7;
//            boolean isNull$7;
//            isNull$7 = in2.isNullAt(2);
//            field$7 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//            if (!isNull$7) {
//                field$7 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.getString(2));
//            }
//            isNull$6 = in2.isNullAt(1);
//            field$6 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//            if (!isNull$6) {
//                field$6 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.getString(1));
//            }
//            isNull$5 = in2.isNullAt(0);
//            field$5 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//            if (!isNull$5) {
//                field$5 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.getString(0));
//            }
//            if (isNull$5) {
//                out.setField(0, null);
//            } else {
//                out.setField(0, field$5);
//            }
//            if (isNull$6) {
//                out.setField(1, null);
//            } else {
//                out.setField(1, field$6);
//            }
//            if (isNull$7) {
//                out.setField(2, null);
//            } else {
//                out.setField(2, field$7);
//            }
//            joinedRow$8.replace(in1, out);
//            joinedRow$8.setRowKind(in1.getRowKind());
//            outputResult(joinedRow$8);
//        }
//    }
//
//    @Override
//    public void close() throws Exception {
//    }
//}
//

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/JoinTableFuncCollector$9.java
================================================
// NOTE(review): commented-out generated collector; two-field (name, age)
// variant that joins batched left rows with batched lookup results by index.
//
//import java.util.List;
//
//public class JoinTableFuncCollector$9 extends org.apache.flink.table.runtime.collector.TableFunctionCollector {
//
//    org.apache.flink.table.data.GenericRowData out = new org.apache.flink.table.data.GenericRowData(2);
//    org.apache.flink.table.data.utils.JoinedRowData joinedRow$7 = new org.apache.flink.table.data.utils.JoinedRowData();
//
//    public JoinTableFuncCollector$9(Object[] references) throws Exception {
//    }
//
//    @Override
//    public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//    }
//
//    @Override
//    public void collect(Object record) throws Exception {
//        List in1 = (List) getInput();
//        List in2 = (List) record;
//
//        for (int i = 0; i < in1.size(); i++) {
//            org.apache.flink.table.data.binary.BinaryStringData field$5;
//            boolean isNull$5;
//            long field$6;
//            boolean isNull$6;
//            isNull$6 = in2.get(i).isNullAt(1);
//            field$6 = -1L;
//            if (!isNull$6) {
//                field$6 = in2.get(i).getLong(1);
//            }
//            isNull$5 = in2.get(i).isNullAt(0);
//            field$5 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//            if (!isNull$5) {
//                field$5 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.get(i).getString(0));
//            }
//            if (isNull$5) {
//                out.setField(0, null);
//            } else {
//                out.setField(0, field$5);
//            }
//            if (isNull$6) {
//                out.setField(1, null);
//            } else {
//                out.setField(1, field$6);
//            }
//            joinedRow$7.replace(in1.get(i), out);
//            joinedRow$7.setRowKind(in1.get(i).getRowKind());
//            outputResult(joinedRow$7);
//        }
//    }
//
//    @Override
//    public void close() throws Exception {
//    }
//}
//

================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/LookupFunction$4.java
================================================
// NOTE(review): commented-out generated single-row lookup function; extracts
// the key at column 2 and delegates to RedisRowDataLookupFunction.eval.
//
//public class LookupFunction$4
//        extends org.apache.flink.api.common.functions.RichFlatMapFunction {
//
//    private transient flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$48c9b464341243406b9f0b4a0ba51d1c;
//    private TableFunctionResultConverterCollector$2 resultConverterCollector$3 = null;
//
//    public LookupFunction$4(Object[] references) throws Exception {
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$48c9b464341243406b9f0b4a0ba51d1c =
//                (((flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction) references[0]));
//    }
//
//    @Override
//    public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$48c9b464341243406b9f0b4a0ba51d1c.open(new org.apache.flink.table.functions.FunctionContext(getRuntimeContext()));
//
//        resultConverterCollector$3 = new TableFunctionResultConverterCollector$2();
//        resultConverterCollector$3.setRuntimeContext(getRuntimeContext());
//        resultConverterCollector$3.open(new org.apache.flink.configuration.Configuration());
//
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$48c9b464341243406b9f0b4a0ba51d1c.setCollector(resultConverterCollector$3);
//    }
//
//    @Override
//    public void flatMap(Object _in1, org.apache.flink.util.Collector c) throws Exception {
//        org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) _in1;
//        org.apache.flink.table.data.binary.BinaryStringData field$0;
//        boolean isNull$0;
//        isNull$0 = in1.isNullAt(2);
//        field$0 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;
//        if (!isNull$0) {
//            field$0 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(2));
//        }
//        resultConverterCollector$3.setCollector(c);
//        if (isNull$0) {
//            // skip
//        } else {
//            function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$48c9b464341243406b9f0b4a0ba51d1c
//                    .eval((org.apache.flink.table.data.binary.BinaryStringData) field$0);
//        }
//    }
//
//    @Override
//    public void close() throws Exception {
//        function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$48c9b464341243406b9f0b4a0ba51d1c.close();
//    }
//
//    public class TableFunctionResultConverterCollector$2 extends org.apache.flink.table.runtime.collector.WrappingCollector {
//
//        public TableFunctionResultConverterCollector$2() throws Exception {
//        }
//
//        @Override
//        public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
//        }
//
//        @Override
//        public void collect(Object record) throws Exception {
//            org.apache.flink.table.data.RowData externalResult$1 = (org.apache.flink.table.data.RowData) record;
//
//            if (externalResult$1 != null) {
//                outputResult(externalResult$1);
//            }
//        }
//
//        @Override
//        public void close() {
//            try {
//
//            } catch (Exception e) {
//                throw new RuntimeException(e);
//            }
//        }
//    }
//}
//
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/LookupFunction$5.java ================================================ // //import java.util.LinkedList; //import java.util.List; //public class LookupFunction$4 // extends org.apache.flink.api.common.functions.RichFlatMapFunction { // // private transient flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc; // private TableFunctionResultConverterCollector$2 resultConverterCollector$3 = null; // // public LookupFunction$4(Object[] references) throws Exception { // function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc = (((flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction) references[0])); // } // // // // @Override // public void open(org.apache.flink.configuration.Configuration parameters) throws Exception { // // function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.open(new org.apache.flink.table.functions.FunctionContext(getRuntimeContext())); // // // resultConverterCollector$3 = new TableFunctionResultConverterCollector$2(); // resultConverterCollector$3.setRuntimeContext(getRuntimeContext()); // resultConverterCollector$3.open(new org.apache.flink.configuration.Configuration()); // // // function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.setCollector(resultConverterCollector$3); // // } // // @Override // public void flatMap(Object _in1, org.apache.flink.util.Collector c) throws Exception { // List l = (List) _in1; // List list = new LinkedList<>(); // for (int i = 0; i < l.size(); i++) { // // 
org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) l.get(i); // // org.apache.flink.table.data.binary.BinaryStringData field$0; // boolean isNull$0; // // isNull$0 = in1.isNullAt(2); // field$0 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8; // if (!isNull$0) { // field$0 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(2)); // } // // list.add(field$0); // } // // // resultConverterCollector$3.setCollector(c); // // // function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.eval((List) list); // // // } // // @Override // public void close() throws Exception { // // function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.close(); // // } // // // public class TableFunctionResultConverterCollector$2 extends org.apache.flink.table.runtime.collector.WrappingCollector { // // // // public TableFunctionResultConverterCollector$2() throws Exception { // // } // // @Override // public void open(org.apache.flink.configuration.Configuration parameters) throws Exception { // // } // // @Override // public void collect(Object record) throws Exception { // List externalResult$1 = (List) record; // // // // // if (externalResult$1 != null) { // outputResult(externalResult$1); // } // // } // // @Override // public void close() { // try { // // } catch (Exception e) { // throw new RuntimeException(e); // } // } // } // //} ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_04_lookup_join/_01_redis/pipeline/T1.java ================================================ ///* 1 */ ///* 2 */ // //import java.util.LinkedList; //import java.util.List; // ///* 3 */ ///* 4 */ public class LookupFunction$4 // /* 5 */ extends org.apache.flink.api.common.functions.RichFlatMapFunction { // /* 6 */ // /* 7 */ 
private transient flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc; // /* 8 */ private TableFunctionResultConverterCollector$2 resultConverterCollector$3 = null; // /* 9 */ // /* 10 */ public LookupFunction$4(Object[] references) throws Exception { // /* 11 */ function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc = (((flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction) references[0])); // /* 12 */ } // /* 13 */ // /* 14 */ // /* 15 */ // /* 16 */ @Override // /* 17 */ public void open(org.apache.flink.configuration.Configuration parameters) throws Exception { // /* 18 */ // /* 19 */ function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.open(new org.apache.flink.table.functions.FunctionContext(getRuntimeContext())); // /* 20 */ // /* 21 */ // /* 22 */ resultConverterCollector$3 = new TableFunctionResultConverterCollector$2(); // /* 23 */ resultConverterCollector$3.setRuntimeContext(getRuntimeContext()); // /* 24 */ resultConverterCollector$3.open(new org.apache.flink.configuration.Configuration()); // /* 25 */ // /* 26 */ // /* 27 */ function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.setCollector(resultConverterCollector$3); // /* 28 */ // /* 29 */ } // /* 30 */ // /* 31 */ @Override // /* 32 */ public void flatMap(Object _in1, org.apache.flink.util.Collector c) throws Exception { // /* 33 */ List l = (List) _in1; // /* 34 */ List list = new LinkedList<>(); // /* 35 */ for (int i = 0; i < l.size(); i++) { // /* 36 */ // /* 37 */ org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) l.get(i); // /* 38 */ // /* 39 */ // /* 40 */ 
org.apache.flink.table.data.binary.BinaryStringData field$0; // /* 41 */ boolean isNull$0; // /* 42 */ // /* 43 */ isNull$0 = in1.isNullAt(2); // /* 44 */ field$0 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8; // /* 45 */ if (!isNull$0) { // /* 46 */ field$0 = ((org.apache.flink.table.data.binary.BinaryStringData) in1.getString(2)); // /* 47 */ } // /* 48 */ // /* 49 */ list.add(field$0); // /* 50 */ } // /* 51 */ // /* 52 */ // /* 53 */ resultConverterCollector$3.setCollector(c); // /* 54 */ // /* 55 */ // /* 56 */ function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.eval((List) list); // /* 57 */ // /* 58 */ // /* 59 */ } // /* 60 */ // /* 61 */ @Override // /* 62 */ public void close() throws Exception { // /* 63 */ // /* 64 */ function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.close(); // /* 65 */ // /* 66 */ } // /* 67 */ // /* 68 */ // /* 69 */ public class TableFunctionResultConverterCollector$2 extends org.apache.flink.table.runtime.collector.WrappingCollector { // /* 70 */ // /* 71 */ // /* 72 */ // /* 73 */ public TableFunctionResultConverterCollector$2() throws Exception { // /* 74 */ // /* 75 */ } // /* 76 */ // /* 77 */ @Override // /* 78 */ public void open(org.apache.flink.configuration.Configuration parameters) throws Exception { // /* 79 */ // /* 80 */ } // /* 81 */ // /* 82 */ @Override // /* 83 */ public void collect(Object record) throws Exception { // /* 84 */ List externalResult$1 = (List) record; // /* 85 */ // /* 86 */ // /* 87 */ // /* 88 */ // /* 89 */ if (externalResult$1 != null) { // /* 90 */ outputResult(externalResult$1); // /* 91 */ } // /* 92 */ // /* 93 */ } // /* 94 */ // /* 95 */ @Override // /* 96 */ public void close() { // /* 97 */ try { // /* 98 */ // /* 99 */ } catch (Exception e) { // /* 100 */ throw new RuntimeException(e); // /* 101 */ } // /* 102 */ } // /* 
103 */ } // /* 104 */ // /* 105 */ } ///* 106 */
================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_05_array_expansion/_01_ArrayExpansionTest.java
================================================
package flink.examples.sql._07.query._06_joins._05_array_expansion;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Array-expansion example: CROSS JOIN UNNEST(show_params) expands each row of
 * show_log_table into one output row per array element.
 */
public class _01_ArrayExpansionTest {

    public static void main(String[] args) throws Exception {

        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        flinkEnv.env().setParallelism(1);

        // FIX(review): the element type of show_params was lost (a bare ARRAY is not
        // valid Flink SQL); restored as ARRAY<STRING> to match the STRING show_param
        // column that UNNEST feeds into sink_table.
        String sql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params ARRAY<STRING>\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " log_id BIGINT,\n"
                + " show_param STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " log_id,\n"
                + " t.show_param as show_param\n"
                + "FROM show_log_table\n"
                + "CROSS JOIN UNNEST(show_params) AS t (show_param)";

        // Kept for comparison: the same pipeline without the UNNEST expansion
        // (the array column is forwarded as-is). Not executed below.
        String originalSql = "CREATE TABLE show_log_table (\n"
                + " log_id BIGINT,\n"
                + " show_params ARRAY<STRING>\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.log_id.min' = '1',\n"
                + " 'fields.log_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " log_id BIGINT,\n"
                + " show_params ARRAY<STRING>\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " log_id,\n"
                + " show_params\n"
                + "FROM show_log_table\n";

        /*
         * NOTE(review): the original comment referenced
         * org.apache.flink.table.runtime.operators.join.stream.StreamingJoinOperator,
         * which belongs to stream-stream joins; UNNEST is planned as a correlate —
         * verify the actual operator in the web UI if it matters.
         */
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_06_table_function/_01_inner_join/TableFunctionInnerJoin_Test.java
================================================
package flink.examples.sql._07.query._06_joins._06_table_function._01_inner_join;

import java.util.Arrays;

import org.apache.flink.table.functions.TableFunction;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Table-function inner join: each source row is joined via LATERAL TABLE with the
 * rows emitted by user_profile_table_func; rows for which the function emits
 * nothing are dropped (inner-join semantics).
 */
public class TableFunctionInnerJoin_Test {

    public static void main(String[] args) throws Exception {

        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        String sql = "CREATE FUNCTION user_profile_table_func AS 'flink.examples.sql._07.query._06_joins._06_table_function"
                + "._01_inner_join.TableFunctionInnerJoin_Test$UserProfileTableFunction';\n"
                + "\n"
                + "CREATE TABLE source_table (\n"
                + " user_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " name STRING,\n"
                + " age INT,\n"
                + " row_time TIMESTAMP(3)\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT user_id,\n"
                + " name,\n"
                + " age,\n"
                + " row_time\n"
                + "FROM source_table,\n"
                + "LATERAL TABLE(user_profile_table_func(user_id)) t(age)";

        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }

    /**
     * Emits one row for user_id <= 5 and three rows otherwise.
     *
     * FIX(review): the type argument of TableFunction was lost in extraction;
     * restored as TableFunction<Integer> since the emitted column (age) is INT.
     */
    public static class UserProfileTableFunction extends TableFunction<Integer> {

        public void eval(long userId) {
            // custom emission logic
            if (userId <= 5) {
                // one input row -> 1 output row
                collect(1);
            } else {
                // one input row -> 3 output rows
                collect(1);
                collect(2);
                collect(3);
            }
        }
    }
}

================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_06_joins/_06_table_function/_01_inner_join/TableFunctionInnerJoin_WithEmptyTableFunction_Test.java ================================================ package flink.examples.sql._07.query._06_joins._06_table_function._01_inner_join; import java.util.Arrays; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.functions.TableFunction; public class TableFunctionInnerJoin_WithEmptyTableFunction_Test { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(10); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, 
settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String sql = "CREATE FUNCTION user_profile_table_func AS 'flink.examples.sql._07.query._06_joins._07_table_function" + "._01_inner_join.TableFunctionInnerJoin_WithEmptyTableFunction_Test$UserProfile_EmptyTableFunction';\n" + "\n" + "CREATE TABLE source_table (\n" + " user_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " age INT,\n" + " row_time TIMESTAMP(3)\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id,\n" + " name,\n" + " age,\n" + " row_time\n" + "FROM source_table,\n" + "LATERAL TABLE(user_profile_table_func(user_id)) t(age)"; /** * join 算子:{@link org.apache.flink.table.runtime.operators.join.KeyedCoProcessOperatorWithWatermarkDelay} * -> {@link org.apache.flink.table.runtime.operators.join.interval.RowTimeIntervalJoin} * -> {@link org.apache.flink.table.runtime.operators.join.interval.IntervalJoinFunction} */ Arrays.stream(sql.split(";")) .forEach(tEnv::executeSql); } public static class UserProfile_EmptyTableFunction extends TableFunction { public void eval(long userId) { } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_07_deduplication/DeduplicationProcessingTimeTest.java ================================================ package flink.examples.sql._07.query._07_deduplication; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class DeduplicationProcessingTimeTest { public static void 
main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT COMMENT '用户 id',\n" + " name STRING COMMENT '用户姓名',\n" + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " server_timestamp\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " server_timestamp,\n" + " row_number() over(partition by user_id order by proctime) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1"; /** * 算子 {@link org.apache.flink.streaming.api.operators.KeyedProcessOperator} * -- {@link org.apache.flink.table.runtime.operators.deduplicate.ProcTimeDeduplicateKeepFirstRowFunction} */ for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_07_deduplication/DeduplicationProcessingTimeTest1.java ================================================ package flink.examples.sql._07.query._07_deduplication; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class DeduplicationProcessingTimeTest1 { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT COMMENT '用户 
id',\n" + " name STRING COMMENT '用户姓名',\n" + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " server_timestamp BIGINT,\n" + " rn BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " server_timestamp, rn\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " server_timestamp,\n" + " row_number() over(partition by user_id order by proctime) as rn\n" + " FROM source_table\n" + ")\n"; /** * 算子 {@link org.apache.flink.streaming.api.operators.KeyedProcessOperator} * -- {@link org.apache.flink.table.runtime.operators.deduplicate.ProcTimeDeduplicateKeepFirstRowFunction} */ for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_07_deduplication/DeduplicationRowTimeTest.java ================================================ package flink.examples.sql._07.query._07_deduplication; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class DeduplicationRowTimeTest { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT COMMENT '用户 id',\n" + " level STRING COMMENT '用户等级',\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)) COMMENT '事件时间戳',\n" + " WATERMARK FOR row_time AS row_time\n" + ") WITH (\n" 
+ " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.level.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '1000000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " level STRING COMMENT '等级',\n" + " uv BIGINT COMMENT '当前等级用户数',\n" + " row_time timestamp(3) COMMENT '时间戳'\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select \n" + " level\n" + " , count(1) as uv\n" + " , max(row_time) as row_time\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " level,\n" + " row_time,\n" + " row_number() over(partition by user_id order by row_time desc) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1\n" + "group by \n" + " level"; for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_08_datastream_trans/AlertExample.java ================================================ package flink.examples.sql._07.query._08_datastream_trans; import org.apache.flink.api.common.functions.FlatMapFunction; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.extern.slf4j.Slf4j; @Slf4j public class AlertExample { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String createTableSql = "CREATE TABLE source_table (\n" + " id BIGINT,\n" + " money BIGINT,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp_LTZ(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.id.min' = '1',\n" + " 'fields.id.max' = '100000',\n" + " 'fields.money.min' = '1',\n" + " 'fields.money.max' = '100000'\n" + ")\n"; String 
querySql = "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n" + " window_start, \n" + " sum(money) as sum_money,\n" + " count(distinct id) as count_distinct_id\n" + "FROM TABLE(CUMULATE(\n" + " TABLE source_table\n" + " , DESCRIPTOR(row_time)\n" + " , INTERVAL '5' SECOND\n" + " , INTERVAL '1' DAY))\n" + "GROUP BY window_start, \n" + " window_end"; flinkEnv.streamTEnv().executeSql(createTableSql); Table resultTable = flinkEnv.streamTEnv().sqlQuery(querySql); flinkEnv.streamTEnv() .toDataStream(resultTable, Row.class) .flatMap(new FlatMapFunction() { @Override public void flatMap(Row value, Collector out) throws Exception { long l = Long.parseLong(String.valueOf(value.getField("sum_money"))); if (l > 10000L) { log.info("报警,超过 1w"); } } }); flinkEnv.env().execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_08_datastream_trans/AlertExampleRetract.java ================================================ package flink.examples.sql._07.query._08_datastream_trans; import org.apache.flink.api.common.functions.FlatMapFunction; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.extern.slf4j.Slf4j; @Slf4j public class AlertExampleRetract { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String createTableSql = "CREATE TABLE source_table (\n" + " id BIGINT,\n" + " money BIGINT,\n" + " `time` as cast(CURRENT_TIMESTAMP as bigint) * 1000\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.id.min' = '1',\n" + " 'fields.id.max' = '100000',\n" + " 'fields.money.min' = '1',\n" + " 'fields.money.max' = '100000'\n" + ")\n"; String querySql = "SELECT max(`time`), \n" + " 
sum(money) as sum_money\n" + "FROM source_table\n" + "GROUP BY (`time` + 8 * 3600 * 1000) / (24 * 3600 * 1000)"; flinkEnv.streamTEnv().executeSql(createTableSql); Table resultTable = flinkEnv.streamTEnv().sqlQuery(querySql); flinkEnv.streamTEnv() .toRetractStream(resultTable, Row.class) .flatMap(new FlatMapFunction, Object>() { @Override public void flatMap(Tuple2 value, Collector out) throws Exception { long l = Long.parseLong(String.valueOf(value.f1.getField("sum_money"))); if (l > 10000L) { log.info("报警,超过 1w"); } } }); flinkEnv.env().execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_08_datastream_trans/AlertExampleRetractError.java ================================================ package flink.examples.sql._07.query._08_datastream_trans; import org.apache.flink.api.common.functions.FlatMapFunction; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import org.apache.flink.util.Collector; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import lombok.extern.slf4j.Slf4j; @Slf4j public class AlertExampleRetractError { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String createTableSql = "CREATE TABLE source_table (\n" + " id BIGINT,\n" + " money BIGINT,\n" + " `time` as cast(CURRENT_TIMESTAMP as bigint) * 1000\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.id.min' = '1',\n" + " 'fields.id.max' = '100000',\n" + " 'fields.money.min' = '1',\n" + " 'fields.money.max' = '100000'\n" + ")\n"; String querySql = "SELECT max(`time`), \n" + " sum(money) as sum_money\n" + "FROM source_table\n" + "GROUP BY (`time` + 8 * 3600 * 1000) / (24 * 3600 * 1000)"; flinkEnv.streamTEnv().executeSql(createTableSql); Table resultTable = flinkEnv.streamTEnv().sqlQuery(querySql); flinkEnv.streamTEnv() .toDataStream(resultTable, 
Row.class) .flatMap(new FlatMapFunction() { @Override public void flatMap(Row value, Collector out) throws Exception { long l = Long.parseLong(String.valueOf(value.getField("sum_money"))); if (l > 10000L) { log.info("报警,超过 1w"); } } }); flinkEnv.env().execute(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_08_datastream_trans/RetractExample.java ================================================ //package flink.examples.sql._07.query._08_datastream_trans; // //import org.apache.flink.api.java.tuple.Tuple2; //import org.apache.flink.streaming.api.datastream.DataStream; //import org.apache.flink.table.api.Table; //import org.apache.flink.types.Row; // //import flink.examples.FlinkEnvUtils; //import flink.examples.FlinkEnvUtils.FlinkEnv; //import lombok.extern.slf4j.Slf4j; // //@Slf4j //public class RetractExample { // // public static void main(String[] args) throws Exception { // // FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // // String createTableSql = "CREATE TABLE source_table (\n" // + " id BIGINT,\n" // + " money BIGINT,\n" // + " `time` as cast(CURRENT_TIMESTAMP as bigint) * 1000\n" // + ") WITH (\n" // + " 'connector' = 'datagen',\n" // + " 'rows-per-second' = '1',\n" // + " 'fields.id.min' = '1',\n" // + " 'fields.id.max' = '100000',\n" // + " 'fields.money.min' = '1',\n" // + " 'fields.money.max' = '100000'\n" // + ")\n"; // // String querySql = "SELECT max(`time`), \n" // + " sum(money) as sum_money\n" // + "FROM source_table\n" // + "GROUP BY (`time` + 8 * 3600 * 1000) / (24 * 3600 * 1000)"; // // flinkEnv.streamTEnv().executeSql(createTableSql); // // Table resultTable = flinkEnv.streamTEnv().sqlQuery(querySql); // // DataStream> d = flinkEnv.streamTEnv() // .toChangelogStream(resultTable, Row.class); // // flinkEnv.streamTEnv().from // // flinkEnv.env().execute(); // } // //} ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_08_datastream_trans/Test.java ================================================ package flink.examples.sql._07.query._08_datastream_trans; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.Schema; import org.apache.flink.table.api.Table; import org.apache.flink.types.Row; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); DataStream r = flinkEnv.env().addSource(new UserDefinedSource()); // 数据源是 DataStream API Table sourceTable = flinkEnv.streamTEnv().fromDataStream(r , Schema .newBuilder() .column("f0", "string") .column("f1", "string") .column("f2", "bigint") .columnByExpression("proctime", "PROCTIME()") .build()); flinkEnv.streamTEnv().createTemporaryView("source_table", sourceTable); String selectDistinctSql = "select distinct f0 from source_table"; Table resultTable = flinkEnv.streamTEnv().sqlQuery(selectDistinctSql); flinkEnv.streamTEnv().toRetractStream(resultTable, Row.class).print(); String groupBySql = "select f0 from source_table group by f0"; Table resultTable1 = flinkEnv.streamTEnv().sqlQuery(groupBySql); flinkEnv.streamTEnv().toRetractStream(resultTable1, Row.class).print(); flinkEnv.env().execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", 1L)); Thread.sleep(10L); i++; } } @Override public void 
cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_09_set_operations/Except_Test.java ================================================ package flink.examples.sql._07.query._09_set_operations; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Except_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE source_table_2 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "Except\n" + "SELECT user_id\n" + "FROM source_table_2\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_09_set_operations/Exist_Test.java ================================================ package flink.examples.sql._07.query._09_set_operations; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Exist_Test { public static void main(String[] args) throws Exception { FlinkEnv 
flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE source_table_2 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "WHERE user_id EXISTS (\n" + " SELECT user_id\n" + " FROM source_table_2\n" + ")\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_09_set_operations/In_Test.java ================================================ package flink.examples.sql._07.query._09_set_operations; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class In_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE source_table_2 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO 
sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "WHERE user_id in (\n" + " SELECT user_id\n" + " FROM source_table_2\n" + ")\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_09_set_operations/Intersect_Test.java ================================================ package flink.examples.sql._07.query._09_set_operations; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Intersect_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE source_table_2 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "INTERSECT\n" + "SELECT user_id\n" + "FROM source_table_2\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_09_set_operations/UnionAll_Test.java ================================================ package flink.examples.sql._07.query._09_set_operations; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class UnionAll_Test { public static void main(String[] args) throws 
Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE source_table_2 (\n" + " user_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "UNION ALL\n" + "SELECT user_id\n" + "FROM source_table_2\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_09_set_operations/Union_Test.java ================================================ package flink.examples.sql._07.query._09_set_operations; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Union_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 
'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE source_table_2 (\n" + " user_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "UNION\n" + "SELECT user_id\n" + "FROM source_table_2\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_10_order_by/OrderBy_with_time_attr_Test.java ================================================ package flink.examples.sql._07.query._10_order_by; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class OrderBy_with_time_attr_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + 
"Order By row_time, user_id desc\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_10_order_by/OrderBy_without_time_attr_Test.java ================================================ package flink.examples.sql._07.query._10_order_by; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class OrderBy_without_time_attr_Test { public static void main(String[] args) throws Exception { /** * Exception in thread "main" org.apache.flink.table.api.TableException: Sort on a non-time-attribute field * is not supported. * at org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecSort.translateToPlanInternal * (StreamExecSort.java:75) * at org.apache.flink.table.planner.plan.nodes.exec.ExecNodeBase.translateToPlan(ExecNodeBase.java:134) * at org.apache.flink.table.planner.plan.nodes.exec.ExecEdge.translateToPlan(ExecEdge.java:247) * at org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecSink.translateToPlanInternal * (StreamExecSink.java:104) * at org.apache.flink.table.planner.plan.nodes.exec.ExecNodeBase.translateToPlan(ExecNodeBase.java:134) * at org.apache.flink.table.planner.delegation.StreamPlanner$$anonfun$translateToPlan$1.apply(StreamPlanner * .scala:70) * at org.apache.flink.table.planner.delegation.StreamPlanner$$anonfun$translateToPlan$1.apply(StreamPlanner * .scala:69) * at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234) * at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234) * at scala.collection.Iterator$class.foreach(Iterator.scala:891) * at scala.collection.AbstractIterator.foreach(Iterator.scala:1334) * at scala.collection.IterableLike$class.foreach(IterableLike.scala:72) * at scala.collection.AbstractIterable.foreach(Iterable.scala:54) * at 
scala.collection.TraversableLike$class.map(TraversableLike.scala:234) * at scala.collection.AbstractTraversable.map(Traversable.scala:104) * at org.apache.flink.table.planner.delegation.StreamPlanner.translateToPlan(StreamPlanner.scala:69) * at org.apache.flink.table.planner.delegation.PlannerBase.translate(PlannerBase.scala:165) * at org.apache.flink.table.api.internal.TableEnvironmentImpl.translate(TableEnvironmentImpl.java:1518) * at org.apache.flink.table.api.internal.TableEnvironmentImpl.executeInternal(TableEnvironmentImpl.java:740) * at org.apache.flink.table.api.internal.TableEnvironmentImpl.executeInternal(TableEnvironmentImpl.java:856) * at org.apache.flink.table.api.internal.TableEnvironmentImpl.executeSql(TableEnvironmentImpl.java:730) * at java.util.Spliterators$ArraySpliterator.forEachRemaining(Spliterators.java:948) * at java.util.stream.ReferencePipeline$Head.forEach(ReferencePipeline.java:580) * at flink.examples.sql._07.query._10_order_by.OrderBy_Test.main(OrderBy_Test.java:36) */ FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "Order By user_id\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_11_limit/Limit_Test.java ================================================ package flink.examples.sql._07.query._11_limit; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Limit_Test { public static 
void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table_1 (\n" + " user_id BIGINT NOT NULL,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT user_id\n" + "FROM source_table_1\n" + "Limit 3\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_12_topn/TopN_Test.java ================================================ package flink.examples.sql._07.query._12_topn; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class TopN_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table (\n" + " name BIGINT NOT NULL,\n" + " search_cnt BIGINT NOT NULL,\n" + " key BIGINT NOT NULL,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.min' = '1',\n" + " 'fields.name.max' = '10',\n" + " 'fields.key.min' = '1',\n" + " 'fields.key.max' = '2',\n" + " 'fields.search_cnt.min' = '1000',\n" + " 'fields.search_cnt.max' = '10000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " key BIGINT,\n" + " name BIGINT,\n" + " search_cnt BIGINT,\n" + " `timestamp` TIMESTAMP(3)\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT key, 
name, search_cnt, row_time as `timestamp`\n" + "FROM (\n" + " SELECT key, name, search_cnt, row_time, \n" + " ROW_NUMBER() OVER (PARTITION BY key\n" + " ORDER BY search_cnt desc) AS rownum\n" + " FROM source_table)\n" + "WHERE rownum <= 100\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_13_window_topn/WindowTopN_Test.java ================================================ package flink.examples.sql._07.query._13_window_topn; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class WindowTopN_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table (\n" + " name BIGINT NOT NULL,\n" + " search_cnt BIGINT NOT NULL,\n" + " key BIGINT NOT NULL,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.min' = '1',\n" + " 'fields.name.max' = '10',\n" + " 'fields.key.min' = '1',\n" + " 'fields.key.max' = '2',\n" + " 'fields.search_cnt.min' = '1000',\n" + " 'fields.search_cnt.max' = '10000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " key BIGINT,\n" + " name BIGINT,\n" + " search_cnt BIGINT,\n" + " window_start TIMESTAMP(3),\n" + " window_end TIMESTAMP(3)\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT key, name, search_cnt, window_start, window_end\n" + "FROM (\n" + " SELECT key, name, search_cnt, window_start, window_end, \n" + " ROW_NUMBER() OVER (PARTITION BY window_start, window_end, key\n" + " ORDER BY search_cnt desc) AS rownum\n" + " FROM (\n" + " SELECT window_start, window_end, key, name, max(search_cnt) as search_cnt\n" + " FROM 
TABLE(TUMBLE(TABLE source_table, DESCRIPTOR(row_time), INTERVAL '1' MINUTES))\n" + " GROUP BY window_start, window_end, key, name\n" + " )\n" + ")\n" + "WHERE rownum <= 100\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_14_retract/Retract_Test.java ================================================ package flink.examples.sql._07.query._14_retract; import org.apache.flink.table.api.TableResult; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Retract_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT COMMENT '用户 id',\n" + " name STRING COMMENT '用户姓名',\n" + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " max(cast(server_timestamp as bigint)) as server_timestamp\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " server_timestamp,\n" + " row_number() over(partition by user_id order by proctime desc) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1\n" + "group by user_id"; for (String innerSql : sql.split(";")) { TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql); tableResult.print(); } } } 
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_15_exec_options/Default_Parallelism_Test.java ================================================
package flink.examples.sql._07.query._15_exec_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Demo for {@code table.exec.resource.default-parallelism}: the DataStream environment
 * parallelism is pinned to 1, while SQL operators are configured to run with parallelism 8.
 */
public class Default_Parallelism_Test {

    public static void main(String[] args) throws Exception {
        // NOTE(review): "--enable.hive.module.v2 false" presumably disables a Hive module in
        // FlinkEnvUtils — confirm against FlinkEnvUtils.
        FlinkEnv flink = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flink.env().setParallelism(1);

        // SQL operators pick up this value instead of the environment default of 1.
        flink.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setInteger("table.exec.resource.default-parallelism", 8);

        // datagen source -> dedup latest row per user_id by proctime -> max aggregate -> print sink
        String sqlScript = "CREATE TABLE source_table (\n"
                + " user_id BIGINT COMMENT '用户 id',\n"
                + " name STRING COMMENT '用户姓名',\n"
                + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n"
                + " proctime AS PROCTIME()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " max(cast(server_timestamp as bigint)) as server_timestamp\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " server_timestamp,\n"
                + " row_number() over(partition by user_id order by proctime desc) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1\n"
                + "group by user_id";

        // Execute each ';'-separated statement and print its result.
        for (String statement : sqlScript.split(";")) {
            TableResult result = flink.streamTEnv().executeSql(statement);
            result.print();
        }
    }
}
================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_15_exec_options/Idle_Timeout_Test.java ================================================
package flink.examples.sql._07.query._15_exec_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Demo for {@code table.exec.source.idle-timeout}: sources that emit no records for 180 s are
 * marked idle so downstream watermarks can still advance.
 */
public class Idle_Timeout_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flink = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flink.env().setParallelism(1);

        // Idle-source detection threshold for watermark propagation.
        flink.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.exec.source.idle-timeout", "180 s");

        // datagen source with an event-time attribute -> dedup latest row per user_id -> print sink
        String sqlScript = "CREATE TABLE source_table (\n"
                + " user_id BIGINT COMMENT '用户 id',\n"
                + " name STRING COMMENT '用户姓名',\n"
                + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " max(cast(server_timestamp as bigint)) as server_timestamp\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " server_timestamp,\n"
                + " row_number() over(partition by user_id order by row_time desc) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1\n"
                + "group by user_id";

        // Execute each ';'-separated statement and print its result.
        for (String statement : sqlScript.split(";")) {
            TableResult result = flink.streamTEnv().executeSql(statement);
            result.print();
        }
    }
}
================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_15_exec_options/State_Ttl_Test.java ================================================
package flink.examples.sql._07.query._15_exec_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Demo for {@code table.exec.state.ttl}: keyed state of the unbounded dedup/agg operators below
 * is retained for 180 s and then eligible for cleanup.
 */
public class State_Ttl_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flink = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flink.env().setParallelism(1);

        // State time-to-live for stateful SQL operators.
        flink.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.exec.state.ttl", "180 s");

        // datagen source -> dedup latest row per user_id by row_time -> max aggregate -> print sink
        String sqlScript = "CREATE TABLE source_table (\n"
                + " user_id BIGINT COMMENT '用户 id',\n"
                + " name STRING COMMENT '用户姓名',\n"
                + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id BIGINT,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_id,\n"
                + " max(cast(server_timestamp as bigint)) as server_timestamp\n"
                + "from (\n"
                + " SELECT\n"
                + " user_id,\n"
                + " name,\n"
                + " server_timestamp,\n"
                + " row_number() over(partition by user_id order by row_time desc) as rn\n"
                + " FROM source_table\n"
                + ")\n"
                + "where rn = 1\n"
                + "group by user_id";

        // Execute each ';'-separated statement and print its result.
        for (String statement : sqlScript.split(";")) {
            TableResult result = flink.streamTEnv().executeSql(statement);
            result.print();
        }
    }
}
================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_16_optimizer_options/Agg_OnePhase_Strategy_window_Test.java ================================================
package flink.examples.sql._07.query._16_optimizer_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Demo for {@code table.optimizer.agg-phase-strategy = ONE_PHASE} on a CUMULATE window
 * aggregation (60 s step, 1 day max size) with a distinct count.
 */
public class Agg_OnePhase_Strategy_window_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flink = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flink.env().setParallelism(1);

        // Force single-phase aggregation (no local/global split).
        flink.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.optimizer.agg-phase-strategy", "ONE_PHASE");

        // datagen source -> cumulate window agg (sum + count distinct) -> print sink
        String sqlScript = "CREATE TABLE source_table (\n"
                + " id BIGINT,\n"
                + " money BIGINT,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp_LTZ(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.id.min' = '1',\n"
                + " 'fields.id.max' = '100000',\n"
                + " 'fields.money.min' = '1',\n"
                + " 'fields.money.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " window_end bigint,\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT,\n"
                + " count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money,\n"
                + " count(distinct id) as count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '60' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end";

        // Execute each ';'-separated statement and print its result.
        for (String statement : sqlScript.split(";")) {
            TableResult result = flink.streamTEnv().executeSql(statement);
            result.print();
        }
    }
}
================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_16_optimizer_options/Agg_TwoPhase_Strategy_unbounded_Test.java ================================================ package flink.examples.sql._07.query._16_optimizer_options; import org.apache.flink.table.api.TableResult; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Agg_TwoPhase_Strategy_unbounded_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); flinkEnv.streamTEnv() .getConfig() .getConfiguration() .setString("table.optimizer.agg-phase-strategy", "TWO_PHASE"); flinkEnv.streamTEnv() .getConfig() .getConfiguration() .setString("table.exec.mini-batch.enabled", "true"); flinkEnv.streamTEnv() .getConfig() .getConfiguration() .setString("table.exec.mini-batch.allow-latency", "60 s"); flinkEnv.streamTEnv() .getConfig() .getConfiguration() .setString("table.exec.mini-batch.size", "1000000000"); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT COMMENT '用户 id',\n" + " name STRING COMMENT '用户姓名',\n" + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " cnt BIGINT,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO sink_table\n" + "SELECT\n" + " user_id,\n" + " count(1) as cnt,\n" + " max(cast(server_timestamp as bigint)) as server_timestamp\n" + "FROM source_table\n" + "GROUP BY\n" + " user_id"; for (String innerSql : sql.split(";")) { TableResult tableResult = 
flinkEnv.streamTEnv().executeSql(innerSql); tableResult.print(); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_16_optimizer_options/Agg_TwoPhase_Strategy_window_Test.java ================================================ package flink.examples.sql._07.query._16_optimizer_options; import org.apache.flink.table.api.TableResult; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Agg_TwoPhase_Strategy_window_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"}); flinkEnv.env().setParallelism(1); flinkEnv.streamTEnv() .getConfig() .getConfiguration() .setString("table.optimizer.agg-phase-strategy", "TWO_PHASE"); String sql = "CREATE TABLE source_table (\n" + " id BIGINT,\n" + " money BIGINT,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp_LTZ(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.id.min' = '1',\n" + " 'fields.id.max' = '100000',\n" + " 'fields.money.min' = '1',\n" + " 'fields.money.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " window_end bigint,\n" + " window_start timestamp(3),\n" + " sum_money BIGINT,\n" + " count_distinct_id BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "SELECT UNIX_TIMESTAMP(CAST(window_end AS STRING)) * 1000 as window_end, \n" + " window_start, \n" + " sum(money) as sum_money,\n" + " count(distinct id) as count_distinct_id\n" + "FROM TABLE(CUMULATE(\n" + " TABLE source_table\n" + " , DESCRIPTOR(row_time)\n" + " , INTERVAL '60' SECOND\n" + " , INTERVAL '1' DAY))\n" + "GROUP BY window_start, \n" + " window_end"; for (String innerSql : sql.split(";")) { TableResult tableResult = 
flinkEnv.streamTEnv().executeSql(innerSql); tableResult.print(); } } }
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_16_optimizer_options/DistinctAgg_Split_One_Distinct_Key_Test.java ================================================
package flink.examples.sql._07.query._16_optimizer_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Demo for the split-distinct-aggregation optimization
 * ({@code table.optimizer.distinct-agg.split.enabled}) with a single distinct key, bucketed
 * into 1024 partial aggregations to relieve the hot key.
 */
public class DistinctAgg_Split_One_Distinct_Key_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flink = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flink.env().setParallelism(1);

        // Enable two-level (split) distinct aggregation and set its bucket count.
        flink.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.optimizer.distinct-agg.split.enabled", "true");
        flink.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.optimizer.distinct-agg.split.bucket-num", "1024");

        // datagen source -> global count(distinct user_id) + max -> print sink
        String sqlScript = "CREATE TABLE source_table (\n"
                + " user_id BIGINT COMMENT '用户 id',\n"
                + " name STRING COMMENT '用户姓名',\n"
                + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n"
                + " proctime AS PROCTIME()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " uv BIGINT,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " count(distinct user_id) as uv,\n"
                + " max(cast(server_timestamp as bigint)) as server_timestamp\n"
                + "FROM source_table\n";

        // Execute each ';'-separated statement and print its result.
        for (String statement : sqlScript.split(";")) {
            TableResult result = flink.streamTEnv().executeSql(statement);
            result.print();
        }
    }
}
================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_16_optimizer_options/DistinctAgg_Split_Two_Distinct_Key_Test.java
================================================
package flink.examples.sql._07.query._16_optimizer_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Example: enables the distinct-aggregation split optimization for a query with
 * TWO different COUNT(DISTINCT ...) keys (user_id and name) in the same SELECT.
 */
public class DistinctAgg_Split_Two_Distinct_Key_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flinkEnv.env().setParallelism(1);
        // Split COUNT(DISTINCT ...) into two aggregation levels to spread hot keys.
        flinkEnv.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.optimizer.distinct-agg.split.enabled", "true");
        // Number of buckets used by the split's first-level aggregation.
        flinkEnv.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.optimizer.distinct-agg.split.bucket-num", "1024");

        String sql = "CREATE TABLE source_table (\n"
                + " user_id BIGINT COMMENT '用户 id',\n"
                + " name STRING COMMENT '用户姓名',\n"
                + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n"
                + " proctime AS PROCTIME()\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10',\n"
                + " 'fields.server_timestamp.min' = '1',\n"
                + " 'fields.server_timestamp.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " user_id_uv BIGINT,\n"
                + " name_uv BIGINT,\n"
                + " server_timestamp BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " count(distinct user_id) as user_id_uv,\n"
                + " count(distinct name) as name_uv,\n"
                + " max(cast(server_timestamp as bigint)) as server_timestamp\n"
                + "FROM source_table\n";

        // Execute each ';'-separated statement independently.
        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}


================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_17_table_options/Dml_Syc_False_Test.java
================================================
package flink.examples.sql._07.query._17_table_options;

import org.apache.flink.table.api.StatementSet;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Example: runs two INSERT statements via a StatementSet with table.dml-sync = false
 * (asynchronous DML submission).
 */
public class Dml_Syc_False_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flinkEnv.env().setParallelism(1);
        // Submit DML asynchronously; execute() returns without waiting for job completion.
        flinkEnv.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.dml-sync", "false");

        String sql = "CREATE TABLE source_table (\n"
                + " id BIGINT,\n"
                + " money BIGINT,\n"
                + " row_time AS TO_TIMESTAMP_LTZ(cast(UNIX_TIMESTAMP() as bigint) * 1000, 3),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.id.min' = '1',\n"
                + " 'fields.id.max' = '100000',\n"
                + " 'fields.money.min' = '1',\n"
                + " 'fields.money.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table_1 (\n"
                + " window_end timestamp(3),\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT,\n"
                + " count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table_2 (\n"
                + " id bigint,\n"
                + " window_end timestamp(3),\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table_1\n"
                + "SELECT window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money,\n"
                + " count(distinct id) as count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end\n"
                + ";\n"
                + "\n"
                + "insert into sink_table_2\n"
                + "SELECT id, \n"
                + " window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end, \n"
                + " id\n"
                + ";";

        StatementSet statementSet = flinkEnv.streamTEnv().createStatementSet();
        for (String innerSql : sql.split(";")) {
            // FIX: was innerSql.contains("insert"), which would misroute any DDL whose
            // text merely contains "insert" (e.g. a table/column name). Checking the
            // statement's leading keyword classifies it correctly.
            if (innerSql.trim().startsWith("insert")) {
                statementSet.addInsertSql(innerSql);
            } else {
                // DDL: execute eagerly (result was previously bound to an unused local).
                flinkEnv.streamTEnv().executeSql(innerSql);
            }
        }
        // Submit all buffered INSERTs as one job; returns immediately (dml-sync = false).
        statementSet.execute();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_17_table_options/Dml_Syc_True_Test.java
================================================
package flink.examples.sql._07.query._17_table_options;

import org.apache.flink.table.api.StatementSet;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Example: runs two INSERT statements via a StatementSet with table.dml-sync = true
 * (execute() blocks until the job finishes).
 */
public class Dml_Syc_True_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flinkEnv.env().setParallelism(1);
        // Submit DML synchronously; execute() waits for job completion.
        flinkEnv.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.dml-sync", "true");

        String sql = "CREATE TABLE source_table (\n"
                + " id BIGINT,\n"
                + " money BIGINT,\n"
                + " row_time AS TO_TIMESTAMP_LTZ(cast(UNIX_TIMESTAMP() as bigint) * 1000, 3),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.id.min' = '1',\n"
                + " 'fields.id.max' = '100000',\n"
                + " 'fields.money.min' = '1',\n"
                + " 'fields.money.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table_1 (\n"
                + " window_end timestamp(3),\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT,\n"
                + " count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table_2 (\n"
                + " id bigint,\n"
                + " window_end timestamp(3),\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table_1\n"
                + "SELECT window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money,\n"
                + " count(distinct id) as count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end\n"
                + ";\n"
                + "\n"
                + "insert into sink_table_2\n"
                + "SELECT id, \n"
                + " window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end, \n"
                + " id\n"
                + ";";

        StatementSet statementSet = flinkEnv.streamTEnv().createStatementSet();
        for (String innerSql : sql.split(";")) {
            // FIX: was innerSql.contains("insert"); match on the leading keyword instead
            // so non-DML statements that mention "insert" are not misrouted.
            if (innerSql.trim().startsWith("insert")) {
                statementSet.addInsertSql(innerSql);
            } else {
                // DDL: execute eagerly (result was previously bound to an unused local).
                flinkEnv.streamTEnv().executeSql(innerSql);
            }
        }
        // Submit all buffered INSERTs as one job; blocks until done (dml-sync = true).
        statementSet.execute();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_17_table_options/TimeZone_window_Test.java
================================================
package flink.examples.sql._07.query._17_table_options;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Example: demonstrates the effect of table.local-time-zone (set to GMT+00:00)
 * on a cumulate-window aggregation's window boundaries.
 */
public class TimeZone_window_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flinkEnv.env().setParallelism(1);
        // Session time zone used when converting TIMESTAMP_LTZ values for windowing/display.
        flinkEnv.streamTEnv()
                .getConfig()
                .getConfiguration()
                .setString("table.local-time-zone", "GMT+00:00");

        String sql = "CREATE TABLE source_table (\n"
                + " id BIGINT,\n"
                + " money BIGINT,\n"
                + " row_time AS TO_TIMESTAMP_LTZ(cast(UNIX_TIMESTAMP() as bigint) * 1000, 3),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.id.min' = '1',\n"
                + " 'fields.id.max' = '100000',\n"
                + " 'fields.money.min' = '1',\n"
                + " 'fields.money.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " window_end timestamp(3),\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT,\n"
                + " count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table\n"
                + "SELECT window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money,\n"
                + " count(distinct id) as count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end";

        // Execute each ';'-separated statement independently.
        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_07/query/_18_performance_tuning/Count_Distinct_Filter_Test.java
================================================
package flink.examples.sql._07.query._18_performance_tuning;

import org.apache.flink.table.api.StatementSet;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Example: computes filtered COUNT(DISTINCT ...) variants via CASE WHEN inside the
 * aggregate, so all distinct counts share one deduplication state.
 */
public class Count_Distinct_Filter_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"});
        flinkEnv.env().setParallelism(1);

        // NOTE(review): the datagen DDL declares a `name` column but provides no
        // 'fields.name.*' options — confirm datagen's defaults are acceptable here.
        String sql = "CREATE TABLE source_table (\n"
                + " id BIGINT,\n"
                + " money BIGINT,\n"
                + " name STRING,\n"
                + " row_time AS TO_TIMESTAMP_LTZ(cast(UNIX_TIMESTAMP() as bigint) * 1000, 3),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.id.min' = '1',\n"
                + " 'fields.id.max' = '100000',\n"
                + " 'fields.money.min' = '1',\n"
                + " 'fields.money.max' = '100000'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table_1 (\n"
                + " window_end timestamp(3),\n"
                + " window_start timestamp(3),\n"
                + " sum_money BIGINT,\n"
                + " count_distinct_id BIGINT,\n"
                + " a_count_distinct_id BIGINT,\n"
                + " b_count_distinct_id BIGINT\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "insert into sink_table_1\n"
                + "SELECT window_end, \n"
                + " window_start, \n"
                + " sum(money) as sum_money,\n"
                + " count(distinct id) as count_distinct_id,\n"
                + " count(distinct case when name = 'a' then id else null end) as a_count_distinct_id,\n"
                + " count(distinct case when name = 'b' then id else null end) as b_count_distinct_id\n"
                + "FROM TABLE(CUMULATE(\n"
                + " TABLE source_table\n"
                + " , DESCRIPTOR(row_time)\n"
                + " , INTERVAL '5' SECOND\n"
                + " , INTERVAL '1' DAY))\n"
                + "GROUP BY window_start, \n"
                + " window_end\n"
                + ";";

        StatementSet statementSet = flinkEnv.streamTEnv().createStatementSet();
        for (String innerSql : sql.split(";")) {
            // FIX: was innerSql.contains("insert"); match on the leading keyword instead
            // so non-DML statements that mention "insert" are not misrouted.
            if (innerSql.trim().startsWith("insert")) {
                statementSet.addInsertSql(innerSql);
            } else {
                // DDL: execute eagerly (result was previously bound to an unused local).
                flinkEnv.streamTEnv().executeSql(innerSql);
            }
        }
        statementSet.execute();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/Utils.java
================================================
package flink.examples.sql._08.batch;

import java.util.regex.Pattern;

public class Utils {

    // https://blog.csdn.net/qq_21383435/article/details/82286132
    // Matches either a quoted SQL string (captured as group 1, with '' escapes) or a
    // comment: "--" to end of line, "/* ... */", or "#" to end of line.
    // FIX: compiled once instead of on every format() call, and the trailing empty
    // alternation "|" removed — it only produced zero-width matches that replaceAll
    // substituted with the empty string, so the output is unchanged.
    private static final Pattern COMMENT_OR_STRING =
            Pattern.compile("(?ms)('(?:''|[^'])*')|--.*?$|/\\*.*?\\*/|#.*?$");

    /**
     * Strips SQL comments (--, block, #) from {@code sql} while preserving quoted
     * string literals: each match is replaced by "$1", i.e. the literal itself when
     * group 1 matched, or the empty string when a comment matched.
     */
    public static String format(String sql) {
        return COMMENT_OR_STRING.matcher(sql).replaceAll("$1");
    }
}


================================================
FILE:
flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_01_ddl/HiveDDLTest.java
================================================
package flink.examples.sql._08.batch._01_ddl;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

/**
 * Hive DDL example run against a local Hive metastore (HIVE dialect required for
 * "insert overwrite table").
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 */
public class HiveDDLTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("myhive", hive);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("myhive");
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // String createTableSql = "CREATE TABLE hive_table_1 (\n"
        //         + " user_id STRING,\n"
        //         + " order_amount DOUBLE\n"
        //         + ") PARTITIONED BY (\n"
        //         + " p_date STRING\n"
        //         + ") STORED AS parquet";
        // tEnv.executeSql(createTableSql);
        // The hive dialect supports "insert overwrite table";
        // the default dialect does not.
        tEnv.executeSql("insert overwrite table hive_table_1 select * from hive_table")
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/HiveDMLBetweenAndTest.java
================================================
package flink.examples.sql._08.batch._02_dml;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.module.CoreModule;
import org.apache.flink.table.module.hive.HiveModule;

/**
 * Hive DML example using BETWEEN ... AND on a partition column, with the HiveModule
 * loaded before the core module so Hive built-ins (e.g. nvl) resolve first.
 *
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 */
public class HiveDMLBetweenAndTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("myhive", hive);
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("myhive");
        String version = "3.1.2";
        // Reload modules so HiveModule's functions take precedence over core's.
        tEnv.unloadModule("core");
        tEnv.loadModule("myhive", new HiveModule(version));
        tEnv.loadModule("core", CoreModule.INSTANCE);
        String sql = "select count(1) as uv\n"
                + " , sum(part_pv) as pv\n"
                + " , max(part_max) as max_no\n"
                + " , nvl(min(part_min), 1) as min_no\n"
                + "from (\n"
                + " select user_id\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date between '20210920' and '20210920'\n"
                + " group by user_id\n"
                + ") tmp";
        tEnv.executeSql(sql)
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/HiveDMLTest.java
================================================
package flink.examples.sql._08.batch._02_dml;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.module.hive.HiveModule;

/**
 * Hive DML example (nested aggregation with nvl) using the HiveModule for Hive built-ins.
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 */
public class HiveDMLTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("myhive", hive);
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("myhive");
        String version = "3.1.2";
        tEnv.loadModule("myhive", new HiveModule(version));
        tEnv.executeSql("select count(1) as uv\n"
                + " , sum(part_pv) as pv\n"
                + " , max(part_max) as max_no\n"
                + " , nvl(min(part_min), 1) as min_no\n"
                + "from (\n"
                + " select user_id\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date = '20210920'\n"
                + " group by user_id\n"
                + ")")
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/HiveTest2.java
================================================
package flink.examples.sql._08.batch._02_dml;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

/**
 * Hive batch query example without HiveModule or the HIVE dialect — uses only the
 * default dialect against the registered HiveCatalog.
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 */
public class HiveTest2 {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("myhive", hive);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("myhive");
        tEnv.executeSql("select count(1) as uv\n"
                + " , sum(part_pv) as pv\n"
                + " , max(part_max) as max_no\n"
                + " , min(part_min) as min_no\n"
                + "from (\n"
                + " select user_id\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date = '20210920'\n"
                + " group by user_id\n"
                + ")")
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_01_hive_dialect/HiveDMLTest.java
================================================
package flink.examples.sql._08.batch._02_dml._01_hive_dialect;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

/**
 * Hive-dialect INSERT example against a local Hive metastore.
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 */
public class HiveDMLTest {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(10);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("myhive", hive);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("myhive");
        long l = System.currentTimeMillis();
        // NOTE(review): this statement has no target table name ("insert into values(...)");
        // as written it looks unparsable — confirm the intended table was dropped by mistake.
        tEnv.executeSql("insert into values(" + l + ", '20210923', '00')")
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_02_with_as/HIveWIthAsTest.java
================================================
package flink.examples.sql._08.batch._02_dml._02_with_as;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.module.CoreModule;

import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2;

/**
 * WITH ... AS (CTE) example in the HIVE dialect, using the project's HiveModuleV2
 * so Hive UDFs such as get_json_object resolve.
 *
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 */
public class HIveWIthAsTest {

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("default", hive);
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("default");
        String version = "3.1.2";
        // Reload modules so HiveModuleV2's functions take precedence over core's.
        tEnv.unloadModule("core");
        HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version);
        tEnv.loadModule("default", hiveModuleV2);
        tEnv.loadModule("core", CoreModule.INSTANCE);
        String sql3 = ""
                + "with tmp as ("
                + ""
                + "select get_json_object(user_id, '$.user_id')\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date between '20210920' and '20210920'\n"
                + " group by get_json_object(user_id, '$.user_id'))"
                + "\n"
                + "select * from tmp";
        tEnv.executeSql(sql3)
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_03_substr/HiveSubstrTest.java
================================================
package flink.examples.sql._08.batch._02_dml._03_substr;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.module.CoreModule;

import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2;

/**
 * Hive substr() UDF example in the HIVE dialect, via the project's HiveModuleV2.
 *
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 */
public class HiveSubstrTest {

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例");
        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("default", hive);
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("default");
        String version = "3.1.2";
        // Reload modules so HiveModuleV2's functions take precedence over core's.
        tEnv.unloadModule("core");
        HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version);
        tEnv.loadModule("default", hiveModuleV2);
        tEnv.loadModule("core", CoreModule.INSTANCE);
        String sql3 = ""
                + "with tmp as ("
                + ""
                + "select substr(user_id, 1, 10)\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date between '20210920' and '20210920'\n"
                + " group by substr(user_id, 1, 10))"
                + "\n"
                + "select * from tmp";
        tEnv.executeSql(sql3)
                .print();
    }
}


================================================
FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_04_tumble_window/Test.java
================================================
package flink.examples.sql._08.batch._02_dml._04_tumble_window;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.module.CoreModule;

import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2;

/**
 * Batch TUMBLE window over a Hive table (default dialect for the windowed INSERT).
 *
 * hadoop start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive start: $HIVE_HOME/bin/hive --service metastore &amp;
 * hive cli: $HIVE_HOME/bin/hive
 */
public class Test {

    // Source table DDL and sample data used by this example:
    // CREATE TABLE `hive_tumble_window_table`(
    //     `user_id` string,
    //     `order_amount` double,
    //     `server_timestamp` timestamp
    //
    // )
    // PARTITIONED BY (
    //     `p_date` string)
    //
    //
    // insert into hive_tumble_window_table values ('yyc', 300, '2021-09-30 11:22:57.0', '20210920'), ('yyc', 300,
    // '2021-09-30 11:22:58.0', '20210920'), ('yyc', 300, '2021-09-30 11:23:57.0', '20210920'), ('yyc', 300,
    // '2021-09-30 11:24:57.0', '20210920'), ('yyc', 300, '2021-09-30 11:25:57.0', '20210920'), ('yyc', 300,
    // '2021-09-30 11:25:58.0', '20210920')
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time
                .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);
        // Checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        EnvironmentSettings
settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "insert overwrite hive_tumble_window_table_sink\n" + "select TUMBLE_START(server_timestamp, INTERVAL '1' MINUTE) as window_start\n" + " , count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + "from hive_tumble_window_table\n" + "where p_date = '20210920'\n" + "group by TUMBLE(server_timestamp, INTERVAL '1' MINUTE)"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_04_tumble_window/Test1.java ================================================ package flink.examples.sql._08.batch._02_dml._04_tumble_window; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import 
org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test1 { // CREATE TABLE `hive_tumble_window_table`( // `user_id` string, // `order_amount` double, // `server_timestamp` timestamp // // ) //PARTITIONED BY ( // `p_date` string) // // //insert into hive_tumble_window_table values ('yyc', 300, '2021-09-30 11:22:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:22:58.0', '20210920'), ('yyc', 300, '2021-09-30 11:23:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:24:57.0', '20210920'), ('yyc', 300, '2021-09-30 11:25:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:25:58.0', '20210920') public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = 
TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "" + "with tmp as (\n" + "select cast(server_timestamp as timestamp(3)) as ti, order_amount as order_amount from hive_tumble_window_table\n" + ")\n" + "\n" + "select window_start, window_end, count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + "from TABLE(\n" + " TUMBLE(TABLE tmp, DESCRIPTOR(ti), INTERVAL '1' MINUTES))\n" // + "from tmp\n"; // + "where p_date = '20210920'\n" + "group by window_start, window_end\n"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_04_tumble_window/Test2_BIGINT_SOURCE.java ================================================ package flink.examples.sql._08.batch._02_dml._04_tumble_window; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import 
org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test2_BIGINT_SOURCE { // CREATE TABLE `hive_tumble_window_table`( // `user_id` string, // `order_amount` double, // `server_timestamp` timestamp // // ) //PARTITIONED BY ( // `p_date` string) // // //insert into hive_tumble_window_table values ('yyc', 300, '2021-09-30 11:22:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:22:58.0', '20210920'), ('yyc', 300, '2021-09-30 11:23:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:24:57.0', '20210920'), ('yyc', 300, '2021-09-30 11:25:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:25:58.0', '20210920') public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = 
TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "\n" // + "insert overwrite hive_tumble_window_table_sink\n" + "select TUMBLE_START(st, INTERVAL '1' MINUTE) as window_start\n" + " , count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + "from (select cast(TO_TIMESTAMP(server_timestamp_bigint, 3) as timestamp(3)) as st, order_amount as order_amount from hive_tumble_window_table_bigint_source where p_date = '20210920') tmp1\n" + "group by TUMBLE(st, INTERVAL '1' MINUTE)"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_04_tumble_window/Test3.java ================================================ package flink.examples.sql._08.batch._02_dml._04_tumble_window; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import 
org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test3 { // CREATE TABLE `hive_tumble_window_table`( // `user_id` string, // `order_amount` double, // `server_timestamp` timestamp // // ) //PARTITIONED BY ( // `p_date` string) // // //insert into hive_tumble_window_table values ('yyc', 300, '2021-09-30 11:22:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:22:58.0', '20210920'), ('yyc', 300, '2021-09-30 11:23:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:24:57.0', '20210920'), ('yyc', 300, '2021-09-30 11:25:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:25:58.0', '20210920') public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = 
TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "insert overwrite hive_tumble_window_table_bigint_source partition(p_date = '20210921')" // + "with tmp as (\n" // + "select cast(server_timestamp as timestamp(3)) as ti, order_amount as order_amount from hive_tumble_window_table\n" // + ")\n" // + "\n" + "select user_id, order_amount, server_timestamp_bigint, server_timestamp from hive_tumble_window_table_bigint_source\n"; // + "from tmp\n"; // + "where p_date = '20210920'\n" // + "group by server_timestamp\n"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_04_tumble_window/Test5.java ================================================ package flink.examples.sql._08.batch._02_dml._04_tumble_window; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; 
import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test5 { // CREATE TABLE `hive_tumble_window_table`( // `user_id` string, // `order_amount` double, // `server_timestamp` timestamp // // ) //PARTITIONED BY ( // `p_date` string) // // //insert into hive_tumble_window_table values ('yyc', 300, '2021-09-30 11:22:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:22:58.0', '20210920'), ('yyc', 300, '2021-09-30 11:23:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:24:57.0', '20210920'), ('yyc', 300, '2021-09-30 11:25:57.0', '20210920'), ('yyc', 300, // '2021-09-30 11:25:58.0', '20210920') public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); 
tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "select TUMBLE_START(server_timestamp, INTERVAL '1' MINUTE) as window_start\n" + " , count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + "from hive_tumble_window_table\n" + "where p_date = '20210920'\n" + "group by TUMBLE(server_timestamp, INTERVAL '1' MINUTE)"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_05_batch_to_datastream/Test.java ================================================ package flink.examples.sql._08.batch._02_dml._05_batch_to_datastream; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import 
org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test { public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "" + 
"with tmp as (" + "" + "select count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n" + ")\n" + "select * from tmp"; Table t = tEnv.sqlQuery(sql3); tEnv.createTemporaryView("test", t); tEnv.executeSql("select * from test") .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_02_dml/_06_select_where/Test.java ================================================ package flink.examples.sql._08.batch._02_dml._06_select_where; import java.lang.reflect.Field; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import org.apache.calcite.sql.SqlNode; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.api.internal.TableEnvironmentImpl; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import org.apache.flink.table.planner.delegation.ParserImpl; import org.apache.flink.table.planner.parse.CalciteParser; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster *

* hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test { public static void main(String[] args) throws NoSuchFieldException, IllegalAccessException { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "" + "with tmp as (" + "" + "select count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + " from hive_table\n" + " where 
mod(cast(order_amount as bigint), 10) = 0 and cast(order_amount as bigint) <> 0\n" + ")\n" + "select * from tmp"; ParserImpl p = (ParserImpl) ((TableEnvironmentImpl) tEnv).getParser(); Field f = p.getClass().getDeclaredField("calciteParserSupplier"); f.setAccessible(true); Supplier su = (Supplier) f.get(p); CalciteParser calciteParser = su.get(); SqlNode s = calciteParser.parse(sql3); Table t = tEnv.sqlQuery(sql3); tEnv.createTemporaryView("test", t); tEnv.executeSql("select * from test") .print(); } }
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/HiveModuleV2.java ================================================
package flink.examples.sql._08.batch._03_hive_udf;

import static org.apache.flink.util.Preconditions.checkArgument;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.table.catalog.hive.client.HiveShim;
import org.apache.flink.table.catalog.hive.client.HiveShimLoader;
import org.apache.flink.table.catalog.hive.factories.HiveFunctionDefinitionFactory;
import org.apache.flink.table.functions.FunctionDefinition;
import org.apache.flink.table.module.Module;
import org.apache.flink.table.module.hive.udf.generic.GenericUDFLegacyGroupingID;
import org.apache.flink.table.module.hive.udf.generic.HiveGenericUDFGrouping;
import org.apache.flink.util.StringUtils;
import org.apache.hadoop.hive.ql.exec.FunctionInfo;

/**
 * A variant of Flink's built-in HiveModule that, besides exposing Hive's built-in
 * functions, also lets callers register additional Hive UDFs by class name at
 * runtime via {@link #registryHiveUDF(String, String)}.
 *
 * Fixes applied in review:
 * - restored the generic type parameters that were stripped from the extracted
 *   source (raw {@code Set}/{@code Map}/{@code Optional}), matching the surviving
 *   {@code new HashSet<>()} usage;
 * - {@link #listFunctions()} now reflects UDFs registered after its first call;
 * - the precondition message matches the actual null-or-whitespace check.
 */
public class HiveModuleV2 implements Module {

    // a set of functions that shouldn't be overridden by HiveModule
    @VisibleForTesting
    static final Set<String> BUILT_IN_FUNC_BLACKLIST =
            Collections.unmodifiableSet(
                    new HashSet<>(
                            Arrays.asList(
                                    "count", "cume_dist", "current_date", "current_timestamp",
                                    "dense_rank", "first_value", "lag", "last_value", "lead",
                                    "ntile", "rank", "row_number", "hop", "hop_end",
                                    "hop_proctime", "hop_rowtime", "hop_start", "percent_rank",
                                    "session", "session_end", "session_proctime",
                                    "session_rowtime", "session_start", "tumble", "tumble_end",
                                    "tumble_proctime", "tumble_rowtime", "tumble_start")));

    private final HiveFunctionDefinitionFactory factory;
    private final String hiveVersion;
    private final HiveShim hiveShim;

    // Extra UDFs registered at runtime: function name -> Hive UDF class name.
    private final Map<String, String> map;

    // Lazily-populated cache of Hive's built-in function names (minus the blacklist).
    // NOTE(review): not thread-safe; assumed to be used from a single thread — confirm.
    private Set<String> functionNames;

    public HiveModuleV2() {
        this(HiveShimLoader.getHiveVersion());
    }

    /**
     * @param hiveVersion the Hive version used to load the matching {@link HiveShim};
     *                    must be non-null and non-blank
     */
    public HiveModuleV2(String hiveVersion) {
        // Fix: message now matches the actual check (null OR whitespace-only).
        checkArgument(
                !StringUtils.isNullOrWhitespaceOnly(hiveVersion),
                "hiveVersion cannot be null or empty");
        this.hiveVersion = hiveVersion;
        this.hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
        this.factory = new HiveFunctionDefinitionFactory(hiveShim);
        this.functionNames = new HashSet<>();
        this.map = new HashMap<>();
    }

    @Override
    public Set<String> listFunctions() {
        // lazily initialize the built-in function cache
        if (functionNames.isEmpty()) {
            functionNames = hiveShim.listBuiltInFunctions();
            functionNames.removeAll(BUILT_IN_FUNC_BLACKLIST);
            functionNames.add("grouping");
            functionNames.add(GenericUDFLegacyGroupingID.NAME);
        }
        // Fix: union the runtime-registered UDFs on every call, so functions added
        // via registryHiveUDF after the first listFunctions() call are still visible.
        Set<String> result = new HashSet<>(functionNames);
        result.addAll(map.keySet());
        return result;
    }

    @Override
    public Optional<FunctionDefinition> getFunctionDefinition(String name) {
        if (BUILT_IN_FUNC_BLACKLIST.contains(name)) {
            return Optional.empty();
        }
        // We override Hive's grouping function. Refer to the implementation for more details.
        if (name.equalsIgnoreCase("grouping")) {
            return Optional.of(
                    factory.createFunctionDefinitionFromHiveFunction(
                            name, HiveGenericUDFGrouping.class.getName()));
        }
        // this function is used to generate legacy GROUPING__ID value for old hive versions
        if (name.equalsIgnoreCase(GenericUDFLegacyGroupingID.NAME)) {
            return Optional.of(
                    factory.createFunctionDefinitionFromHiveFunction(
                            name, GenericUDFLegacyGroupingID.class.getName()));
        }
        Optional<FunctionInfo> info = hiveShim.getBuiltInFunctionInfo(name);
        if (info.isPresent()) {
            return info.map(
                    functionInfo ->
                            factory.createFunctionDefinitionFromHiveFunction(
                                    name, functionInfo.getFunctionClass().getName()));
        } else {
            // fall back to UDFs registered at runtime via registryHiveUDF
            return Optional.ofNullable(this.map.get(name))
                    .map(hiveUDFClassName ->
                            factory.createFunctionDefinitionFromHiveFunction(
                                    name, hiveUDFClassName));
        }
    }

    public String getHiveVersion() {
        return hiveVersion;
    }

    /** Registers an extra Hive UDF under the given name; resolved lazily by class name. */
    public void registryHiveUDF(String hiveUDFName, String hiveUDFClassName) {
        this.map.put(hiveUDFName, hiveUDFClassName);
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/HiveUDFRegistryTest.java ================================================ package flink.examples.sql._08.batch._03_hive_udf; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; /** * hadoop 
start: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive metastore start: $HIVE_HOME/bin/hive --service metastore &
 * hive cli: $HIVE_HOME/bin/hive
 */
public class HiveUDFRegistryTest {

    public static void main(String[] args) {
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        // Restart at most 6 times within a 10-minute failure-rate window, 5s apart.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // checkpoint settings
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        // NOTE(review): 3L is 3 *milliseconds* of min pause — probably meant 3000L; confirm.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        // Fix: job name was copy-pasted from an unrelated interval-join example.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Hive UDF 注册案例");

        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("default", hive);
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("default");

        String version = "3.1.2";
        HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version);
        // NOTE(review): module resolution follows load order; the Hive module is
        // loaded before core here — confirm this precedence is intended.
        tEnv.unloadModule("core");
        tEnv.loadModule("default", hiveModuleV2);
        tEnv.loadModule("core", CoreModule.INSTANCE);

        String[] s = tEnv.listFunctions();
        String[] s1 = tEnv.listUserDefinedFunctions();
        // Fix: both lists were computed and silently discarded; print them so the
        // demo actually shows the effect of loading the Hive module.
        System.out.println("functions: " + String.join(", ", s));
        System.out.println("user defined functions: " + String.join(", ", s1));
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/HiveUDFRegistryUnloadTest.java ================================================
package flink.examples.sql._08.batch._03_hive_udf;

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.module.CoreModule;

/**
 * Demonstrates unloading the core module, loading the custom Hive module, then re-loading core.
 *
 * hadoop startup: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive startup: $HIVE_HOME/bin/hive --service metastore &
 * hive cli:     $HIVE_HOME/bin/hive
 */
public class HiveUDFRegistryUnloadTest {

    public static void main(String[] args) {
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        // Restart at most 6 times within a 10-minute window, 5 seconds apart.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(6,
                org.apache.flink.api.common.time.Time.of(10L, TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS)));
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.setParallelism(1);

        // Checkpoint settings.
        // NOTE(review): the TableEnvironment below is built from `settings` only, so this
        // StreamExecutionEnvironment (and its checkpoint config) is never used by the query.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L);
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .useBlinkPlanner()
                .inBatchMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);
        // BUG FIX: the job name was copy-pasted from the interval-outer-join example;
        // use a name that matches what this test actually does.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Hive UDF registry unload example");

        String defaultDatabase = "default";
        String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf";
        HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir);
        tEnv.registerCatalog("default", hive);
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // set the HiveCatalog as the current catalog of the session
        tEnv.useCatalog("default");

        String version = "3.1.2";
        HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version);
        // Unload core first so the Hive module takes function-lookup precedence, then restore core.
        tEnv.unloadModule("core");
        tEnv.loadModule("default", hiveModuleV2);
        tEnv.loadModule("core", CoreModule.INSTANCE);

        // Captured for inspection (e.g. from a debugger); intentionally not printed.
        String[] s = tEnv.listFunctions();
        String[] s1 = tEnv.listUserDefinedFunctions();
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_01_GenericUDAFResolver2/HiveUDAF_hive_module_registry_Test.java ================================================
package flink.examples.sql._08.batch._03_hive_udf._01_GenericUDAFResolver2;

import java.io.IOException;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Registers {@code TestHiveUDAF} through the custom Hive module and calls it in a batch query.
 *
 * hadoop startup: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive startup: $HIVE_HOME/bin/hive --service metastore &
 * hive cli:     $HIVE_HOME/bin/hive
 */
public class HiveUDAF_hive_module_registry_Test {

    public static void main(String[] args) throws IOException {
        FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args);
        // NOTE: registering the UDAF via the Hive module executes successfully without any issue.
        flinkEnv.hiveModuleV2().registryHiveUDF("test_hive_udaf", TestHiveUDAF.class.getName());
        String sql3 = "select test_hive_udaf(user_id)\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date between '20210920' and '20210920'\n"
                + " group by 0";
        flinkEnv.batchTEnv()
                .executeSql(sql3)
                .print();
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_01_GenericUDAFResolver2/HiveUDAF_sql_registry_create_function_Test.java ================================================
package flink.examples.sql._08.batch._03_hive_udf._01_GenericUDAFResolver2;

import java.io.IOException;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

/**
 * Creates the Hive UDAF with CREATE FUNCTION (persisted into the Hive catalog) and calls it.
 *
 * hadoop startup: /usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh
 * http://localhost:9870/
 * http://localhost:8088/cluster
 *
 * hive startup: $HIVE_HOME/bin/hive --service metastore &
 * hive cli:     $HIVE_HOME/bin/hive
 */
public class HiveUDAF_sql_registry_create_function_Test {

    public static void main(String[] args) throws ClassNotFoundException, IOException {
        FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args);

        // NOTE: creating a Hive UDAF via SQL executes normally; once CREATE FUNCTION
        // completes, the function is registered in the Hive catalog.
        String sql2 = "CREATE FUNCTION test_hive_udaf as 'flink.examples.sql._08.batch._03_hive_udf._01_GenericUDAFResolver2.TestHiveUDAF'";
        String sql3 = "select default.test_hive_udaf(user_id, '20210920')\n"
                + " , count(1) as part_pv\n"
                + " , max(order_amount) as part_max\n"
                + " , min(order_amount) as part_min\n"
                + " from hive_table\n"
                + " where p_date between '20210920' and '20210920'\n"
                + " group by 0";
        flinkEnv.batchTEnv().executeSql(sql2);
        flinkEnv.batchTEnv().executeSql(sql3)
                .print();
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_01_GenericUDAFResolver2/HiveUDAF_sql_registry_create_temporary_function_Test.java ================================================
package flink.examples.sql._08.batch._03_hive_udf._01_GenericUDAFResolver2;
import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDAF_sql_registry_create_temporary_function_Test { public static void main(String[] args) throws ClassNotFoundException, IOException { FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args); // TODO sql 执行创建 hive udtf 会报错 // java.lang.UnsupportedOperationException: This CatalogFunction is a InlineCatalogFunction. This method should not be called. // 因为 CREATE TEMPORARY FUNCTION 使用的是 inline catalog String sql2 = "CREATE TEMPORARY FUNCTION test_hive_udaf as 'flink.examples.sql._08.batch._03_hive_udf._01_GenericUDAFResolver2.TestHiveUDAF'"; String sql3 = "select test_hive_udaf(user_id, '20210920')\n" + " , count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n" + " group by 0"; flinkEnv.batchTEnv().executeSql(sql2); flinkEnv.batchTEnv().executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_01_GenericUDAFResolver2/TestHiveUDAF.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._01_GenericUDAFResolver2; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFParameterInfo; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver2; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import 
org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.Text; public class TestHiveUDAF implements GenericUDAFResolver2 { public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { return new InneGenericUDAFEvaluatorr(); } public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo paramInfo) throws SemanticException { return new InneGenericUDAFEvaluatorr(); } public static class InneGenericUDAFEvaluatorr extends GenericUDAFEvaluator { private PrimitiveObjectInspector inputOI; @Override public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { super.init(m, parameters); this.inputOI = (PrimitiveObjectInspector) parameters[0]; return PrimitiveObjectInspectorFactory.writableStringObjectInspector; } static class StringAgg implements AggregationBuffer { String all = ""; } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { StringAgg stringAgg = new StringAgg(); return stringAgg; } @Override public void reset(AggregationBuffer agg) throws HiveException { StringAgg stringAgg = (StringAgg) agg; stringAgg.all = ""; } @Override public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { StringAgg myagg = (StringAgg) agg; String inputStr = PrimitiveObjectInspectorUtils.getString(parameters[0], inputOI); myagg.all += inputStr; } @Override public Object terminatePartial(AggregationBuffer agg) throws HiveException { return this.terminate(agg); } @Override public void merge(AggregationBuffer agg, Object partial) throws HiveException { if (partial != null) { StringAgg stringAgg = (StringAgg) agg; stringAgg.all += partial; } } @Override public Object 
terminate(AggregationBuffer agg) throws HiveException { return new Text(((StringAgg) agg).all); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_02_GenericUDTF/HiveUDTF_hive_module_registry_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._02_GenericUDTF; import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDTF_hive_module_registry_Test { public static void main(String[] args) throws IOException { FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args); // TODO 可以成功执行没有任何问题 flinkEnv.hiveModuleV2().registryHiveUDF("test_hive_udtf", TestHiveUDTF.class.getName()); String sql3 = "select test_hive_udtf(user_id) as (a)\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n"; flinkEnv.batchTEnv() .executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_02_GenericUDTF/HiveUDTF_sql_registry_create_function_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._02_GenericUDTF; import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDTF_sql_registry_create_function_Test { public static void main(String[] args) throws ClassNotFoundException, IOException { FlinkEnv flinkEnv = 
FlinkEnvUtils.getBatchTableEnv(args); // String sql = "drop function default.test_hive_udtf"; // TODO sql 执行正常,create function 使用的是 hive catalog 没有任何问题 String sql2 = "CREATE FUNCTION test_hive_udtf as 'flink.examples.sql._08.batch._03_hive_udf._02_GenericUDTF.TestHiveUDTF'"; String sql3 = "select default.test_hive_udtf(user_id)\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n"; // flinkEnv.batchTEnv().executeSql(sql); flinkEnv.batchTEnv().executeSql(sql2); flinkEnv.batchTEnv().executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_02_GenericUDTF/HiveUDTF_sql_registry_create_temporary_function_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._02_GenericUDTF; import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDTF_sql_registry_create_temporary_function_Test { public static void main(String[] args) throws ClassNotFoundException, IOException { FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args); // TODO sql 执行创建 hive udtf 会报错 // Caused by: java.lang.UnsupportedOperationException: This CatalogFunction is a InlineCatalogFunction. This method should not be called. 
String sql2 = "CREATE TEMPORARY FUNCTION test_hive_udtf as 'flink.examples.sql._08.batch._03_hive_udf._02_GenericUDTF.TestHiveUDTF'"; String sql3 = "select default.test_hive_udtf(user_id)\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n"; flinkEnv.batchTEnv().executeSql(sql2); flinkEnv.batchTEnv().executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_02_GenericUDTF/TestHiveUDTF.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._02_GenericUDTF; import java.util.ArrayList; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; public class TestHiveUDTF extends GenericUDTF { @Override public StructObjectInspector initialize(ObjectInspector[] argOIs) throws UDFArgumentException { ArrayList fieldNames = new ArrayList() {{ add("column1"); }}; ArrayList fieldOIs = new ArrayList() {{ add(PrimitiveObjectInspectorFactory.javaStringObjectInspector); }}; return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs); } @Override public void process(Object[] objects) throws HiveException { forward(objects[0]); forward(objects[0]); } @Override public void close() throws HiveException { } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_03_built_in_udf/_01_get_json_object/HiveUDF_get_json_object_Test.java ================================================ 
package flink.examples.sql._08.batch._03_hive_udf._03_built_in_udf._01_get_json_object; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_get_json_object_Test { public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() 
.build(); TableEnvironment tEnv = TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.HIVE); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "select get_json_object(user_id, '$.user_id')\n" + " , count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n" + " group by get_json_object(user_id, '$.user_id')"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_03_built_in_udf/_02_rlike/HiveUDF_rlike_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._03_built_in_udf._02_rlike; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; 
import flink.examples.sql._08.batch._03_hive_udf.HiveModuleV2; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_rlike_Test { public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.13.5 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.HIVE); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); // TODO hive module 才支持 rLike String sql3 = "with tmp as (select case when user_id rlike 'a' then 1 else 0 end as b -- 注释\n" 
+ " , count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + " from hive_table\n" + " where p_date = '20210920'\n" + " group by user_id) \n" + "\n" + "select * from tmp"; tEnv.executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_04_GenericUDF/HiveUDF_hive_module_registry_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF; import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_hive_module_registry_Test { public static void main(String[] args) throws IOException { FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args); // TODO 可以正常执行 flinkEnv.hiveModuleV2().registryHiveUDF("test_hive_udf", TestGenericUDF.class.getName()); String sql3 = "select test_hive_udf(user_id)\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n"; flinkEnv.batchTEnv() .executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_04_GenericUDF/HiveUDF_sql_registry_create_function_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF; import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class 
HiveUDF_sql_registry_create_function_Test { public static void main(String[] args) throws ClassNotFoundException, IOException { FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args); // TODO sql 执行创建 hive udf 可以正常执行,create function 执行完成之后就会被注册到 hive catalog 中 String sql2 = "CREATE FUNCTION test_hive_udf as 'flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF'"; String sql3 = "select test_hive_udf(user_id)\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n"; flinkEnv.batchTEnv().executeSql(sql2); flinkEnv.batchTEnv().executeSql(sql3) .print(); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_04_GenericUDF/HiveUDF_sql_registry_create_temporary_function_Test.java ================================================ package flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF; import java.io.IOException; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_sql_registry_create_temporary_function_Test { public static void main(String[] args) throws ClassNotFoundException, IOException { FlinkEnv flinkEnv = FlinkEnvUtils.getBatchTableEnv(args); // TODO sql 执行创建 hive udf 可以正常执行 String sql2 = "CREATE TEMPORARY FUNCTION test_hive_udf as 'flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF'"; String sql3 = "select test_hive_udf(user_id)\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n"; flinkEnv.batchTEnv().executeSql(sql2); flinkEnv.batchTEnv().executeSql(sql3) .print(); } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_03_hive_udf/_04_GenericUDF/TestGenericUDF.java ================================================
package flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
import org.apache.hadoop.io.Text;

/**
 * Demo Hive GenericUDF: accepts one string argument and always returns the string "UNKNOWN".
 */
public class TestGenericUDF extends GenericUDF {

    // Inspector for the single string argument. Assigned in initialize(); currently not
    // read by evaluate() because the return value is a constant.
    // (The unused duplicate field `soi1` from the original was removed.)
    private transient StringObjectInspector soi = null;

    @Override
    public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
        PrimitiveObjectInspector primitiveObjectInspector = (PrimitiveObjectInspector) arguments[0];
        soi = (StringObjectInspector) primitiveObjectInspector;
        return PrimitiveObjectInspectorFactory
                .getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.STRING);
    }

    @Override
    public Object evaluate(DeferredObject[] arguments) throws HiveException {
        // Intentionally constant: this UDF only demonstrates registration/invocation.
        return new Text("UNKNOWN");
    }

    @Override
    public String getDisplayString(String[] children) {
        return "test";
    }
}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_04_flink_udf/FlinkUDAF_Test.java ================================================
package flink.examples.sql._08.batch._04_flink_udf;

/** Placeholder for a Flink-native UDAF example (not yet implemented). */
public class FlinkUDAF_Test {

}

================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_04_flink_udf/FlinkUDF_Test.java ================================================ package
flink.examples.sql._08.batch._04_flink_udf; public class FlinkUDF_Test { } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_04_flink_udf/FlinkUDTF_Test.java ================================================ package flink.examples.sql._08.batch._04_flink_udf; public class FlinkUDTF_Test { } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_08/batch/_05_test/_01_batch_to_datastream/Test.java ================================================ package flink.examples.sql._08.batch._05_test._01_batch_to_datastream; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; public class Test { public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() 
.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode() .build(); TableEnvironment tEnv = TableEnvironment.create(settings); // TODO 这一行会抛出异常 StreamTableEnvironment t1Env = StreamTableEnvironment.create(env, settings); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_01_hive_udf/_01_GenericUDF/HiveUDF_sql_registry_create_function_Test.java ================================================ package flink.examples.sql._09.udf._01_hive_udf._01_GenericUDF; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_sql_registry_create_function_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // TODO stream sql hive udf 创建不报错,执行使用报错 class cast exception String sql2 = "CREATE FUNCTION test_hive_udf as 'flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF'"; String sql = "CREATE TABLE source_table (\n" + " order_id STRING,\n" + " price BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.order_id.length' = '1',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = '1000000'\n" + ");\n" + "\n" + "\n" + "CREATE TABLE sink_table (\n" + " order_id STRING,\n" + " count_result BIGINT,\n" + " sum_result BIGINT,\n" + " avg_result DOUBLE,\n" + " min_result BIGINT,\n" + " max_result BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select test_hive_udf(order_id) as order_id,\n" + " count(*) as count_result,\n" + " 
sum(price) as sum_result,\n" + " avg(price) as avg_result,\n" + " min(price) as min_result,\n" + " max(price) as max_result\n" + "from source_table\n" + "group by order_id"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "GROUP AGG 案例"); flinkEnv.streamTEnv().executeSql(sql2); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_01_hive_udf/_01_GenericUDF/HiveUDF_sql_registry_create_function_with_hive_catalog_Test.java ================================================ package flink.examples.sql._09.udf._01_hive_udf._01_GenericUDF; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_sql_registry_create_function_with_hive_catalog_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.catalog", "true"}); // TODO stream sql hive udf 成功,底层可能调用了 hive 相关的逻辑,所以能成功 // String sql2 = "CREATE FUNCTION test_hive_udf as 'flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF'"; String sql = // "CREATE TABLE source_table (\n" // + " order_id STRING,\n" // + " price BIGINT\n" // + ") WITH (\n" // + " 'connector' = 'datagen',\n" // + " 'rows-per-second' = '10',\n" // + " 'fields.order_id.length' = '1',\n" // + " 'fields.price.min' = '1',\n" // + " 'fields.price.max' = '1000000'\n" // + ");\n" // + "\n" // + "\n" // + "CREATE TABLE sink_table (\n" // + " order_id STRING,\n" // + " count_result BIGINT,\n" // + " sum_result BIGINT,\n" // + " avg_result DOUBLE,\n" // + " min_result BIGINT,\n" // + " max_result BIGINT\n" // + ") WITH (\n" // 
+ " 'connector' = 'print'\n" // + ");\n" // + "\n" // + "insert into sink_table\n" + "select test_hive_udf(order_id) as order_id,\n" + " count(*) as count_result,\n" + " sum(price) as sum_result,\n" + " avg(price) as avg_result,\n" + " min(price) as min_result,\n" + " max(price) as max_result\n" + "from source_table\n" + "group by order_id"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "GROUP AGG 案例"); // flinkEnv.streamTEnv().executeSql(sql2); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_01_hive_udf/_01_GenericUDF/HiveUDF_sql_registry_create_temporary_function_Test.java ================================================ package flink.examples.sql._09.udf._01_hive_udf._01_GenericUDF; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_sql_registry_create_temporary_function_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); // TODO stream sql 执行 hive udf 创建不报错,执行使用报错 // Caused by: java.lang.ClassCastException: flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF cannot be cast to org.apache.flink.table.functions.UserDefinedFunction String sql2 = "CREATE TEMPORARY FUNCTION test_hive_udf as 'flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF'"; String sql = "CREATE TABLE source_table (\n" + " order_id STRING,\n" + " price BIGINT\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.order_id.length' = '1',\n" + " 'fields.price.min' = '1',\n" + " 'fields.price.max' = 
'1000000'\n" + ");\n" + "\n" + "\n" + "CREATE TABLE sink_table (\n" + " order_id STRING,\n" + " count_result BIGINT,\n" + " sum_result BIGINT,\n" + " avg_result DOUBLE,\n" + " min_result BIGINT,\n" + " max_result BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select test_hive_udf(order_id) as order_id,\n" + " count(*) as count_result,\n" + " sum(price) as sum_result,\n" + " avg(price) as avg_result,\n" + " min(price) as min_result,\n" + " max(price) as max_result\n" + "from source_table\n" + "group by order_id"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "GROUP AGG 案例"); flinkEnv.streamTEnv().executeSql(sql2); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_01_hive_udf/_01_GenericUDF/HiveUDF_sql_registry_create_temporary_function_with_hive_catalog_Test.java ================================================ package flink.examples.sql._09.udf._01_hive_udf._01_GenericUDF; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_sql_registry_create_temporary_function_with_hive_catalog_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[]{"--enable.hive.catalog", "true", "--enable.hive.dialect", "true"}); // TODO stream sql 执行 hive udf 创建不报错,执行使用报错 // Caused by: java.lang.ClassCastException: flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF cannot be cast to org.apache.flink.table.functions.UserDefinedFunction String sql2 = "CREATE TEMPORARY FUNCTION test_hive_udf as 
'flink.examples.sql._08.batch._03_hive_udf._04_GenericUDF.TestGenericUDF'"; String sql = // "CREATE TABLE source_table (\n" // + " order_id STRING,\n" // + " price BIGINT\n" // + ") WITH (\n" // + " 'connector' = 'datagen',\n" // + " 'rows-per-second' = '10',\n" // + " 'fields.order_id.length' = '1',\n" // + " 'fields.price.min' = '1',\n" // + " 'fields.price.max' = '1000000'\n" // + ");\n" // + "\n" // + "\n" // + "CREATE TABLE sink_table (\n" // + " order_id STRING,\n" // + " count_result BIGINT,\n" // + " sum_result BIGINT,\n" // + " avg_result DOUBLE,\n" // + " min_result BIGINT,\n" // + " max_result BIGINT\n" // + ") WITH (\n" // + " 'connector' = 'print'\n" // + ");\n" // + "\n" // + "insert into sink_table\n" + "select test_hive_udf(order_id) as order_id,\n" + " count(*) as count_result,\n" + " sum(price) as sum_result,\n" + " avg(price) as avg_result,\n" + " min(price) as min_result,\n" + " max(price) as max_result\n" + "from source_table\n" + "group by order_id"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "GROUP AGG 案例"); flinkEnv.streamTEnv().executeSql(sql2); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_01_hive_udf/_01_GenericUDF/TestGenericUDF.java ================================================ package flink.examples.sql._09.udf._01_hive_udf._01_GenericUDF; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector; 
import org.apache.hadoop.io.Text; public class TestGenericUDF extends GenericUDF { private transient StringObjectInspector soi = null; private transient StringObjectInspector soi1 = null; @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { PrimitiveObjectInspector primitiveObjectInspector = (PrimitiveObjectInspector) arguments[0]; soi = (StringObjectInspector) primitiveObjectInspector; return PrimitiveObjectInspectorFactory .getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.STRING); } @Override public Object evaluate(DeferredObject[] arguments) throws HiveException { return new Text("UNKNOWN"); } @Override public String getDisplayString(String[] children) { return "test"; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/HiveUDF_Error_Test.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_Error_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.v2", "false"}); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `params` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._09.udf._02_stream_hive_udf.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " `log_id` STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select user_id,\n" + " 
get_json_object(params, '$.log_id') as log_id\n" + "from source_table\n"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "Hive UDF 测试案例"); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/HiveUDF_create_temporary_error_Test.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_create_temporary_error_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TEMPORARY FUNCTION test_hive_udf as 'flink.examples.sql._09.udf._02_stream_hive_udf.TestGenericUDF';\n" + "\n" + "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `params` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._09.udf._02_stream_hive_udf.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " `log_id` STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select user_id,\n" + " test_hive_udf(params) as log_id\n" + "from source_table\n"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "Hive UDF 测试案例"); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/HiveUDF_hive_module_registry_Test.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class HiveUDF_hive_module_registry_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `params` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._09.udf._02_stream_hive_udf.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " `log_id` STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select user_id,\n" + " test_hive_udf(params) as log_id\n" + "from source_table\n"; flinkEnv.hiveModuleV2() .registryHiveUDF( "test_hive_udf" , TestGenericUDF.class.getName()); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "Hive UDF 测试案例"); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/HiveUDF_load_first_Test.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import java.util.Arrays; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_load_first_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = 
FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `params` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._09.udf._02_stream_hive_udf.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " `log_id` STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select user_id,\n" + " get_json_object(params, '$.log_id') as log_id\n" + "from source_table\n"; Arrays.stream(flinkEnv.streamTEnv().listModules()).forEach(System.out::println); Arrays.stream(flinkEnv.streamTEnv().listFunctions()).forEach(System.out::println); flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "Hive UDF 测试案例"); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/HiveUDF_load_second_Test.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster * * hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class HiveUDF_load_second_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(new String[] {"--enable.hive.module.load-first", "false"}); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `params` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._09.udf._02_stream_hive_udf.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE 
sink_table (\n" + " user_id BIGINT,\n" + " `log_id` STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select user_id,\n" + " get_json_object(params, '$.log_id') as log_id\n" + "from source_table\n"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "Hive UDF 测试案例"); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/TestGenericUDF.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector; import org.apache.hadoop.io.Text; public class TestGenericUDF extends GenericUDF { private transient StringObjectInspector soi = null; private transient StringObjectInspector soi1 = null; @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { PrimitiveObjectInspector primitiveObjectInspector = (PrimitiveObjectInspector) arguments[0]; soi = (StringObjectInspector) primitiveObjectInspector; return PrimitiveObjectInspectorFactory .getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.STRING); } @Override public Object evaluate(DeferredObject[] arguments) throws HiveException { return new Text("UNKNOWN"); } @Override public String getDisplayString(String[] children) { return "test"; } } 
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_02_stream_hive_udf/UserDefinedSource.java ================================================ package flink.examples.sql._09.udf._02_stream_hive_udf; import org.apache.flink.api.common.serialization.DeserializationSchema; import org.apache.flink.streaming.api.functions.source.RichSourceFunction; import org.apache.flink.table.data.RowData; import com.google.common.collect.ImmutableMap; import flink.examples.JacksonUtils; public class UserDefinedSource extends RichSourceFunction { private DeserializationSchema dser; private volatile boolean isCancel; public UserDefinedSource(DeserializationSchema dser) { this.dser = dser; } @Override public void run(SourceContext ctx) throws Exception { int i = 0; while (!this.isCancel) { ctx.collect(this.dser.deserialize( JacksonUtils.bean2Json(ImmutableMap.of("user_id", 1111L, "params", "{\"log_id\":\"" + i + "\"}")).getBytes() )); Thread.sleep(1000); i++; } } @Override public void cancel() { this.isCancel = true; } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_03_advanced_type_inference/AdvancedFunctionsExample.java ================================================ package flink.examples.sql._09.udf._03_advanced_type_inference; import java.time.LocalDate; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.types.Row; public class AdvancedFunctionsExample { public static void main(String[] args) throws Exception { // setup the environment final EnvironmentSettings settings = EnvironmentSettings.newInstance().inBatchMode().build(); final TableEnvironment env = TableEnvironment.create(settings); // execute different kinds of functions executeLastDatedValueFunction(env); 
executeInternalRowMergerFunction(env); } /** * Aggregates data by name and returns the latest non-null {@code item_count} value with its * corresponding {@code order_date}. */ private static void executeLastDatedValueFunction(TableEnvironment env) { // create a table with example data final Table customers = env.fromValues( DataTypes.of("ROW"), Row.of("Guillermo Smith", LocalDate.parse("2020-12-01"), 3), Row.of("Guillermo Smith", LocalDate.parse("2020-12-05"), 5), Row.of("Valeria Mendoza", LocalDate.parse("2020-03-23"), 4), Row.of("Valeria Mendoza", LocalDate.parse("2020-06-02"), 10), Row.of("Leann Holloway", LocalDate.parse("2020-05-26"), 9), Row.of("Leann Holloway", LocalDate.parse("2020-05-27"), null), Row.of("Brandy Sanders", LocalDate.parse("2020-10-14"), 1), Row.of("John Turner", LocalDate.parse("2020-10-02"), 12), Row.of("Ellen Ortega", LocalDate.parse("2020-06-18"), 100)); env.createTemporaryView("customers", customers); // register and execute the function env.createTemporarySystemFunction("LastDatedValueFunction", LastDatedValueFunction.class); env.executeSql( "SELECT name, LastDatedValueFunction(item_count, order_date) " + "FROM customers GROUP BY name") .print(); // clean up env.dropTemporaryView("customers"); } /** Merges two rows as efficient as possible using internal data structures. 
*/ private static void executeInternalRowMergerFunction(TableEnvironment env) { // create a table with example data final Table customers = env.fromValues( DataTypes.of( "ROW, data2 ROW>"), Row.of( "Guillermo Smith", Row.of(LocalDate.parse("1992-12-12")), Row.of("New Jersey", "816-443-8010")), Row.of( "Valeria Mendoza", Row.of(LocalDate.parse("1970-03-28")), Row.of("Los Angeles", "928-264-9662")), Row.of( "Leann Holloway", Row.of(LocalDate.parse("1989-05-21")), Row.of("Eugene", "614-889-6038"))); env.createTemporaryView("customers", customers); // register and execute the function env.createTemporarySystemFunction( "InternalRowMergerFunction", InternalRowMergerFunction.class); env.executeSql("SELECT name, InternalRowMergerFunction(data1, data2) FROM customers") .print(); // clean up env.dropTemporaryView("customers"); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_03_advanced_type_inference/InternalRowMergerFunction.java ================================================ package flink.examples.sql._09.udf._03_advanced_type_inference; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.stream.IntStream; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.catalog.DataTypeFactory; import org.apache.flink.table.data.RowData; import org.apache.flink.table.data.utils.JoinedRowData; import org.apache.flink.table.functions.FunctionDefinition; import org.apache.flink.table.functions.ScalarFunction; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.inference.ArgumentCount; import org.apache.flink.table.types.inference.CallContext; import org.apache.flink.table.types.inference.ConstantArgumentCount; import org.apache.flink.table.types.inference.InputTypeStrategy; import org.apache.flink.table.types.inference.Signature; import 
org.apache.flink.table.types.inference.Signature.Argument; import org.apache.flink.table.types.inference.TypeInference; import org.apache.flink.table.types.logical.LogicalTypeRoot; public class InternalRowMergerFunction extends ScalarFunction { // -------------------------------------------------------------------------------------------- // Planning // -------------------------------------------------------------------------------------------- @Override public TypeInference getTypeInference(DataTypeFactory typeFactory) { return TypeInference.newBuilder() // accept a signature (ROW, ROW) with arbitrary field types but // with internal conversion classes .inputTypeStrategy( new InputTypeStrategy() { @Override public ArgumentCount getArgumentCount() { // the argument count is checked before input types are inferred return ConstantArgumentCount.of(2); } @Override public Optional> inferInputTypes( CallContext callContext, boolean throwOnFailure) { final List args = callContext.getArgumentDataTypes(); final DataType arg0 = args.get(0); final DataType arg1 = args.get(1); // perform some basic validation based on the logical type if (arg0.getLogicalType().getTypeRoot() != LogicalTypeRoot.ROW || arg1.getLogicalType().getTypeRoot() != LogicalTypeRoot.ROW) { if (throwOnFailure) { throw callContext.newValidationError( "Two row arguments expected."); } return Optional.empty(); } // keep the original logical type but express that both arguments // should use internal data structures return Optional.of( Arrays.asList( arg0.bridgedTo(RowData.class), arg1.bridgedTo(RowData.class))); } @Override public List getExpectedSignatures( FunctionDefinition definition) { // this helps in printing nice error messages return Collections.singletonList( Signature.of(Argument.of("ROW"), Argument.of("ROW"))); } }) .outputTypeStrategy( callContext -> { // merge fields and give them a unique name final List args = callContext.getArgumentDataTypes(); final List allFieldDataTypes = new ArrayList<>(); 
allFieldDataTypes.addAll(args.get(0).getChildren()); allFieldDataTypes.addAll(args.get(1).getChildren()); final DataTypes.Field[] fields = IntStream.range(0, allFieldDataTypes.size()) .mapToObj( i -> DataTypes.FIELD( "f" + i, allFieldDataTypes.get(i))) .toArray(DataTypes.Field[]::new); // create a new row with the merged fields and express that the return // type will use an internal data structure return Optional.of(DataTypes.ROW(fields).bridgedTo(RowData.class)); }) .build(); } // -------------------------------------------------------------------------------------------- // Runtime // -------------------------------------------------------------------------------------------- public RowData eval(RowData r1, RowData r2) { return new JoinedRowData(r1, r2); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_03_advanced_type_inference/LastDatedValueFunction.java ================================================ package flink.examples.sql._09.udf._03_advanced_type_inference; import java.time.LocalDate; import java.util.Optional; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.catalog.DataTypeFactory; import org.apache.flink.table.functions.AggregateFunction; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.inference.InputTypeStrategies; import org.apache.flink.table.types.inference.TypeInference; import org.apache.flink.types.Row; import flink.examples.sql._09.udf._03_advanced_type_inference.LastDatedValueFunction.Accumulator; public class LastDatedValueFunction extends AggregateFunction> { // -------------------------------------------------------------------------------------------- // Planning // -------------------------------------------------------------------------------------------- /** * Declares the {@link TypeInference} of this function. It specifies: * *

     *
     * <ul>
     *   <li>which argument types are supported when calling this function,
     *   <li>which {@link DataType#getConversionClass()} should be used when calling the JVM method
     *       {@link #accumulate(Accumulator, Object, LocalDate)} during runtime,
     *   <li>a similar strategy how to derive an accumulator type,
     *   <li>and a similar strategy how to derive the output type.
     * </ul>
*/ @Override public TypeInference getTypeInference(DataTypeFactory typeFactory) { return TypeInference.newBuilder() // accept a signature (ANY, DATE) both with default conversion classes, // the input type strategy is mostly used to produce nicer validation exceptions // during planning, implementers can decide to skip it if they are fine with failing // at a later stage during code generation when the runtime method is checked .inputTypeStrategy( InputTypeStrategies.sequence( InputTypeStrategies.ANY, InputTypeStrategies.explicit(DataTypes.DATE()))) // let the accumulator data type depend on the first input argument .accumulatorTypeStrategy( callContext -> { final DataType argDataType = callContext.getArgumentDataTypes().get(0); final DataType accDataType = DataTypes.STRUCTURED( Accumulator.class, DataTypes.FIELD("value", argDataType), DataTypes.FIELD("date", DataTypes.DATE())); return Optional.of(accDataType); }) // let the output data type depend on the first input argument .outputTypeStrategy( callContext -> { final DataType argDataType = callContext.getArgumentDataTypes().get(0); final DataType outputDataType = DataTypes.ROW( DataTypes.FIELD("value", argDataType), DataTypes.FIELD("date", DataTypes.DATE())); return Optional.of(outputDataType); }) .build(); } // -------------------------------------------------------------------------------------------- // Runtime // -------------------------------------------------------------------------------------------- /** * Generic accumulator for representing state. It will contain different kind of instances for * {@code value} depending on actual call in the query. */ public static class Accumulator { public T value; public LocalDate date; } @Override public Accumulator createAccumulator() { return new Accumulator<>(); } /** * Generic runtime function that will be called with different kind of instances for {@code * input} depending on actual call in the query. 
*/ public void accumulate(Accumulator acc, T input, LocalDate date) { if (input != null && (acc.date == null || date.isAfter(acc.date))) { acc.value = input; acc.date = date; } } @Override public Row getValue(Accumulator acc) { return Row.of(acc.value, acc.date); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_04_udf/UDAF_Test.java ================================================ package flink.examples.sql._09.udf._04_udf; import java.util.ArrayList; import java.util.List; import java.util.TreeSet; import org.apache.flink.api.common.accumulators.Accumulator; import org.apache.flink.api.common.typeinfo.TypeHint; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.table.annotation.DataTypeHint; import org.apache.flink.table.annotation.FunctionHint; import org.apache.flink.table.functions.AggregateFunction; import org.apache.flink.table.functions.ScalarFunction; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; import flink.examples.JacksonUtils; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; public class UDAF_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); String sql = "CREATE TEMPORARY FUNCTION test_hive_udf as 'flink.examples.sql._09.udf._04_udf.UDAF_Test$CollectList2';\n" + "CREATE TEMPORARY FUNCTION to_json_udf as 'flink.examples.sql._09.udf._04_udf.UDAF_Test$ToJson';\n" + "CREATE TABLE source_table (\n" + " user_id BIGINT,\n" + " `params` STRING\n" + ") WITH (\n" + " 'connector' = 'user_defined',\n" + " 'format' = 'json',\n" + " 'class.name' = 'flink.examples.sql._09.udf._02_stream_hive_udf.UserDefinedSource'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " `log_id` STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "insert into sink_table\n" + "select 
user_id,\n" // + " to_json_udf(test_hive_udf(params, cast(0 as int), cast('a' as string), cast(0 as bigint))) as log_id\n" + " to_json_udf(test_hive_udf(params)) as log_id\n" + "from source_table\n" + "group by user_id\n"; flinkEnv.streamTEnv().getConfig().getConfiguration().setString("pipeline.name", "UDAF 测试案例"); for (String innerSql : sql.split(";")) { flinkEnv.streamTEnv().executeSql(innerSql); } } @Data @AllArgsConstructor @NoArgsConstructor public static class Sentence implements Comparable { private String msgid; private Integer type; private String content; private Long ts; public int compareTo(Sentence s) { return s.equals(this) ? 1 : 0; } } public static class CollectList1 extends AggregateFunction { @Override public Sentence getValue(Sentence strings) { return new Sentence(); } @Override public Sentence createAccumulator() { return new Sentence(); } public void accumulate(Sentence list, String msgid, Integer type, String content, Long ts) { } public void merge(Sentence list, Iterable it) { } // @Override // public TypeInformation getAccumulatorType() { // // return Types.POJO(Sentence.class); // // } // // @Override // public TypeInformation getResultType() { // // return Types.POJO(Sentence.class); // } } public static class CollectList extends AggregateFunction, List> { @Override public List getValue(List strings) { return strings; } @Override public List createAccumulator() { return new ArrayList<>(); } public void accumulate(List list, String msgid, Integer type, String content, Long ts) { list.add(new Sentence(msgid, type, content, ts)); } public void merge(List list, Iterable> it) { for (List list1 : it) { list.addAll(list1); } } @Override public TypeInformation> getAccumulatorType() { return TypeInformation.of(new TypeHint>() { }); } @Override public TypeInformation> getResultType() { return TypeInformation.of(new TypeHint>() { }); } } public static class ToJson extends ScalarFunction { public String eval(List in) { return 
JacksonUtils.bean2Json(in); } } /** * Set Aggregate * @author Liu Yang * @date 2022/3/28 16:46 */ @FunctionHint( input = {@DataTypeHint("STRING")}, output = @DataTypeHint("STRING") ) public static class CollectList2 extends AggregateFunction { private String delimiter; public void accumulate(TreeSetAccumulator acc, String value){ if (value == null) { return; } if (value instanceof Comparable) { acc.add((String) value); } } @Override public String getValue(TreeSetAccumulator accumulator) { return JacksonUtils.bean2Json(accumulator.getLocalValue()); } @Override public TreeSetAccumulator createAccumulator() { return new TreeSetAccumulator<>(); } } public static class TreeSetAccumulator> implements Accumulator> { private static final long serialVersionUID = 1L; // Tips: Construction of sorted collection with non-comparable elements private TreeSet localValue = new TreeSet<>(); @Override public void add(T value) { localValue.add(value); } @Override public TreeSet getLocalValue() { return localValue; } @Override public void resetLocal() { localValue.clear(); } @Override public void merge(Accumulator> other) { localValue.addAll(other.getLocalValue()); } @Override public Accumulator> clone() { TreeSetAccumulator newInstance = new TreeSetAccumulator(); newInstance.localValue = new TreeSet<>(localValue); return newInstance; } @Override public String toString() { return "TreeSet Accumulator " + localValue; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/ExplodeUDTF.java ================================================ package flink.examples.sql._09.udf._05_scalar_function; import java.util.Set; import org.apache.flink.table.annotation.DataTypeHint; import org.apache.flink.table.functions.TableFunction; public class ExplodeUDTF extends TableFunction { public void eval(@DataTypeHint("RAW") Object test) { Set test1 = (Set) test; for (String t : test1) { collect(t); } } } 
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/ExplodeUDTFV2.java ================================================
package flink.examples.sql._09.udf._05_scalar_function;

import org.apache.flink.table.functions.TableFunction;

/**
 * Table function that emits two String[] rows per input word.
 * NOTE(review): extraction stripped the generic parameter of TableFunction;
 * String[] is restored from the collect(...) calls below.
 */
public class ExplodeUDTFV2 extends TableFunction<String[]> {

    public void eval(String worlds) {
        collect(new String[]{ worlds, worlds + "111"});
        collect(new String[]{ worlds, worlds + "222"});
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/GetMapValue.java ================================================
package flink.examples.sql._09.udf._05_scalar_function;

import java.util.Map;

import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.functions.ScalarFunction;

/** Looks up a key in a RAW-typed java.util.Map; any failure maps to SQL NULL. */
public class GetMapValue extends ScalarFunction {

    @SuppressWarnings("unchecked")
    public String eval(@DataTypeHint("RAW") Object map, String key) {
        // assumes the RAW value is a Map with String keys — TODO confirm against producer
        Map<String, Object> innerMap = (Map<String, Object>) map;
        try {
            Object obj = innerMap.get(key);
            if (obj != null) {
                return obj.toString();
            } else {
                return null;
            }
        } catch (Exception e) {
            // deliberate best-effort: bad cast or null map yields NULL instead of failing the job
            return null;
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/GetSetValue.java ================================================
package flink.examples.sql._09.udf._05_scalar_function;

import java.util.Set;

import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.functions.ScalarFunction;

/** Returns an arbitrary element of a RAW-typed java.util.Set. */
public class GetSetValue extends ScalarFunction {

    @SuppressWarnings("unchecked")
    public String eval(@DataTypeHint("RAW") Object set) {
        // NOTE(review): iterator().next() throws NoSuchElementException on an empty
        // set; the demo producer (SetStringUDF) always emits a non-empty set.
        Set<String> s = (Set<String>) set;
        return s.iterator().next();
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/ScalarFunctionTest.java ================================================
package flink.examples.sql._09.udf._05_scalar_function;

import 
java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Demo: set_string produces a RAW Set, explode_udtf turns it into rows via a
// lateral join, and get_map_value probes the exploded STRING (a Map lookup on a
// STRING value — kept exactly as in the original example).
public class ScalarFunctionTest {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // register the UDFs referenced by the SQL below
        flinkEnv.streamTEnv().createFunction("set_string", SetStringUDF.class);
        flinkEnv.streamTEnv().createFunction("explode_udtf", ExplodeUDTF.class);
        flinkEnv.streamTEnv().createFunction("get_map_value", GetMapValue.class);

        String sql = "CREATE TABLE Orders (\n"
                + " order_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.order_id.min' = '1',\n"
                + " 'fields.order_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE target_table (\n"
                + " order_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time timestamp(3),\n"
                + " name_explode STRING,\n"
                + " i STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO target_table\n"
                + "SELECT *, cast(get_map_value(name_explode, cast('a' as string)) as string) as i\n"
                + "FROM Orders\n"
                + "LEFT JOIN lateral TABLE(\n"
                + " explode_udtf(\n"
                + " set_string(name)\n"
                + " )\n"
                + " ) AS t(name_explode) ON TRUE";

        // run each ';'-separated statement
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/ScalarFunctionTest2.java ================================================
package flink.examples.sql._09.udf._05_scalar_function;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Variant of ScalarFunctionTest: the RAW set is collapsed back to a single
// STRING with get_set_value instead of being exploded.
public class ScalarFunctionTest2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        
flinkEnv.streamTEnv().createFunction("set_string", SetStringUDF.class); flinkEnv.streamTEnv().createFunction("explode_udtf", ExplodeUDTF.class); flinkEnv.streamTEnv().createFunction("get_map_value", GetMapValue.class); flinkEnv.streamTEnv().createFunction("get_set_value", GetSetValue.class); String sql = "CREATE TABLE Orders (\n" + " order_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n" + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '10',\n" + " 'fields.name.length' = '1',\n" + " 'fields.order_id.min' = '1',\n" + " 'fields.order_id.max' = '10'\n" + ");\n" + "\n" + "CREATE TABLE target_table (\n" + " order_id BIGINT NOT NULL,\n" + " name STRING,\n" + " row_time timestamp(3),\n" + " i STRING\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "INSERT INTO target_table\n" + "SELECT *, cast(get_set_value(set_string(name)) as string) as i\n" + "FROM Orders\n"; Arrays.stream(sql.split(";")) .forEach(flinkEnv.streamTEnv()::executeSql); } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/SetStringUDF.java ================================================ package flink.examples.sql._09.udf._05_scalar_function; import java.util.Set; import org.apache.flink.api.common.typeinfo.TypeHint; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.table.annotation.DataTypeHint; import org.apache.flink.table.functions.ScalarFunction; import com.google.common.collect.Sets; public class SetStringUDF extends ScalarFunction { @DataTypeHint("RAW") public Object eval(String input) { return Sets.newHashSet(input, input + "_1", input + "_2"); } @Override public TypeInformation getResultType(Class[] signature) { return TypeInformation.of(new TypeHint>() { }); } } ================================================ FILE: 
flink-examples-1.13/src/main/java/flink/examples/sql/_09/udf/_05_scalar_function/TableFunctionTest2.java ================================================
package flink.examples.sql._09.udf._05_scalar_function;

import java.util.Arrays;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Demo for ExplodeUDTFV2: each name becomes String[] rows via a lateral join,
// and the second array element (1-based index [2]) is selected.
public class TableFunctionTest2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        flinkEnv.streamTEnv().createFunction("explode_udtf_v2", ExplodeUDTFV2.class);

        String sql = "CREATE TABLE Orders (\n"
                + " order_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),\n"
                + " WATERMARK FOR row_time AS row_time - INTERVAL '5' SECOND\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '10',\n"
                + " 'fields.name.length' = '1',\n"
                + " 'fields.order_id.min' = '1',\n"
                + " 'fields.order_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE target_table (\n"
                + " order_id BIGINT NOT NULL,\n"
                + " name STRING,\n"
                + " row_time timestamp(3),\n"
                + " i STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");\n"
                + "\n"
                + "INSERT INTO target_table\n"
                + "SELECT order_id, name, row_time, name_explode[2] as i\n"
                + "FROM Orders \n"
                + "LEFT JOIN lateral TABLE(explode_udtf_v2(name)) AS t(name_explode) ON TRUE\n";

        // run each ';'-separated statement
        Arrays.stream(sql.split(";"))
                .forEach(flinkEnv.streamTEnv()::executeSql);
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_10_share/A.java ================================================
package flink.examples.sql._10_share;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableResult; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.types.Row; public class A { public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); env.setParallelism(1); EnvironmentSettings settings = EnvironmentSettings .newInstance() .useBlinkPlanner() .inStreamingMode().build(); env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings); DataStream source = env.addSource(new UserDefinedSource()); Table sourceTable = tEnv.fromDataStream(source, "stat_date,\n" + " order_id,\n" + " buyer_id,\n" + " seller_id,\n" + " buy_amount,\n" + " div_pay_amt"); tEnv.createTemporaryView("dwd_tb_trd_ord_ent_di_m", sourceTable); String sql = "CREATE TABLE dimTable (\n" + " name STRING,\n" + " name1 STRING,\n" + " score BIGINT" + ") WITH (\n" + " 'connector' = 'redis',\n" + " 'hostname' = '127.0.0.1',\n" + " 'port' = '6379',\n" + " 'format' = 'json',\n" + " 'lookup.cache.max-rows' = '500',\n" + " 'lookup.cache.ttl' = '3600',\n" + " 'lookup.max-retries' = '1'\n" + ")"; String joinSql = "SELECT o.f0, o.f1, c.name, c.name1, c.score\n" + "FROM leftTable AS o\n" + "LEFT JOIN dimTable FOR SYSTEM_TIME AS OF o.proctime AS c\n" + "ON o.f0 = c.name"; TableResult dimTable = tEnv.executeSql(sql); Table t = tEnv.sqlQuery(joinSql); // Table t = tEnv.sqlQuery("select * from leftTable"); tEnv.toAppendStream(t, Row.class).print(); env.execute(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public 
void run(SourceContext sourceContext) throws Exception { while (!this.isCancel) { sourceContext.collect(Row.of("a", "b", 1L)); Thread.sleep(10L); } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(String.class), TypeInformation.of(Long.class)); } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_11_explain/Explain_Test.java ================================================ package flink.examples.sql._11_explain; import org.apache.flink.table.api.TableResult; import flink.examples.FlinkEnvUtils; import flink.examples.FlinkEnvUtils.FlinkEnv; public class Explain_Test { public static void main(String[] args) throws Exception { FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args); flinkEnv.env().setParallelism(1); String sql = "CREATE TABLE source_table (\n" + " user_id BIGINT COMMENT '用户 id',\n" + " name STRING COMMENT '用户姓名',\n" + " server_timestamp BIGINT COMMENT '用户访问时间戳',\n" + " proctime AS PROCTIME()\n" + ") WITH (\n" + " 'connector' = 'datagen',\n" + " 'rows-per-second' = '1',\n" + " 'fields.name.length' = '1',\n" + " 'fields.user_id.min' = '1',\n" + " 'fields.user_id.max' = '10',\n" + " 'fields.server_timestamp.min' = '1',\n" + " 'fields.server_timestamp.max' = '100000'\n" + ");\n" + "\n" + "CREATE TABLE sink_table (\n" + " user_id BIGINT,\n" + " name STRING,\n" + " server_timestamp BIGINT\n" + ") WITH (\n" + " 'connector' = 'print'\n" + ");\n" + "\n" + "EXPLAIN PLAN FOR\n" + "INSERT INTO sink_table\n" + "select user_id,\n" + " name,\n" + " server_timestamp\n" + "from (\n" + " SELECT\n" + " user_id,\n" + " name,\n" + " server_timestamp,\n" + " row_number() over(partition by user_id order by proctime) as rn\n" + " FROM source_table\n" + ")\n" + "where rn = 1"; /** * 算子 {@link org.apache.flink.streaming.api.operators.KeyedProcessOperator} * -- {@link 
org.apache.flink.table.runtime.operators.deduplicate.ProcTimeDeduplicateKeepFirstRowFunction} */
        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_01_interval/Timestamp3_Interval_To_Test.java ================================================
package flink.examples.sql._12_data_type._01_interval;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Demonstrates every INTERVAL literal form added to a fixed TIMESTAMP(3) value.
public class Timestamp3_Interval_To_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        String sql = "CREATE TABLE sink_table (\n"
                + " result_interval_year TIMESTAMP(3),\n"
                + " result_interval_year_p TIMESTAMP(3),\n"
                + " result_interval_year_p_to_month TIMESTAMP(3),\n"
                + " result_interval_month TIMESTAMP(3),\n"
                + " result_interval_day TIMESTAMP(3),\n"
                + " result_interval_day_p1 TIMESTAMP(3),\n"
                + " result_interval_day_p1_to_hour TIMESTAMP(3),\n"
                + " result_interval_day_p1_to_minute TIMESTAMP(3),\n"
                + " result_interval_day_p1_to_second_p2 TIMESTAMP(3),\n"
                + " result_interval_hour TIMESTAMP(3),\n"
                + " result_interval_hour_to_minute TIMESTAMP(3),\n"
                + " result_interval_hour_to_second TIMESTAMP(3),\n"
                + " result_interval_minute TIMESTAMP(3),\n"
                + " result_interval_minute_to_second_p2 TIMESTAMP(3),\n"
                + " result_interval_second TIMESTAMP(3),\n"
                + " result_interval_second_p2 TIMESTAMP(3)\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " f1 + INTERVAL '10' YEAR as result_interval_year\n"
                + " , f1 + INTERVAL '100' YEAR(3) as result_interval_year_p\n"
                + " , f1 + INTERVAL '10-03' YEAR(3) TO MONTH as result_interval_year_p_to_month\n"
                + " , f1 + INTERVAL '13' MONTH as result_interval_month\n"
                + " , f1 + INTERVAL '10' DAY as result_interval_day\n"
                + " , f1 + INTERVAL '100' DAY(3) as result_interval_day_p1\n"
                + " , f1 + INTERVAL '10 03' DAY(3) TO HOUR as result_interval_day_p1_to_hour\n"
                + " , f1 + INTERVAL '10 03:12' DAY(3) TO MINUTE as result_interval_day_p1_to_minute\n"
                + " , f1 + INTERVAL '10 00:00:00.004' DAY TO SECOND(3) as result_interval_day_p1_to_second_p2\n"
                + " , f1 + INTERVAL '10' HOUR as result_interval_hour\n"
                + " , f1 + INTERVAL '10:03' HOUR TO MINUTE as result_interval_hour_to_minute\n"
                + " , f1 + INTERVAL '00:00:00.004' HOUR TO SECOND(3) as result_interval_hour_to_second\n"
                + " , f1 + INTERVAL '10' MINUTE as result_interval_minute\n"
                + " , f1 + INTERVAL '05:05.006' MINUTE TO SECOND(3) as result_interval_minute_to_second_p2\n"
                + " , f1 + INTERVAL '3' SECOND as result_interval_second\n"
                + " , f1 + INTERVAL '300' SECOND(3) as result_interval_second_p2\n"
                + "FROM (SELECT CAST('1990-10-14 10:20:45.123' as TIMESTAMP(3)) as f1)"
                ;

        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_01_interval/Timestamp_ltz3_Interval_To_Test.java ================================================
package flink.examples.sql._12_data_type._01_interval;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Same INTERVAL matrix as Timestamp3_Interval_To_Test, applied to TIMESTAMP_LTZ(3).
public class Timestamp_ltz3_Interval_To_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        /**
         * INTERVAL YEAR
         * INTERVAL YEAR(p)
         * INTERVAL YEAR(p) TO MONTH
         * INTERVAL MONTH
         * INTERVAL DAY
         * INTERVAL DAY(p1)
         * INTERVAL DAY(p1) TO HOUR
         * INTERVAL DAY(p1) TO MINUTE
         * INTERVAL DAY(p1) TO SECOND(p2)
         * INTERVAL HOUR
         * INTERVAL HOUR TO MINUTE
         * INTERVAL HOUR TO 
SECOND(p2)
         * INTERVAL MINUTE
         * INTERVAL MINUTE TO SECOND(p2)
         * INTERVAL SECOND
         * INTERVAL SECOND(p2)
         */
        String sql = "CREATE TABLE sink_table (\n"
                + " result_interval_year TIMESTAMP(3),\n"
                + " result_interval_year_p TIMESTAMP(3),\n"
                + " result_interval_year_p_to_month TIMESTAMP(3),\n"
                + " result_interval_month TIMESTAMP(3),\n"
                + " result_interval_day TIMESTAMP(3),\n"
                + " result_interval_day_p1 TIMESTAMP(3),\n"
                + " result_interval_day_p1_to_hour TIMESTAMP(3),\n"
                + " result_interval_day_p1_to_minute TIMESTAMP(3),\n"
                + " result_interval_day_p1_to_second_p2 TIMESTAMP(3),\n"
                + " result_interval_hour TIMESTAMP(3),\n"
                + " result_interval_hour_to_minute TIMESTAMP(3),\n"
                + " result_interval_hour_to_second TIMESTAMP(3),\n"
                + " result_interval_minute TIMESTAMP(3),\n"
                + " result_interval_minute_to_second_p2 TIMESTAMP(3),\n"
                + " result_interval_second TIMESTAMP(3),\n"
                + " result_interval_second_p2 TIMESTAMP(3)\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "SELECT\n"
                + " f1 + INTERVAL '10' YEAR as result_interval_year\n"
                + " , f1 + INTERVAL '100' YEAR(3) as result_interval_year_p\n"
                + " , f1 + INTERVAL '10-03' YEAR(3) TO MONTH as result_interval_year_p_to_month\n"
                + " , f1 + INTERVAL '13' MONTH as result_interval_month\n"
                + " , f1 + INTERVAL '10' DAY as result_interval_day\n"
                + " , f1 + INTERVAL '100' DAY(3) as result_interval_day_p1\n"
                + " , f1 + INTERVAL '10 03' DAY(3) TO HOUR as result_interval_day_p1_to_hour\n"
                + " , f1 + INTERVAL '10 03:12' DAY(3) TO MINUTE as result_interval_day_p1_to_minute\n"
                + " , f1 + INTERVAL '10 00:00:00.004' DAY TO SECOND(3) as result_interval_day_p1_to_second_p2\n"
                + " , f1 + INTERVAL '10' HOUR as result_interval_hour\n"
                + " , f1 + INTERVAL '10:03' HOUR TO MINUTE as result_interval_hour_to_minute\n"
                + " , f1 + INTERVAL '00:00:00.004' HOUR TO SECOND(3) as result_interval_hour_to_second\n"
                + " , f1 + INTERVAL '10' MINUTE as result_interval_minute\n"
                + " , f1 + INTERVAL '05:05.006' MINUTE TO SECOND(3) as result_interval_minute_to_second_p2\n"
                + " , f1 + INTERVAL '3' SECOND as result_interval_second\n"
                + " , f1 + INTERVAL '300' SECOND(3) as result_interval_second_p2\n"
                + "FROM (SELECT TO_TIMESTAMP_LTZ(1640966476500, 3) as f1)"
                ;

        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_02_user_defined/User.java ================================================
package flink.examples.sql._12_data_type._02_user_defined;

import java.math.BigDecimal;

import org.apache.flink.table.annotation.DataTypeHint;

// POJO used as a user-defined (structured) type in the Table API examples.
public class User {

    public int age;

    public String name;

    // the annotation forces the DECIMAL(10, 2) SQL mapping
    public @DataTypeHint("DECIMAL(10, 2)") BigDecimal totalBalance;
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_02_user_defined/UserDefinedDataTypes_Test.java ================================================
package flink.examples.sql._12_data_type._02_user_defined;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Registers UserScalarFunction by name and selects its structured (User) result.
public class UserDefinedDataTypes_Test {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        // NOTE(review): "ROW" below most likely lost its "<...>" field list when
        // this file was extracted — verify against the original source.
        String sql = "CREATE FUNCTION user_scalar_func AS 'flink.examples.sql._12_data_type._02_user_defined.UserScalarFunction';"
                + "\n"
                + "CREATE TABLE source_table (\n"
                + " user_id BIGINT NOT NULL COMMENT '用户 id'\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " result_row ROW\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select user_scalar_func(user_id) as result_row\n"
+ "from source_table";
        ;

        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_02_user_defined/UserDefinedDataTypes_Test2.java ================================================
package flink.examples.sql._12_data_type._02_user_defined;

import org.apache.flink.table.api.TableResult;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Same as UserDefinedDataTypes_Test, but additionally feeds the UDF's structured
// result back into the UDF (exercising the User -> STRING overload).
public class UserDefinedDataTypes_Test2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);
        flinkEnv.env().setParallelism(1);

        // NOTE(review): "ROW" below most likely lost its "<...>" field list when
        // this file was extracted — verify against the original source.
        String sql = "CREATE FUNCTION user_scalar_func AS 'flink.examples.sql._12_data_type._02_user_defined.UserScalarFunction';"
                + "\n"
                + "CREATE TABLE source_table (\n"
                + " user_id BIGINT NOT NULL COMMENT '用户 id'\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " result_row_1 ROW,\n"
                + " result_row_2 STRING\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select\n"
                + " user_scalar_func(user_id) as result_row_1,\n"
                + " user_scalar_func(user_scalar_func(user_id)) as result_row_2\n"
                + "from source_table";
        ;

        for (String innerSql : sql.split(";")) {
            TableResult tableResult = flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_02_user_defined/UserScalarFunction.java ================================================
package flink.examples.sql._12_data_type._02_user_defined;

import java.math.BigDecimal;

import org.apache.flink.table.functions.ScalarFunction;

// Overloaded scalar UDF: long -> User (structured output) and User -> String (structured input).
public class UserScalarFunction 
extends ScalarFunction { // 1. 自定义数据类型作为输出参数 public User eval(long i) { if (i > 0 && i <= 5) { User u = new User(); u.age = (int) i; u.name = "name1"; u.totalBalance = new BigDecimal(1.1d); return u; } else { User u = new User(); u.age = (int) i; u.name = "name2"; u.totalBalance = new BigDecimal(2.2d); return u; } } // 2. 自定义数据类型作为输入参数 public String eval(User i) { if (i.age > 0 && i.age <= 5) { User u = new User(); u.age = 1; u.name = "name1"; u.totalBalance = new BigDecimal(1.1d); return u.name; } else { User u = new User(); u.age = 2; u.name = "name2"; u.totalBalance = new BigDecimal(2.2d); return u.name; } } } ================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_03_raw/RawScalarFunction.java ================================================ package flink.examples.sql._12_data_type._03_raw; import java.math.BigDecimal; import org.apache.flink.api.common.typeutils.base.StringSerializer; import org.apache.flink.table.annotation.DataTypeHint; import org.apache.flink.table.functions.ScalarFunction; import flink.examples.sql._12_data_type._02_user_defined.User; public class RawScalarFunction extends ScalarFunction { // 1. 自定义数据类型作为输出参数 public User eval(long i) { if (i > 0 && i <= 5) { User u = new User(); u.age = (int) i; u.name = "name1"; u.totalBalance = new BigDecimal(1.1d); return u; } else { User u = new User(); u.age = (int) i; u.name = "name2"; u.totalBalance = new BigDecimal(2.2d); return u; } } // 2. 
// user-defined data type as the input parameter; the return value is declared as a RAW type
    @DataTypeHint(value = "RAW", bridgedTo = String.class, rawSerializer = StringSerializer.class)
    public String eval(User i) {
        if (i.age > 0 && i.age <= 5) {
            User u = new User();
            u.age = 1;
            u.name = "name1";
            u.totalBalance = new BigDecimal(1.1d);
            return u.name;
        } else {
            User u = new User();
            u.age = 2;
            u.name = "name2";
            u.totalBalance = new BigDecimal(2.2d);
            return u.name;
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/java/flink/examples/sql/_12_data_type/_03_raw/Raw_DataTypes_Test2.java ================================================
package flink.examples.sql._12_data_type._03_raw;

import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.types.logical.RawType;

import flink.examples.FlinkEnvUtils;
import flink.examples.FlinkEnvUtils.FlinkEnv;

// Builds a RAW('java.lang.String', '<serializer>') column whose serializer
// snapshot string is generated at runtime and spliced into the DDL via String.format.
public class Raw_DataTypes_Test2 {

    public static void main(String[] args) throws Exception {
        FlinkEnv flinkEnv = FlinkEnvUtils.getStreamTableEnv(args);

        // NOTE(review): RawType's "<String>" type argument appears stripped by extraction.
        RawType rawType = new RawType(String.class, StringSerializer.INSTANCE);
        // Base64-encoded serializer snapshot, as required by the RAW(...) DDL syntax.
        String base64String = rawType.getSerializerString();

        flinkEnv.env().setParallelism(1);

        String sql = String.format("CREATE FUNCTION raw_scalar_func AS 'flink.examples.sql._12_data_type._03_raw.RawScalarFunction';"
                + "\n"
                + "CREATE TABLE source_table (\n"
                + " user_id BIGINT NOT NULL COMMENT '用户 id'\n"
                + ") WITH (\n"
                + " 'connector' = 'datagen',\n"
                + " 'rows-per-second' = '1',\n"
                + " 'fields.user_id.min' = '1',\n"
                + " 'fields.user_id.max' = '10'\n"
                + ");\n"
                + "\n"
                + "CREATE TABLE sink_table (\n"
                + " result_row_1 RAW('java.lang.String', '%s')\n"
                + ") WITH (\n"
                + " 'connector' = 'print'\n"
                + ");"
                + "\n"
                + "INSERT INTO sink_table\n"
                + "select\n"
                + " raw_scalar_func(raw_scalar_func(user_id)) as result_row_1\n"
                + "from source_table", base64String);
        ;

        for (String innerSql : sql.split(";")) {
            TableResult tableResult = 
flinkEnv.streamTEnv().executeSql(innerSql);
            tableResult.print();
        }
    }
}
================================================ FILE: flink-examples-1.13/src/main/javacc/Simple1.jj ================================================
/* Copyright (c) 2006, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of the Sun Microsystems, Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE. 
*/
options {
  LOOKAHEAD = 1;
  CHOICE_AMBIGUITY_CHECK = 2;
  OTHER_AMBIGUITY_CHECK = 1;
  STATIC = true;
  DEBUG_PARSER = false;
  DEBUG_LOOKAHEAD = false;
  DEBUG_TOKEN_MANAGER = false;
  ERROR_REPORTING = true;
  JAVA_UNICODE_ESCAPE = false;
  UNICODE_INPUT = false;
  IGNORE_CASE = false;
  USER_TOKEN_MANAGER = false;
  USER_CHAR_STREAM = false;
  BUILD_PARSER = true;
  BUILD_TOKEN_MANAGER = true;
  SANITY_CHECK = true;
  FORCE_LA_CHECK = false;
}

PARSER_BEGIN(Simple1)

/** Simple brace matcher. */
public class Simple1 {

  /** Main entry point. */
  public static void main(String args[]) throws ParseException {
    Simple1 parser = new Simple1(System.in);
    parser.Input();
  }

}

PARSER_END(Simple1)

/** Root production. */
void Input() :
{}
{
  MatchedBraces() ("\n"|"\r")*
}

/** Brace matching production. */
void MatchedBraces() :
{}
{
  "{" [ MatchedBraces() ] "}"
}
================================================ FILE: flink-examples-1.13/src/main/proto/source.proto ================================================
syntax = "proto3";
package flink;

option java_package = "flink.examples.datastream._04.keyed_co_process.protobuf";
option java_outer_classname = "SourceOuterClassname";
option java_multiple_files = true;

message Source {
    string name = 1;
    repeated string names = 2;
    // NOTE(review): the map's "<key, value>" type arguments were stripped during
    // extraction (likely map<string, int32>) — confirm against the original file.
    map si_map = 7;
}
================================================ FILE: flink-examples-1.13/src/main/proto/test.proto ================================================
syntax = "proto3";
package flink;

option java_package = "flink.examples.sql._05.format.formats.protobuf";
option java_outer_classname = "TestOuterClassname";
option java_multiple_files = true;

message Test {
    string name = 1;
    repeated string names = 2;
    // NOTE(review): map type arguments stripped here as well.
    map si_map = 7;
}
================================================ FILE: flink-examples-1.13/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory ================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
flink.examples.sql._05.format.formats.csv.ChangelogCsvFormatFactory
flink.examples.sql._03.source_sink.table.socket.SocketDynamicTableFactory
flink.examples.sql._03.source_sink.table.redis.v2.RedisDynamicTableFactory
flink.examples.sql._03.source_sink.table.user_defined.UserDefinedDynamicTableFactory
flink.examples.sql._03.source_sink.abilities.source.Abilities_TableSourceFactory
flink.examples.sql._03.source_sink.abilities.source.before.Before_Abilities_TableSourceFactory
flink.examples.sql._03.source_sink.abilities.sink.Abilities_TableSinkFactory
flink.examples.sql._05.format.formats.protobuf.rowdata.ProtobufFormatFactory
================================================ FILE: flink-examples-1.13/src/main/scala/flink/examples/sql/_04/type/TableFunc0.scala ================================================
package flink.examples.sql._04.`type`

import org.apache.flink.table.functions.TableFunction

case class SimpleUser(name: String, age: Int)

class TableFunc0 extends TableFunction[SimpleUser] {

  // make sure input element's format is "<name>#<age>"
  // (NOTE(review): the quoted pattern above was emptied by extraction; restored
  //  from the '#'-split parsing logic below — confirm against the original)
  def eval(user: String): Unit = {
    if (user.contains("#")) {
      val splits = user.split("#")
      // splits(1).toInt throws NumberFormatException for a non-numeric age
      collect(SimpleUser(splits(0), splits(1).toInt))
    }
  }
}
================================================ FILE: 
flink-examples-1.13/src/test/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufRowDeserializationSchemaTest.java ================================================ //package flink.examples.sql._05.format.formats.protobuf.row; // //import java.io.ByteArrayInputStream; //import java.io.ByteArrayOutputStream; //import java.io.File; //import java.io.FileInputStream; //import java.io.IOException; //import java.io.ObjectInputStream; //import java.io.ObjectOutputStream; //import java.util.HashMap; // //import org.apache.flink.types.Row; //import org.junit.Assert; //import org.junit.Before; //import org.junit.Test; // //import com.google.common.collect.Lists; // //import flink.examples.sql._05.format.formats.protobuf.Dog; //import flink.examples.sql._05.format.formats.protobuf.Person; //import flink.examples.sql._05.format.formats.protobuf.Person.Contact; //import flink.examples.sql._05.format.formats.protobuf.Person.ContactType; // //public class ProtobufRowDeserializationSchemaTest { // // private Person p; // // private byte[] b; // // private static final String PROTO_DESCRIPTOR_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --descriptor_set_out=./person.desc ./src/test/proto/person.proto"; // // private static final String PROTO_JAVA_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --java_out=./ ./src/test/proto/person.proto"; // // @Before // public void initPerson() throws IOException, InterruptedException { // this.p = Person // .newBuilder() // .setName("name") // .addAllNames(Lists.newArrayList("name1", "name2")) // .setId(1) // .addAllIds(Lists.newArrayList(2, 3)) // .setLong(4L) // .addAllLongs(Lists.newArrayList(5L, 6L)) // .putAllSiMap(new HashMap() { // { // put("key1", 7); // } // }) // .putAllSlMap(new HashMap() { // { // put("key2", 8L); // } // }) // .putAllSdMap(new HashMap() { // { // put("key3", Dog.newBuilder().setId(9).setName("dog1").build()); // } // }) // 
.setDog(Dog.newBuilder().setId(10).setName("dog2").build()) // .addAllDogs(Lists.newArrayList(Dog.newBuilder().setId(11).setName("dog3").build())) // .addAllContacts(Lists.newArrayList( // Contact.newBuilder().setNumber("number").setContactType(ContactType.EMAIL).build())) // .build(); // // this.b = this.p.toByteArray(); // // String[] cmds = {"bash", "-c", PROTO_DESCRIPTOR_FILE_GENERATOR_CMD}; // Process process = Runtime.getRuntime().exec(cmds, null, new File("./")); // // int exitCode = process.waitFor(); // // // } // // @Test // public void deserializationProtobufToRowTest() throws IOException { // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(Person.class); // // Row row = ds.deserialize(this.b); // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(Person.class); // // byte[] b = s.serialize(row); // // Assert.assertArrayEquals(this.b, b); // // } // // @Test // public void deserializationProtobufToRowByDescriptorTest() throws IOException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(descriptorBytes); // // Row row = ds.deserialize(this.b); // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(descriptorBytes); // // byte[] b = s.serialize(row); // // Assert.assertArrayEquals(this.b, b); // // } // // @Test // public void seAndDeseProtobufRowDeserializationSchema() throws IOException, ClassNotFoundException { // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(Person.class); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(ds); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // 
// ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // // @Test // public void seAndDeseProtobufRowDeserializationSchemaByDescriptor() throws IOException, ClassNotFoundException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(descriptorBytes); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(ds); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // // ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // //} ================================================ FILE: flink-examples-1.13/src/test/java/flink/examples/sql/_05/format/formats/protobuf/row/ProtobufRowSerializationSchemaTest.java ================================================ //package flink.examples.sql._05.format.formats.protobuf.row; // //import java.io.ByteArrayInputStream; //import java.io.ByteArrayOutputStream; //import java.io.File; //import java.io.FileInputStream; //import java.io.IOException; //import java.io.ObjectInputStream; //import java.io.ObjectOutputStream; //import java.util.HashMap; // //import org.apache.flink.types.Row; //import org.junit.Assert; //import org.junit.Before; //import org.junit.Test; // //import com.google.common.collect.Lists; // //import flink.examples.sql._05.format.formats.protobuf.Dog; //import flink.examples.sql._05.format.formats.protobuf.Person; //import flink.examples.sql._05.format.formats.protobuf.Person.Contact; //import flink.examples.sql._05.format.formats.protobuf.Person.ContactType; // //public class ProtobufRowSerializationSchemaTest { 
// // private Person p; // // private byte[] b; // // private Row r; // // private static final String PROTO_DESCRIPTOR_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --descriptor_set_out=./person.desc ./src/test/proto/person.proto"; // // private static final String PROTO_JAVA_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --java_out=./ ./src/test/proto/person.proto"; // // @Before // public void initPerson() throws IOException, InterruptedException { // this.p = Person // .newBuilder() // .setName("name") // .addAllNames(Lists.newArrayList("name1", "name2")) // .setId(1) // .addAllIds(Lists.newArrayList(2, 3)) // .setLong(4L) // .addAllLongs(Lists.newArrayList(5L, 6L)) // .putAllSiMap(new HashMap() { // { // put("key1", 7); // } // }) // .putAllSlMap(new HashMap() { // { // put("key2", 8L); // } // }) // .putAllSdMap(new HashMap() { // { // put("key3", Dog.newBuilder().setId(9).setName("dog1").build()); // } // }) // .setDog(Dog.newBuilder().setId(10).setName("dog2").build()) // .addAllDogs(Lists.newArrayList(Dog.newBuilder().setId(11).setName("dog3").build())) // .addAllContacts(Lists.newArrayList( // Contact.newBuilder().setNumber("number").setContactType(ContactType.EMAIL).build())) // .build(); // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(Person.class); // // this.r = ds.deserialize(this.p.toByteArray()); // // this.b = this.p.toByteArray(); // // String[] cmds = {"bash", "-c", PROTO_DESCRIPTOR_FILE_GENERATOR_CMD}; // Process process = Runtime.getRuntime().exec(cmds, null, new File("./")); // // int exitCode = process.waitFor(); // } // // @Test // public void serializationRowToProtobufTest() throws IOException { // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(Person.class); // // byte[] b = s.serialize(this.r); // // Person p1 = Person.parseFrom(b); // // Assert.assertEquals(p1, this.p); // // } // // // @Test // public void 
serializationRowToProtobufByDescriptorTest() throws IOException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(descriptorBytes); // // byte[] b = s.serialize(this.r); // // Person p1 = Person.parseFrom(b); // // Assert.assertEquals(p1, this.p); // // } // // // @Test // public void seAndDeseProtobufRowerializationSchema() throws IOException, ClassNotFoundException { // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(Person.class); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(s); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // // ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // // // @Test // public void seAndDeseProtobufRowSerializationSchemaByDescriptor() throws IOException, ClassNotFoundException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowSerializationSchema ds = new ProtobufRowSerializationSchema(descriptorBytes); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(ds); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // // ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // //} ================================================ FILE: 
flink-examples-1.13/src/test/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufRowDataDeserializationSchemaTest.java ================================================ //package flink.examples.sql._05.format.formats.protobuf.rowdata; // //import java.io.ByteArrayInputStream; //import java.io.ByteArrayOutputStream; //import java.io.File; //import java.io.FileInputStream; //import java.io.IOException; //import java.io.ObjectInputStream; //import java.io.ObjectOutputStream; //import java.util.HashMap; // //import org.apache.flink.table.data.RowData; //import org.apache.flink.types.Row; //import org.junit.Assert; //import org.junit.Before; //import org.junit.Test; // //import com.google.common.collect.Lists; // //import flink.examples.sql._05.format.formats.protobuf.Dog; //import flink.examples.sql._05.format.formats.protobuf.Person; //import flink.examples.sql._05.format.formats.protobuf.Person.Contact; //import flink.examples.sql._05.format.formats.protobuf.Person.ContactType; //import flink.examples.sql._05.format.formats.protobuf.row.ProtobufRowDeserializationSchema; //import flink.examples.sql._05.format.formats.protobuf.row.ProtobufRowSerializationSchema; // //public class ProtobufRowDataDeserializationSchemaTest { // // private Person p; // // private byte[] b; // // private static final String PROTO_DESCRIPTOR_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --descriptor_set_out=./person.desc ./src/test/proto/person.proto"; // // private static final String PROTO_JAVA_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --java_out=./ ./src/test/proto/person.proto"; // // @Before // public void initPerson() throws IOException, InterruptedException { // this.p = Person // .newBuilder() // .setName("name") // .addAllNames(Lists.newArrayList("name1", "name2")) // .setId(1) // .addAllIds(Lists.newArrayList(2, 3)) // .setLong(4L) // .addAllLongs(Lists.newArrayList(5L, 6L)) // .putAllSiMap(new HashMap() { // { // put("key1", 7); 
// } // }) // .putAllSlMap(new HashMap() { // { // put("key2", 8L); // } // }) // .putAllSdMap(new HashMap() { // { // put("key3", Dog.newBuilder().setId(9).setName("dog1").build()); // } // }) // .setDog(Dog.newBuilder().setId(10).setName("dog2").build()) // .addAllDogs(Lists.newArrayList(Dog.newBuilder().setId(11).setName("dog3").build())) // .addAllContacts(Lists.newArrayList( // Contact.newBuilder().setNumber("number").setContactType(ContactType.EMAIL).build())) // .build(); // // this.b = this.p.toByteArray(); // // String[] cmds = {"bash", "-c", PROTO_DESCRIPTOR_FILE_GENERATOR_CMD}; // Process process = Runtime.getRuntime().exec(cmds, null, new File("./")); // // int exitCode = process.waitFor(); // // // } // // @Test // public void deserializationProtobufToRowTest() throws IOException { // // ProtobufRowDataDeserializationSchema ds = new ProtobufRowDataDeserializationSchema( // Person.class // , true // , null); // // RowData rowData = ds.deserialize(this.b); // // Assert.assertArrayEquals(this.b, b); // // } // // @Test // public void deserializationProtobufToRowByDescriptorTest() throws IOException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(descriptorBytes); // // Row row = ds.deserialize(this.b); // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(descriptorBytes); // // byte[] b = s.serialize(row); // // Assert.assertArrayEquals(this.b, b); // // } // // @Test // public void seAndDeseProtobufRowDeserializationSchema() throws IOException, ClassNotFoundException { // // // } // // @Test // public void seAndDeseProtobufRowDeserializationSchemaByDescriptor() throws IOException, ClassNotFoundException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // 
// byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(descriptorBytes); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(ds); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // // ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // //} ================================================ FILE: flink-examples-1.13/src/test/java/flink/examples/sql/_05/format/formats/protobuf/rowdata/ProtobufRowDataSerializationSchemaTest.java ================================================ //package flink.examples.sql._05.format.formats.protobuf.rowdata; // //import java.io.ByteArrayInputStream; //import java.io.ByteArrayOutputStream; //import java.io.File; //import java.io.FileInputStream; //import java.io.IOException; //import java.io.ObjectInputStream; //import java.io.ObjectOutputStream; //import java.util.HashMap; // //import org.apache.flink.types.Row; //import org.junit.Assert; //import org.junit.Before; //import org.junit.Test; // //import com.google.common.collect.Lists; // //import flink.examples.sql._05.format.formats.protobuf.Dog; //import flink.examples.sql._05.format.formats.protobuf.Person; //import flink.examples.sql._05.format.formats.protobuf.Person.Contact; //import flink.examples.sql._05.format.formats.protobuf.Person.ContactType; //import flink.examples.sql._05.format.formats.protobuf.row.ProtobufRowDeserializationSchema; //import flink.examples.sql._05.format.formats.protobuf.row.ProtobufRowSerializationSchema; // //public class ProtobufRowDataSerializationSchemaTest { // // private Person p; // // private byte[] b; // // private Row r; // // private static final String PROTO_DESCRIPTOR_FILE_GENERATOR_CMD = // "protoc 
--proto_path ./src/test/proto --descriptor_set_out=./person.desc ./src/test/proto/person.proto"; // // private static final String PROTO_JAVA_FILE_GENERATOR_CMD = // "protoc --proto_path ./src/test/proto --java_out=./ ./src/test/proto/person.proto"; // // @Before // public void initPerson() throws IOException, InterruptedException { // this.p = Person // .newBuilder() // .setName("name") // .addAllNames(Lists.newArrayList("name1", "name2")) // .setId(1) // .addAllIds(Lists.newArrayList(2, 3)) // .setLong(4L) // .addAllLongs(Lists.newArrayList(5L, 6L)) // .putAllSiMap(new HashMap() { // { // put("key1", 7); // } // }) // .putAllSlMap(new HashMap() { // { // put("key2", 8L); // } // }) // .putAllSdMap(new HashMap() { // { // put("key3", Dog.newBuilder().setId(9).setName("dog1").build()); // } // }) // .setDog(Dog.newBuilder().setId(10).setName("dog2").build()) // .addAllDogs(Lists.newArrayList(Dog.newBuilder().setId(11).setName("dog3").build())) // .addAllContacts(Lists.newArrayList( // Contact.newBuilder().setNumber("number").setContactType(ContactType.EMAIL).build())) // .build(); // // ProtobufRowDeserializationSchema ds = new ProtobufRowDeserializationSchema(Person.class); // // this.r = ds.deserialize(this.p.toByteArray()); // // this.b = this.p.toByteArray(); // // String[] cmds = {"bash", "-c", PROTO_DESCRIPTOR_FILE_GENERATOR_CMD}; // Process process = Runtime.getRuntime().exec(cmds, null, new File("./")); // // int exitCode = process.waitFor(); // } // // @Test // public void serializationRowToProtobufTest() throws IOException { // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(Person.class); // // byte[] b = s.serialize(this.r); // // Person p1 = Person.parseFrom(b); // // Assert.assertEquals(p1, this.p); // // } // // // @Test // public void serializationRowToProtobufByDescriptorTest() throws IOException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] 
descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(descriptorBytes); // // byte[] b = s.serialize(this.r); // // Person p1 = Person.parseFrom(b); // // Assert.assertEquals(p1, this.p); // // } // // // @Test // public void seAndDeseProtobufRowerializationSchema() throws IOException, ClassNotFoundException { // // ProtobufRowSerializationSchema s = new ProtobufRowSerializationSchema(Person.class); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(s); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // // ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // // // @Test // public void seAndDeseProtobufRowSerializationSchemaByDescriptor() throws IOException, ClassNotFoundException { // // File file = new File("./person.desc"); // // FileInputStream fis = new FileInputStream(file); // // byte[] descriptorBytes = new byte[(int) file.length()]; // // fis.read(descriptorBytes); // // ProtobufRowSerializationSchema ds = new ProtobufRowSerializationSchema(descriptorBytes); // // ByteArrayOutputStream bros = new ByteArrayOutputStream(); // // ObjectOutputStream oos = new ObjectOutputStream(bros); // // oos.writeObject(ds); // // byte[] b = bros.toByteArray(); // // ByteArrayInputStream bris = new ByteArrayInputStream(b); // // ObjectInputStream ois = new ObjectInputStream(bris); // // Object o = ois.readObject(); // // Assert.assertTrue(true); // // } // //} ================================================ FILE: flink-examples-1.13/src/test/java/flink/examples/sql/_06/calcite/CalciteTest.java ================================================ package flink.examples.sql._06.calcite; import java.util.List; import org.apache.calcite.plan.RelOptUtil; 
import org.apache.calcite.plan.RelTraitDef; import org.apache.calcite.rel.RelNode; import org.apache.calcite.schema.SchemaPlus; import org.apache.calcite.sql.parser.SqlParser; import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.Frameworks; import org.apache.calcite.tools.Programs; import org.apache.calcite.tools.RelBuilder; public class CalciteTest { public static void main(String[] args) { final FrameworkConfig config = config().build(); final RelBuilder builder = RelBuilder.create(config); final RelNode node = builder .scan("EMP") .build(); System.out.println(RelOptUtil.toString(node)); } public static Frameworks.ConfigBuilder config() { final SchemaPlus rootSchema = Frameworks.createRootSchema(true); return Frameworks.newConfigBuilder() .parserConfig(SqlParser.Config.DEFAULT) .traitDefs((List) null) .programs(Programs.heuristicJoinOrder(Programs.RULE_SET, true, 2)); } } ================================================ FILE: flink-examples-1.13/src/test/java/flink/examples/sql/_07/query/_06_joins/JaninoCompileTest.java ================================================ package flink.examples.sql._07.query._06_joins; import org.apache.flink.api.common.functions.RichFlatMapFunction; import org.apache.flink.table.runtime.collector.TableFunctionCollector; import flink.core.source.JaninoUtils; public class JaninoCompileTest { public static void main(String[] args) throws Exception { String s = "import java.util.List;\n" + "\n" + "public class BatchJoinTableFuncCollector$8 extends org.apache.flink.table.runtime.collector" + ".TableFunctionCollector {\n" + "\n" + " org.apache.flink.table.data.GenericRowData out = new org.apache.flink.table.data" + ".GenericRowData(2);\n" + " org.apache.flink.table.data.utils.JoinedRowData joinedRow$7 = new org.apache.flink.table.data" + ".utils.JoinedRowData();\n" + "\n" + " public BatchJoinTableFuncCollector$8(Object[] references) throws Exception {\n" + "\n" + " }\n" + "\n" + " @Override\n" + " public void 
open(org.apache.flink.configuration.Configuration parameters) throws Exception {\n" + "\n" + " }\n" + "\n" + " @Override\n" + " public void collect(Object record) throws Exception {\n" + " List l = (List) " + "getInput();\n" + " List r = (List) " + "record;\n" + "\n" + " for (int i = 0; i < l.size(); i++) {\n" + "\n" + " org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) l.get(i);\n" + " org.apache.flink.table.data.RowData in2 = (org.apache.flink.table.data.RowData) r.get(i);\n" + "\n" + " org.apache.flink.table.data.binary.BinaryStringData field$5;\n" + " boolean isNull$5;\n" + " long field$6;\n" + " boolean isNull$6;\n" + " isNull$6 = in2.isNullAt(1);\n" + " field$6 = -1L;\n" + " if (!isNull$6) {\n" + " field$6 = in2.getLong(1);\n" + " }\n" + " isNull$5 = in2.isNullAt(0);\n" + " field$5 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;\n" + " if (!isNull$5) {\n" + " field$5 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.getString(0))" + ";\n" + " }\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + " if (isNull$5) {\n" + " out.setField(0, null);\n" + " } else {\n" + " out.setField(0, field$5);\n" + " }\n" + "\n" + "\n" + "\n" + " if (isNull$6) {\n" + " out.setField(1, null);\n" + " } else {\n" + " out.setField(1, field$6);\n" + " }\n" + "\n" + "\n" + " joinedRow$7.replace(in1, out);\n" + " joinedRow$7.setRowKind(in1.getRowKind());\n" + " outputResult(joinedRow$7);\n" + " }\n" + "\n" + " }\n" + "\n" + " @Override\n" + " public void close() throws Exception {\n" + "\n" + " }\n" + "}"; Class c = JaninoUtils.genClass("BatchJoinTableFuncCollector$8", s, TableFunctionCollector.class); System.out.println(1); String s2 = "\n" + " public class JoinTableFuncCollector$8 extends org.apache.flink.table.runtime.collector" + ".TableFunctionCollector {\n" + "\n" + " org.apache.flink.table.data.GenericRowData out = new org.apache.flink.table.data" + ".GenericRowData(2);\n" + "org.apache.flink.table.data.utils.JoinedRowData 
joinedRow$7 = new org.apache.flink.table.data" + ".utils.JoinedRowData();\n" + "\n" + " public JoinTableFuncCollector$8(Object[] references) throws Exception {\n" + " \n" + " }\n" + "\n" + " @Override\n" + " public void open(org.apache.flink.configuration.Configuration parameters) throws Exception" + " {\n" + " \n" + " }\n" + "\n" + " @Override\n" + " public void collect(Object record) throws Exception {\n" + " org.apache.flink.table.data.RowData in1 = (org.apache.flink.table.data.RowData) getInput" + "();\n" + " org.apache.flink.table.data.RowData in2 = (org.apache.flink.table.data.RowData) record;\n" + " org.apache.flink.table.data.binary.BinaryStringData field$5;\n" + "boolean isNull$5;\n" + "long field$6;\n" + "boolean isNull$6;\n" + " isNull$6 = in2.isNullAt(1);\n" + "field$6 = -1L;\n" + "if (!isNull$6) {\n" + " field$6 = in2.getLong(1);\n" + "}\n" + "isNull$5 = in2.isNullAt(0);\n" + "field$5 = org.apache.flink.table.data.binary.BinaryStringData.EMPTY_UTF8;\n" + "if (!isNull$5) {\n" + " field$5 = ((org.apache.flink.table.data.binary.BinaryStringData) in2.getString(0));\n" + "}\n" + " \n" + " \n" + "\n" + "\n" + "\n" + "\n" + "if (isNull$5) {\n" + " out.setField(0, null);\n" + "} else {\n" + " out.setField(0, field$5);\n" + "}\n" + " \n" + "\n" + "\n" + "if (isNull$6) {\n" + " out.setField(1, null);\n" + "} else {\n" + " out.setField(1, field$6);\n" + "}\n" + " \n" + " \n" + "joinedRow$7.replace(in1, out);\n" + "joinedRow$7.setRowKind(in1.getRowKind());\n" + "outputResult(joinedRow$7);\n" + " \n" + " }\n" + "\n" + " @Override\n" + " public void close() throws Exception {\n" + " \n" + " }\n" + " }\n" + " "; Class c1 = JaninoUtils.genClass("JoinTableFuncCollector$8", s2, TableFunctionCollector.class); System.out.println(1); String s3 = "/* 1 */\n" + "/* 2 */ import java.util.LinkedList;\n" + "/* 3 */ import java.util.List;\n" + "/* 4 */ public class LookupFunction$4\n" + " /* 5 */ extends org.apache.flink.api.common.functions.RichFlatMapFunction {\n" + " /* 6 
*/\n" + " /* 7 */ private transient flink.examples.sql._03.source_sink.table.redis.v2.source" + ".RedisRowDataLookupFunction " + "function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc;\n" + " /* 8 */ private TableFunctionResultConverterCollector$2 resultConverterCollector$3 = " + "null;\n" + " /* 9 */\n" + " /* 10 */ public LookupFunction$4(Object[] references) throws Exception {\n" + " /* 11 */ " + "function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc = (((flink.examples.sql._03.source_sink.table.redis.v2.source.RedisRowDataLookupFunction) references[0]));\n" + " /* 12 */ }\n" + " /* 13 */\n" + " /* 14 */\n" + " /* 15 */\n" + " /* 16 */ @Override\n" + " /* 17 */ public void open(org.apache.flink.configuration.Configuration parameters) " + "throws Exception {\n" + " /* 18 */\n" + " /* 19 */ " + "function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.open(new org.apache.flink.table.functions.FunctionContext(getRuntimeContext()));\n" + " /* 20 */\n" + " /* 21 */\n" + " /* 22 */ resultConverterCollector$3 = new TableFunctionResultConverterCollector$2" + "();\n" + " /* 23 */ resultConverterCollector$3.setRuntimeContext(getRuntimeContext());\n" + " /* 24 */ resultConverterCollector$3.open(new org.apache.flink.configuration" + ".Configuration());\n" + " /* 25 */\n" + " /* 26 */\n" + " /* 27 */ " + "function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.setCollector(resultConverterCollector$3);\n" + " /* 28 */\n" + " /* 29 */ }\n" + " /* 30 */\n" + " /* 31 */ @Override\n" + " /* 32 */ public void flatMap(Object _in1, org.apache.flink.util.Collector c) throws " + "Exception {\n" + " /* 33 */ List l = (List) _in1;\n" + " /* 34 */ List list = new " + "LinkedList();\n" + " /* 35 */ for (int i = 
0; i < l.size(); i++) {\n" + " /* 36 */\n" + " /* 37 */ org.apache.flink.table.data.RowData in1 = (org.apache.flink" + ".table.data.RowData) l.get(i);\n" + " /* 38 */\n" + " /* 39 */\n" + " /* 40 */ org.apache.flink.table.data.binary.BinaryStringData field$0;\n" + " /* 41 */ boolean isNull$0;\n" + " /* 42 */\n" + " /* 43 */ isNull$0 = in1.isNullAt(2);\n" + " /* 44 */ field$0 = org.apache.flink.table.data.binary.BinaryStringData" + ".EMPTY_UTF8;\n" + " /* 45 */ if (!isNull$0) {\n" + " /* 46 */ field$0 = ((org.apache.flink.table.data.binary" + ".BinaryStringData) in1.getString(2));\n" + " /* 47 */ }\n" + " /* 48 */\n" + " /* 49 */ list.add(field$0);\n" + " /* 50 */ }\n" + " /* 51 */\n" + " /* 52 */\n" + " /* 53 */ resultConverterCollector$3.setCollector(c);\n" + " /* 54 */\n" + " /* 55 */\n" + " /* 56 */ " + "function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.eval((List) list);\n" + " /* 57 */\n" + " /* 58 */\n" + " /* 59 */ }\n" + " /* 60 */\n" + " /* 61 */ @Override\n" + " /* 62 */ public void close() throws Exception {\n" + " /* 63 */\n" + " /* 64 */ " + "function_flink$examples$sql$_03$source_sink$table$redis$v2$source$RedisRowDataLookupFunction$9a02959d27765bacc6e3b2107f2d01bc.close();\n" + " /* 65 */\n" + " /* 66 */ }\n" + " /* 67 */\n" + " /* 68 */\n" + " /* 69 */ public class TableFunctionResultConverterCollector$2 extends org.apache" + ".flink.table.runtime.collector.WrappingCollector {\n" + " /* 70 */\n" + " /* 71 */\n" + " /* 72 */\n" + " /* 73 */ public TableFunctionResultConverterCollector$2() throws Exception " + "{\n" + " /* 74 */\n" + " /* 75 */ }\n" + " /* 76 */\n" + " /* 77 */ @Override\n" + " /* 78 */ public void open(org.apache.flink.configuration.Configuration " + "parameters) throws Exception {\n" + " /* 79 */\n" + " /* 80 */ }\n" + " /* 81 */\n" + " /* 82 */ @Override\n" + " /* 83 */ public void collect(Object record) throws Exception {\n" + " /* 84 */ List 
externalResult$1 =" + " (List) record;\n" + " /* 85 */\n" + " /* 86 */\n" + " /* 87 */\n" + " /* 88 */\n" + " /* 89 */ if (externalResult$1 != null) {\n" + " /* 90 */ outputResult(externalResult$1);\n" + " /* 91 */ }\n" + " /* 92 */\n" + " /* 93 */ }\n" + " /* 94 */\n" + " /* 95 */ @Override\n" + " /* 96 */ public void close() {\n" + " /* 97 */ try {\n" + " /* 98 */\n" + " /* 99 */ } catch (Exception e) {\n" + " /* 100 */ throw new RuntimeException(e);\n" + " /* 101 */ }\n" + " /* 102 */ }\n" + " /* 103 */ }\n" + " /* 104 */\n" + " /* 105 */ }\n" + "/* 106 */ "; Class c3 = JaninoUtils.genClass("LookupFunction$4", s3, RichFlatMapFunction.class); System.out.println(1); } } ================================================ FILE: flink-examples-1.13/src/test/proto/person.proto ================================================ syntax = "proto3"; package flink; option java_package = "flink.examples.sql._05.format.formats.protobuf"; option java_outer_classname = "PersonOuterClassname"; option java_multiple_files = true; message Person { string name = 1; repeated string names = 2; int32 id = 3; repeated int32 ids = 4; int64 long = 5; repeated int64 longs = 6; map si_map = 7; map sl_map = 8; map sd_map = 9; Dog dog = 10; repeated Dog dogs = 11; enum ContactType { MOBILE = 0; MESSAGE = 1; WECHAT = 2; EMAIL = 3; } message Contact { string number = 1; ContactType contact_type = 2; } repeated Contact contacts = 12; } message Dog { string name = 1; int32 id = 2; } ================================================ FILE: flink-examples-1.13/src/test/scala/ScalaEnv.scala ================================================ import org.apache.flink.api.java.tuple.Tuple3 import org.apache.flink.api.scala._ import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment import org.apache.flink.table.api.{DataTypes, Schema} import org.apache.flink.types.Row // 
https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/table/udfs.html

/**
 * Demo: join a bounded stream against a lateral table function via SQL.
 *
 * References:
 *  - https://blog.csdn.net/fct2001140269/article/details/84066274
 *  - https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/table/
 *  - https://blog.csdn.net/qq_35338741/article/details/108645832
 */
object ScalaEnv {

  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // create a TableEnvironment bridged onto the streaming environment
    val tableEnv = StreamTableEnvironment.create(env)

    // Bounded demo input: (key, value, epoch-millis timestamp).
    val source = env.fromCollection(scala.Iterator.apply(
      Tuple3.of(new String("2"), 1L, 1627218000000L),
      Tuple3.of(new String("2"), 101L, 1627218000000L + 6000L),
      Tuple3.of(new String("2"), 201L, 1627218000000L + 7000L),
      Tuple3.of(new String("2"), 301L, 1627218000000L + 7000L)))

    tableEnv.createTemporaryView("source_db.source_table"
      , source
      , Schema
        .newBuilder()
        .column("f0", DataTypes.STRING())
        .column("f1", DataTypes.BIGINT())
        .column("f2", DataTypes.BIGINT())
        .build())

    // BUG FIX: the SQL below calls the function as `table1`, but it used to be
    // registered under the unrelated name "hashCode", so the query could never
    // resolve it. Register it under the name the query actually uses.
    tableEnv.createFunction("table1"
      , classOf[TableFunc0])

    val sql = "select * from source_db.source_table as a LEFT JOIN LATERAL TABLE(table1(a.f1)) AS DIM(status_new) ON TRUE"

    tableEnv.toDataStream(tableEnv.sqlQuery(sql), classOf[Row]).print()

    // execute
    env.execute()
  }
}

================================================
FILE: flink-examples-1.13/src/test/scala/TableFunc0.scala
================================================

import org.apache.flink.table.functions.TableFunction

// Simple record type emitted by the table function.
case class SimpleUser(name: String, age: Int)

/**
 * Demo TableFunction. The splitting logic is currently commented out, so
 * eval collects nothing; kept as-is to preserve the original behavior.
 */
class TableFunc0 extends TableFunction[SimpleUser] {

  // make sure input element's format is ""
  def eval(user: Long): Unit = {
    // if (user.contains("#")) {
    //
    //   val splits = user.split("#")
    //
    //   collect(SimpleUser(splits(0), splits(1).toInt))
    //
    // }
  }
}

================================================
FILE: flink-examples-1.14/pom.xml
================================================

flink-study com.github.antigeneral 1.0-SNAPSHOT 4.0.0 com.github.antigeneral flink-examples-1.14 1.14.0
================================================ FILE: flink-examples-1.14/src/main/java/flink/examples/sql/_08/batch/HiveModuleV2.java ================================================

package flink.examples.sql._08.batch;

import static org.apache.flink.util.Preconditions.checkArgument;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.table.catalog.hive.client.HiveShim;
import org.apache.flink.table.catalog.hive.client.HiveShimLoader;
import org.apache.flink.table.catalog.hive.factories.HiveFunctionDefinitionFactory;
import org.apache.flink.table.functions.FunctionDefinition;
import org.apache.flink.table.module.Module;
import org.apache.flink.table.module.hive.udf.generic.GenericUDFLegacyGroupingID;
import org.apache.flink.table.module.hive.udf.generic.HiveGenericUDFGrouping;
import org.apache.flink.util.StringUtils;
import org.apache.hadoop.hive.ql.exec.FunctionInfo;

/**
 * Variant of Flink's built-in HiveModule that, besides exposing Hive's
 * built-in functions, lets callers register additional Hive UDFs by class
 * name at runtime via {@link #registryHiveUDF(String, String)}.
 */
public class HiveModuleV2 implements Module {

    // A set of functions that shouldn't be overridden by HiveModule.
    @VisibleForTesting
    static final Set<String> BUILT_IN_FUNC_BLACKLIST =
            Collections.unmodifiableSet(
                    new HashSet<>(
                            Arrays.asList(
                                    "count",
                                    "cume_dist",
                                    "current_date",
                                    "current_timestamp",
                                    "dense_rank",
                                    "first_value",
                                    "lag",
                                    "last_value",
                                    "lead",
                                    "ntile",
                                    "rank",
                                    "row_number",
                                    "hop",
                                    "hop_end",
                                    "hop_proctime",
                                    "hop_rowtime",
                                    "hop_start",
                                    "percent_rank",
                                    "session",
                                    "session_end",
                                    "session_proctime",
                                    "session_rowtime",
                                    "session_start",
                                    "tumble",
                                    "tumble_end",
                                    "tumble_proctime",
                                    "tumble_rowtime",
                                    "tumble_start")));

    private final HiveFunctionDefinitionFactory factory;
    private final String hiveVersion;
    private final HiveShim hiveShim;
    // Extra UDFs registered at runtime: function name -> Hive UDF class name.
    private final Map<String, String> extraHiveUdfClassNames;

    // Cached function-name listing; built lazily in listFunctions().
    private Set<String> functionNames;

    /** Creates the module for the Hive version detected on the classpath. */
    public HiveModuleV2() {
        this(HiveShimLoader.getHiveVersion());
    }

    /**
     * Creates the module for an explicit Hive version.
     *
     * @param hiveVersion Hive version string, e.g. "3.1.2"; must be non-blank
     */
    public HiveModuleV2(String hiveVersion) {
        checkArgument(
                !StringUtils.isNullOrWhitespaceOnly(hiveVersion),
                "hiveVersion cannot be null or empty");

        this.hiveVersion = hiveVersion;
        this.hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
        this.factory = new HiveFunctionDefinitionFactory(hiveShim);
        this.functionNames = new HashSet<>();
        this.extraHiveUdfClassNames = new HashMap<>();
    }

    @Override
    public Set<String> listFunctions() {
        // Lazy initialize on first call. NOTE(review): UDFs registered through
        // registryHiveUDF AFTER this first call will not appear here, because
        // the cached set is only rebuilt while it is empty — confirm intended.
        if (functionNames.isEmpty()) {
            functionNames = hiveShim.listBuiltInFunctions();
            functionNames.removeAll(BUILT_IN_FUNC_BLACKLIST);
            functionNames.add("grouping");
            functionNames.add(GenericUDFLegacyGroupingID.NAME);
            functionNames.addAll(extraHiveUdfClassNames.keySet());
        }
        return functionNames;
    }

    @Override
    public Optional<FunctionDefinition> getFunctionDefinition(String name) {
        // Blacklisted names are left to Flink's own built-ins.
        if (BUILT_IN_FUNC_BLACKLIST.contains(name)) {
            return Optional.empty();
        }
        // We override Hive's grouping function. Refer to the implementation
        // for more details.
        if (name.equalsIgnoreCase("grouping")) {
            return Optional.of(
                    factory.createFunctionDefinitionFromHiveFunction(
                            name, HiveGenericUDFGrouping.class.getName()));
        }

        // This function is used to generate legacy GROUPING__ID value for old
        // Hive versions.
        if (name.equalsIgnoreCase(GenericUDFLegacyGroupingID.NAME)) {
            return Optional.of(
                    factory.createFunctionDefinitionFromHiveFunction(
                            name, GenericUDFLegacyGroupingID.class.getName()));
        }

        // Prefer Hive's built-in function; otherwise fall back to any UDF
        // registered via registryHiveUDF.
        Optional<FunctionInfo> info = hiveShim.getBuiltInFunctionInfo(name);

        if (info.isPresent()) {
            return info.map(
                    functionInfo ->
                            factory.createFunctionDefinitionFromHiveFunction(
                                    name, functionInfo.getFunctionClass().getName()));
        } else {
            return Optional.ofNullable(this.extraHiveUdfClassNames.get(name))
                    .map(hiveUDFClassName ->
                            factory.createFunctionDefinitionFromHiveFunction(name, hiveUDFClassName));
        }
    }

    /** @return the Hive version this module was created for */
    public String getHiveVersion() {
        return hiveVersion;
    }

    /**
     * Registers an extra Hive UDF under the given function name.
     *
     * @param hiveUDFName      name the UDF will be callable by in SQL
     * @param hiveUDFClassName fully-qualified Hive UDF class name
     */
    public void registryHiveUDF(String hiveUDFName, String hiveUDFClassName) {
        this.extraHiveUdfClassNames.put(hiveUDFName, hiveUDFClassName);
    }
}

================================================ FILE: flink-examples-1.14/src/main/java/flink/examples/sql/_08/batch/Test.java
================================================ package flink.examples.sql._08.batch; import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.RuntimeExecutionMode; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.CheckpointingMode; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.CheckpointConfig; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.table.api.SqlDialect; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.catalog.hive.HiveCatalog; import org.apache.flink.table.module.CoreModule; import org.apache.flink.types.Row; /** * hadoop 启动:/usr/local/Cellar/hadoop/3.2.1/sbin/start-all.sh * http://localhost:9870/ * http://localhost:8088/cluster *

* hive 启动:$HIVE_HOME/bin/hive --service metastore & * hive cli:$HIVE_HOME/bin/hive */ public class Test { public static void main(String[] args) { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); ParameterTool parameterTool = ParameterTool.fromArgs(args); env.setRestartStrategy(RestartStrategies.failureRateRestart(6, org.apache.flink.api.common.time.Time .of(10L, TimeUnit.MINUTES), org.apache.flink.api.common.time.Time.of(5L, TimeUnit.SECONDS))); env.getConfig().setGlobalJobParameters(parameterTool); env.setParallelism(1); // ck 设置 env.getCheckpointConfig().setFailOnCheckpointingErrors(false); env.enableCheckpointing(30 * 1000L, CheckpointingMode.EXACTLY_ONCE); env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3L); env.getCheckpointConfig() .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); env.setRuntimeMode(RuntimeExecutionMode.BATCH); StreamTableEnvironment tEnv = StreamTableEnvironment.create(env); tEnv.getConfig().getConfiguration().setString("pipeline.name", "1.14.0 Interval Outer Join 事件时间案例"); String defaultDatabase = "default"; String hiveConfDir = "/usr/local/Cellar/hive/3.1.2/libexec/conf"; HiveCatalog hive = new HiveCatalog("default", defaultDatabase, hiveConfDir); tEnv.registerCatalog("default", hive); tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); // set the HiveCatalog as the current catalog of the session tEnv.useCatalog("default"); String version = "3.1.2"; tEnv.unloadModule("core"); HiveModuleV2 hiveModuleV2 = new HiveModuleV2(version); tEnv.loadModule("default", hiveModuleV2); tEnv.loadModule("core", CoreModule.INSTANCE); String sql3 = "" + "with tmp as (" + "" + "select count(1) as part_pv\n" + " , max(order_amount) as part_max\n" + " , min(order_amount) as part_min\n" + " from hive_table\n" + " where p_date between '20210920' and '20210920'\n" + ")\n" + "select * from tmp"; Table t = tEnv.sqlQuery(sql3); DataStream r = 
env.addSource(new UserDefinedSource()); tEnv.createTemporaryView("test", r); tEnv.executeSql("select * from test") .print(); } private static class UserDefinedSource implements SourceFunction, ResultTypeQueryable { private volatile boolean isCancel; @Override public void run(SourceContext sourceContext) throws Exception { int i = 0; while (!this.isCancel) { sourceContext.collect(Row.of("a" + i, "b", 1L)); Thread.sleep(10L); i++; if (i == 100) { this.isCancel = true; } } } @Override public void cancel() { this.isCancel = true; } @Override public TypeInformation getProducedType() { return new RowTypeInfo(new TypeInformation[] { TypeInformation.of(String.class) , TypeInformation.of(String.class) , TypeInformation.of(Long.class) }, new String[] {"a", "b", "c"}); } } } ================================================ FILE: flink-examples-1.8/.gitignore ================================================ HELP.md target/ !.mvn/wrapper/maven-wrapper.jar !**/src/main/** #**/src/test/** .idea/ *.iml *.DS_Store ### IntelliJ IDEA ### .idea *.iws *.ipr ================================================ FILE: flink-examples-1.8/pom.xml ================================================ flink-study com.github.antigeneral 1.0-SNAPSHOT 4.0.0 com.github.antigeneral flink-examples-1.8 kr.motd.maven os-maven-plugin ${os-maven-plugin.version} org.apache.maven.plugins maven-compiler-plugin 8 8 org.xolstice.maven.plugins protobuf-maven-plugin ${protobuf-maven-plugin.version} src/test/proto com.google.protobuf:protoc:3.1.0:exe:${os.detected.classifier} grpc-java io.grpc:protoc-gen-grpc-java:${grpc-plugin.version}:exe:${os.detected.classifier} compile compile-custom net.alchim31.maven scala-maven-plugin 3.3.1 scala-compile-first process-resources compile scala-test-compile process-test-resources testCompile -Xms128m -Xmx512m org.codehaus.mojo build-helper-maven-plugin add-source generate-sources add-source src/main/scala add-test-source generate-test-sources add-test-source src/test/java 
src/test/scala org.apache.maven.plugins maven-eclipse-plugin 2.8 true org.scala-ide.sdt.core.scalanature org.eclipse.jdt.core.javanature org.scala-ide.sdt.core.scalabuilder org.scala-ide.sdt.launching.SCALA_CONTAINER org.eclipse.jdt.launching.JRE_CONTAINER org.scala-lang:scala-library org.scala-lang:scala-compiler **/*.scala **/*.java 2.1.1 2.11.12 1.8.0 1.18.20 2.11 2.4.12.Final 2.12.0 2.1.1 2.5.7 2.2.4 30.1.1-jre 2.0.0 1.2.3 1.8.0-beta2 1.23.1 0.6.1 3.11.0 2.5 1.6.2 1.8 1.8 ================================================ FILE: pom.xml ================================================ 4.0.0 com.github.antigeneral flink-study 1.0-SNAPSHOT flink-examples-1.8 flink-examples-1.12 flink-examples-1.13 flink-examples-1.10 flink-examples-1.14 pom UTF-8 1.8 1.8 1.7.25 1.2.3 3.7.0 4.0.1 1.13.5 2.1.1 1.18.6 3.6 1.1.0 0.0.2 3.2.1 2.0.5 2.14 1.15.0 2.1.5 2.10.0 2.2.0.RELEASE 1.18.20 2.11 2.4.12.Final 2.12.0 2.8.0 2.5.7 2.2.4 30.1.1-jre 2.0.0 1.2.3 1.8.0-beta2 1.23.1 0.6.1 3.11.0 2.5 1.6.2 2.12.4 0.9.12 1.27.0 3.6.3 7.0.10 4.13.2 3.1.2 8.0.17 org.apache.flink flink-connector-hive_2.11 ${flink.version} org.apache.hive hive-exec ${hive.version} com.twitter chill-protobuf 0.7.6 com.esotericsoftware.kryo kryo net.mguenther.kafka kafka-junit 2.8.0 org.apache.kafka kafka_2.13 ${kafka.version} junit junit ${junit.version} test net.java.dev.javacc javacc ${javacc.version} elastic-job-common-core com.dangdang ${elastic-job.version} elastic-job-lite-core com.dangdang ${elastic-job.version} elastic-job-lite-spring com.dangdang ${elastic-job.version} elastic-job-cloud-executor com.dangdang ${elastic-job.version} org.apache.curator curator-test ${curator.version} org.springframework spring-context ${springframework.version} org.springframework.boot spring-boot-starter ${spring-boot.version} org.springframework.boot spring-boot-starter-actuator ${spring-boot.version} org.springframework.boot spring-boot-starter-web ${spring-boot.version} org.springframework.boot 
spring-boot-configuration-processor ${spring-boot.version} true org.springframework.boot spring-boot-starter-jdbc ${spring-boot.version} org.slf4j jcl-over-slf4j ${slf4j.version} org.slf4j log4j-over-slf4j ${slf4j.version} org.slf4j slf4j-api ${slf4j.version} in.zapr.druid druidry ${druidry.version} org.apache.hbase hbase-client ${hbase.version} org.apache.hadoop hadoop-common ${hadoop.version} slf4j-log4j12 org.slf4j jsr311-api javax.ws.rs jersey-core com.sun.jersey jersey-server com.sun.jersey jersey-servlet com.sun.jersey jersey-json com.sun.jersey org.apache.hadoop hadoop-client ${hadoop.version} org.apache.hadoop hadoop-hdfs ${hadoop.version} jsr311-api javax.ws.rs jersey-core com.sun.jersey jersey-server com.sun.jersey org.apache.hadoop hadoop-mapreduce-client-core ${hadoop.version} slf4j-log4j12 org.slf4j jersey-client com.sun.jersey jersey-server com.sun.jersey jersey-servlet com.sun.jersey jersey-core com.sun.jersey jersey-json com.sun.jersey org.apache.hadoop hadoop-auth ${hadoop.version} slf4j-log4j12 org.slf4j org.apache.flink flink-streaming-java_2.11 ${flink.version} org.apache.flink flink-statebackend-rocksdb_2.11 ${flink.version} org.apache.flink flink-clients_2.11 ${flink.version} org.apache.flink flink-clients_2.12 ${flink.version} org.apache.flink flink-connector-kafka-0.10_2.12 ${flink.version} org.apache.flink flink-connector-filesystem_2.12 ${flink.version} org.apache.flink flink-core ${flink.version} org.apache.zookeeper zookeeper ${zookeeper.version} slf4j-log4j12 org.slf4j log4j log4j org.apache.kafka kafka-clients ${kafka.version} org.projectlombok lombok ${lombok.version} org.apache.commons commons-lang3 ${common-lang3.version} org.apache.curator curator-recipes ${curator-recipes.version} ch.qos.logback logback-classic ${logback.version} org.slf4j slf4j-api io.github.resilience4j resilience4j-retry ${resilience4j.version} io.github.resilience4j resilience4j-circuitbreaker ${resilience4j.version} io.github.resilience4j 
resilience4j-ratelimiter ${resilience4j.version} io.github.resilience4j resilience4j-bulkhead ${resilience4j.version} io.github.resilience4j resilience4j-annotations ${resilience4j.version} io.github.resilience4j resilience4j-timelimiter ${resilience4j.version} commons-dbcp commons-dbcp ${commons-dbcp.version} com.h2database h2 ${h2.version} mysql mysql-connector-java ${mysql.version} org.apache.httpcomponents httpclient 4.5.10 compile joda-time joda-time provided true ${joda-time.version} com.google.protobuf protobuf-java ${protobuf-java.version} com.github.rholder guava-retrying ${guava.retrying.version} com.google.guava guava ${guava.version} org.projectlombok lombok ${lombok.version} org.apache.flink flink-java ${flink.version} org.apache.flink flink-streaming-java_2.11 ${flink.version} org.apache.flink flink-clients_2.11 ${flink.version} org.mvel mvel2 ${mvel2.version} redis.clients jedis ${jedis.version} org.apache.curator curator-framework ${curator.version} org.apache.curator curator-recipes ${curator.version} org.codehaus.groovy groovy ${groovy.version} org.codehaus.groovy groovy-ant ${groovy.version} org.codehaus.groovy groovy-cli-commons ${groovy.version} org.codehaus.groovy groovy-cli-picocli ${groovy.version} org.codehaus.groovy groovy-console ${groovy.version} org.codehaus.groovy groovy-datetime ${groovy.version} org.codehaus.groovy groovy-docgenerator ${groovy.version} org.codehaus.groovy groovy-groovydoc ${groovy.version} org.codehaus.groovy groovy-groovysh ${groovy.version} org.codehaus.groovy groovy-jmx ${groovy.version} org.codehaus.groovy groovy-json ${groovy.version} org.codehaus.groovy groovy-jsr223 ${groovy.version} org.codehaus.groovy groovy-macro ${groovy.version} org.codehaus.groovy groovy-nio ${groovy.version} org.codehaus.groovy groovy-servlet ${groovy.version} org.codehaus.groovy groovy-sql ${groovy.version} org.codehaus.groovy groovy-swing ${groovy.version} org.codehaus.groovy groovy-templates ${groovy.version} org.codehaus.groovy 
groovy-test ${groovy.version} org.codehaus.groovy groovy-test-junit5 ${groovy.version} org.codehaus.groovy groovy-testng ${groovy.version} org.codehaus.groovy groovy-xml ${groovy.version} org.apache.flink flink-table-planner_2.11 ${flink.version} com.google.code.gson gson ${gson.version} org.apache.flink flink-table-common ${flink.version} compile org.apache.flink flink-table-api-java ${flink.version} compile org.apache.flink flink-table-api-java-bridge_2.11 ${flink.version} compile org.apache.flink flink-table-planner-blink_2.11 ${flink.version} compile org.apache.flink flink-connector-jdbc_2.11 ${flink.version} org.apache.flink flink-connector-hbase-2.2_2.11 ${flink.version} org.apache.flink flink-streaming-scala_2.11 ${flink.version} org.apache.flink flink-json ${flink.version} org.apache.bahir flink-connector-redis_2.10 1.0 org.apache.flink flink-connector-kafka_2.12 ${flink.version} ch.qos.logback logback-classic compile ${logback-classic.version} org.slf4j slf4j-log4j12 ${slf4j-log4j12.version} org.apache.flink flink-runtime-web_2.11 ${flink.version} com.fasterxml.jackson.core jackson-databind ${jackson.version} com.fasterxml.jackson.core jackson-core ${jackson.version} com.fasterxml.jackson.core jackson-annotations ${jackson.version} com.fasterxml.jackson.module jackson-module-kotlin ${jackson.version} com.fasterxml.jackson.module jackson-module-parameter-names ${jackson.version} com.fasterxml.jackson.datatype jackson-datatype-guava ${jackson.version} com.hubspot.jackson jackson-datatype-protobuf ${jackson-datatype-protobuf.version} org.apache.calcite calcite-core ${calcite.version} org.apache.maven.plugins maven-compiler-plugin 8 8 org.xolstice.maven.plugins protobuf-maven-plugin ${protobuf-maven-plugin.version} src/test/proto src/main/proto com.google.protobuf:protoc:3.1.0:exe:${os.detected.classifier} grpc-java io.grpc:protoc-gen-grpc-java:${grpc-plugin.version}:exe:${os.detected.classifier} compile compile-custom org.apache.maven.plugins 
maven-dependency-plugin unpack-parser-template initialize unpack org.apache.calcite calcite-core jar true ${project.build.directory}/ **/Parser.jj maven-resources-plugin copy-fmpp-resources initialize copy-resources ${project.build.directory}/codegen src/main/codegen false com.googlecode.fmpp-maven-plugin fmpp-maven-plugin 1.0 org.freemarker freemarker 2.3.28 generate-fmpp-sources generate-sources generate ${project.build.directory}/codegen/config.fmpp target/generated-sources ${project.build.directory}/codegen/templates org.codehaus.mojo javacc-maven-plugin 2.4 generate-sources javacc javacc ${project.build.directory}/generated-sources/ **/Simple1.jj 1 false ${project.build.directory}/generated-sources/ org.apache.maven.plugins maven-surefire-plugin 1 false