Repository: walmartlabs/bigben Branch: master Commit: 61f4760a6617 Files: 99 Total size: 405.4 KB Directory structure: gitextract_bc7hdl64/ ├── .gitignore ├── .looper.yml ├── LICENSE.txt ├── README.md ├── app/ │ ├── LICENSE.txt │ ├── pom.xml │ └── src/ │ ├── main/ │ │ ├── kotlin/ │ │ │ └── com/ │ │ │ └── walmartlabs/ │ │ │ └── bigben/ │ │ │ └── app/ │ │ │ ├── app.kt │ │ │ └── run.kt │ │ └── resources/ │ │ ├── application.conf │ │ ├── bigben-lifecycle.yaml │ │ ├── bigben.yaml │ │ └── log4j.xml │ └── test/ │ ├── kotlin/ │ │ └── com/ │ │ └── walmartlabs/ │ │ └── bigben/ │ │ └── tests/ │ │ ├── APITests.kt │ │ ├── BigBenTests.kt │ │ └── KafkaTests.kt │ └── resources/ │ ├── bigben-api-test.yaml │ ├── bigben-kafka-test.yaml │ ├── bigben-test.yaml │ └── log4j.xml ├── build/ │ ├── configs/ │ │ ├── log4j.xml │ │ └── overrides.yaml │ ├── docker/ │ │ ├── Dockerfile │ │ ├── app_run.sh │ │ ├── cassandra_run.sh │ │ ├── cleanup.sh │ │ ├── deploy.sh │ │ ├── docker-compose.yml │ │ ├── docker_build.sh │ │ ├── single_node_run.sh │ │ └── start.sh │ └── exec/ │ ├── app_run.sh │ ├── build.sh │ └── cleanup.sh ├── cassandra/ │ ├── LICENSE.txt │ ├── pom.xml │ └── src/ │ ├── main/ │ │ ├── kotlin/ │ │ │ └── com/ │ │ │ └── walmartlabs/ │ │ │ └── bigben/ │ │ │ └── providers/ │ │ │ └── domain/ │ │ │ └── cassandra/ │ │ │ ├── CassandraModule.kt │ │ │ ├── ClusterConfig.kt │ │ │ ├── Entities.kt │ │ │ └── codecs.kt │ │ └── resources/ │ │ └── bigben-schema.cql │ └── test/ │ ├── kotlin/ │ │ └── com/ │ │ └── walmartlabs/ │ │ └── bigben/ │ │ └── cassandra/ │ │ └── tests/ │ │ ├── IntegrationTests.kt │ │ └── ORMTests.kt │ └── resources/ │ ├── bigben-test.yaml │ ├── log4j.xml │ └── testng.xml ├── commons/ │ ├── LICENSE.txt │ ├── pom.xml │ └── src/ │ ├── main/ │ │ └── kotlin/ │ │ └── com/ │ │ └── walmartlabs/ │ │ └── bigben/ │ │ └── utils/ │ │ ├── _extns.kt │ │ ├── _future_extns.kt │ │ ├── commons/ │ │ │ ├── ListenableFutureAdapter.kt │ │ │ ├── Props.kt │ │ │ ├── TaskExecutor.kt │ │ │ └── modules.kt │ │ └── 
hz/ │ │ ├── ClusterSingleton.kt │ │ ├── Hz.kt │ │ └── Service.kt │ └── test/ │ ├── kotlin/ │ │ └── PropsTests.kt │ └── resources/ │ ├── a.yaml │ ├── b.yaml │ ├── log4j.xml │ ├── overrides.yaml │ ├── props.yaml │ ├── sub1-overrides.yaml │ └── sub1.yaml ├── cron/ │ ├── pom.xml │ └── src/ │ └── main/ │ └── kotlin/ │ └── com/ │ └── walmartlabs/ │ └── bigben/ │ └── cron/ │ ├── cron-hz.kt │ ├── cron-processors.kt │ └── cron.kt ├── kafka/ │ ├── LICENSE.txt │ ├── pom.xml │ └── src/ │ └── main/ │ └── kotlin/ │ └── com/ │ └── walmartlabs/ │ └── bigben/ │ └── kafka/ │ ├── kafka-mocks.kt │ ├── kafka-module.kt │ ├── kafka-processor.kt │ └── kafka-producer.kt ├── lib/ │ ├── LICENSE.txt │ ├── pom.xml │ └── src/ │ └── main/ │ ├── kotlin/ │ │ └── com/ │ │ └── walmartlabs/ │ │ └── bigben/ │ │ ├── BigBen.kt │ │ ├── api/ │ │ │ ├── EventReceiver.kt │ │ │ └── EventService.kt │ │ ├── core/ │ │ │ ├── BucketManager.kt │ │ │ ├── BucketSnapshot.kt │ │ │ ├── BucketsLoader.kt │ │ │ ├── ScheduleScanner.kt │ │ │ └── StatusSyncer.kt │ │ ├── entities/ │ │ │ ├── EntityProvider.kt │ │ │ └── entities.kt │ │ ├── extns/ │ │ │ ├── _api_response_extns.kt │ │ │ ├── _bigben_extns.kt │ │ │ ├── _do_extns.kt │ │ │ └── _time_extns.kt │ │ ├── hz/ │ │ │ ├── BucketStore.kt │ │ │ └── HzObjectFactory.kt │ │ ├── modules.kt │ │ ├── processors/ │ │ │ ├── no_ops.kt │ │ │ └── processors.kt │ │ └── tasks/ │ │ └── tasks.kt │ └── resources/ │ └── hz.template.xml ├── pom.xml └── run_bigben_standalone.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ **/target/ # Mobile Tools for Java (J2ME) .mtj.tmp/ # Package Files # *.ear # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml hs_err_pid* pom.xml.tag pom.xml.releaseBackup pom.xml.versionsBackup pom.xml.next release.properties dependency-reduced-pom.xml 
buildNumber.properties .DS_Store .idea/ .modules/ app/app.iml cassandra/bigben-cassandra.iml commons/bigben-commons.iml cron/cron.iml kafka/bigben-kafka.iml lib/bigben-lib.iml ================================================ FILE: .looper.yml ================================================ tools: jdk: 8 maven: 3.5.2 triggers: - manual: Run default - manual: name: Release Build call: release flows: default: - call: versionsCheck - call: build pr: - echo "Running build for $GITHUB_PR_URL" - call: versionsCheck - (name Maven build) mvn -B clean install versionsCheck: - (name JDK Version) java -version - (name Maven version) mvn -v build: - exposeVars(maven) - (name Project information) echo "Building ${MAVEN_GROUP_ID}:${MAVEN_ARTIFACT_ID}:${MAVEN_VERSION}" - (name Maven deploy) mvn -B -DskipTests -Darguments=-DskipTests clean deploy release: - call: versionsCheck - (name Maven release) mvn -B -DskipTests -Darguments=-DskipTests clean release:prepare release:perform ================================================ FILE: LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2018 Sandeep Malik Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ *** # NOTICE: ## This repository has been archived and is not supported. [![No Maintenance Intended](http://unmaintained.tech/badge.svg)](http://unmaintained.tech/) *** NOTICE: SUPPORT FOR THIS PROJECT HAS ENDED This project was owned and maintained by Walmart. This project has reached its end of life and Walmart no longer supports this project. We will no longer be monitoring the issues for this project or reviewing pull requests. You are free to continue using this project under the license terms or forks of this project at your own risk. This project is no longer subject to Walmart's bug bounty program or other security monitoring. ## Actions you can take We recommend you take the following actions: * Review any configuration files used for build automation and make appropriate updates to remove or replace this project * Notify other members of your team and/or organization of this change * Notify your security team to help you evaluate alternative options ## Forking and transition of ownership For [security reasons](https://www.theregister.co.uk/2018/11/26/npm_repo_bitcoin_stealer/), Walmart does not transfer the ownership of our primary repos on Github or other platforms to other individuals/organizations. Further, we do not transfer ownership of packages for public package management systems. 
If you would like to fork this package and continue development, you should choose a new name for the project and create your own packages, build automation, etc. Please review the licensing terms of this project, which continue to be in effect even after decommission. # BigBen `BigBen` is a generic, multi-tenant, time-based event scheduler and cron scheduling framework based on `Cassandra` and `Hazelcast` It has the following features: * **Distributed** - `BigBen` uses a distributed design and can be deployed on 10's or 100's of machines and can be dc-local or cross-dc * **Horizontally scalable** - `BigBen` scales linearly with the number of machines. * **Fault tolerant** - `BigBen` employs a number of failure protection modes and can withstand arbitrary prolonged down times * **Performant** - `BigBen` can easily scale to 10,000's or even millions of event triggers with a very small cluster of machines. It can also easily manage millions of crons running in a distributed manner * **Highly Available** - As long as a single machine is available in the cluster, `BigBen` will guarantee the execution of events (albeit with a lower throughput) * **Extremely consistent** - `BigBen` employs a single master design (the master itself is highly available with `n-1` masters on standby in an `n`-machine cluster) to ensure that no two nodes fire the same event or execute the same cron. * **NoSql based** - `BigBen` comes with default implementation with `Cassandra` but can be easily extended to support other `NoSql` or even `RDBMS` data stores * **Auditable** - `BigBen` keeps a track of all the events fired and crons executed with a configurable retention * **Portable, cloud friendly** - `BigBen` comes as application bundled as `war` or an embedded lib as `jar`, and can be deployed on any cloud, `on-prem` or `public` ## Use cases `BigBen` can be used for a variety of time based workloads, both single trigger based or repeating crons. 
Some of the use cases can be * **Delayed execution** - E.g. if a job is to be executed 30 mins from now * **System retries** - E.g. if a service A wants to call service B and service B is down at the moment, then service A can schedule an exponential backoff retry strategy with retry intervals of 1 min, 10 mins, 1 hour, 12 hours, and so on. * **Timeout tickers** - E.g. if service A sends a message to service B via `Kafka` and expects a response in 1 min, then it can schedule a `timeout check` event to be executed after 1 min * **Polling services** - E.g. if service A wants to poll service B at some frequency, it can schedule a cron to be executed at some specified frequency * **Notification Engine** - `BigBen` can be used to implement `notification engine` with scheduled deliveries, scheduled polls, etc * **Workflow state machine** - `BigBen` can be used to implement a distributed `workflow` with state suspensions, alerts and monitoring of those suspensions. ## Architectural Goals `BigBen` was designed to achieve the following goals: * Uniformly distributed storage model * Resilient to hot spotting due to sudden surge in traffic * Uniform execution load profile in the cluster * Ensure that all nodes have similar load profiles to minimize misfires * Linear Horizontal Scaling * Lock-free execution * Avoid resource contentions * Plugin based architecture to support variety of data bases like `Cassandra, Couchbase, Solr Cloud, Redis, RDBMS`, etc * Low maintenance, elastic scaling ## Design and architecture See the blog published at [Medium](https://medium.com/walmartlabs/an-approach-to-designing-distributed-fault-tolerant-horizontally-scalable-event-scheduler-278c9c380637) for a full description of various design elements of `BigBen` ## Events Inflow `BigBen` can receive events in two modes: * **kafka** - inbound and outbound Kafka topics to consume event requests and publish event triggers * **http** - HTTP APIs to send event requests and HTTP APIs to receive event 
triggers. *It is strongly recommended to use `kafka` for better scalability* ### Event Inflow diagram ![inflow](/docs/assets/inflow.png "Events Inflow diagram") *Request and Response channels can be mixed. For example, the event requests can be sent through HTTP APIs but the event triggers (response) can be received through a Kafka Topic.* ## Event processing guarantees `BigBen` has a robust event processing guarantees to survive various failures. However, `event-processing` is not same as `event-acknowledgement`. `BigBen` works in a no-acknowledgement mode (*at least for now*). Once an event is triggered, it is either published to `Kafka` or sent through an `HTTP API`. Once the `Kafka` producer returns success, or `HTTP API` returns non-500 status code, the event is **assumed** to be processed and marked as such in the system. However, for whatever reason if the event was not processed and resulted in an error (e.g. `Kafka` producer timing out, or `HTTP API` throwing `503`), then the event will be retried multiple times as per the strategies discussed below ### Event misfire strategy Multiple scenarios can cause `BigBen` to be not able to trigger an event on time. Such scenarios are called misfires. Some of them are: * `BigBen`'s internal components are down during event trigger. E.g. * `BigBen`'s data store is down and events could not be fetched * `VMs` are down * `Kafka` Producer could not publish due to loss of partitions / brokers or any other reasons * `HTTP API` returned a 500 error code * Any other unexpected failure In any of these cases, the event is first retried in memory using an exponential back-off strategy. 
Following parameters control the retry behavior: * _event.processor.max.retries_ - how many in-memory retries will be made before declaring the event as error, default is 3 * _event.processor.initial.delay_ - how long in seconds the system should wait before kicking in the retry, default is 1 second * _event.processor.backoff.multiplier_ - the back off multiplier factor, default is 2. E.g. the intervals would be 1 second, 2 seconds, 4 seconds. If the event still is not processed, then the event is marked as `ERROR`. All the events marked `ERROR` are retried up to a configured limit called `events.backlog.check.limit`. This value can be an arbitrary amount of time, e.g. 1 day, 1 week, or even 1 year. E.g. if the limit is set at `1 week` then any event failures will be retried for `1 week` after which, they will be permanently marked as `ERROR` and ignored. The `events.backlog.check.limit` can be changed at any time by changing the value in `bigben.yaml` file and bouncing the servers. ### Event bucketing and shard size `BigBen` shards events by minutes. However, since it's not known in advance how many events will be scheduled in a given minute, the buckets are further sharded by a pre-defined shard size. The shard size is a design choice that needs to be made before deployment. Currently, it's not possible to change the shard size once defined. An undersized shard value has minimal performance impact, however an oversized shard value may keep some machines idling. The default value of `1000` is good enough for most practical purposes as long as the number of events to be scheduled per minute exceeds `1000 x n`, where `n` is the number of machines in the cluster. If the events to be scheduled are much less than `1000` then a smaller shard size may be chosen. ### Multi shard parallel processing Each bucket with all its shards is distributed across the cluster for execution with an algorithm that ensures a random and uniform distribution. 
The following diagram shows the execution flow. ![shard design](https://cdn-images-1.medium.com/max/1600/1*euaHLOnw6G96SigfXxWhtA.png "BigBen processing flow") ### Multi-tenancy Multiple tenants can use `BigBen` in parallel. Each one can configure how the events will be delivered once triggered. Tenant 1 can configure the events to be delivered in `kafka` topic `t1`, whereas tenant 2 can have them delivered via a specific `http` url. The usage of tenants will become clearer with the below explanation of `BigBen` APIs ## Docker support BigBen is dockerized and image (`bigben`) is available on docker hub. The code also contains scripts, which start `cassandra`, `hazelcast` and `app`. To quickly set up the application for local dev testing, do the following steps: 1. `git clone $repo` 2. `cd bigben/build/docker` 3. execute `./docker_build.sh` 4. start cassandra container by executing `./cassandra_run.sh` 5. start app by executing `./app_run.sh` 6. To run multiple app nodes `export NUM_INSTANCES=3 && ./app_run.sh` 7. wait for application to start on port `8080` 8. verify that `curl http://localhost:8080/ping` returns `200` 9. Use `./cleanup.sh` to stop and remove all `BigBen` related containers ## Non-docker execution `BigBen` can be run without docker as well. Following are the steps 1. `git clone $repo` 2. `cd bigben/build/exec` 3. execute `./build.sh` 4. execute `./app_run.sh` ## Env properties You can set the following environment properties 1. `APP_CONTAINER_NAME` (default bigben_app) 2. `SERVER_PORT` (default 8080) 3. `HZ_PORT` (default 5701) 4. `NUM_INSTANCES` (default 1) 5. `LOGS_DIR` (default bigben/../bigben_logs) 6. `CASSANDRA_SEED_IPS` (default $HOST_IP) 7. `HZ_MEMBER_IPS` (default $HOST_IP) 8. `JAVA_OPTS` # How to override default config values? `BigBen` employs an extensive override system to allow someone to override the default properties. 
The order of priority is system properties > system env variables > overrides > defaults The overrides can be defined in `config/overrides.yaml` file. The `log4j.xml` can also be changed to change log behavior without recompiling binaries ## How to setup `Cassandra` for `BigBen`? Following are the steps to set up `Cassandra`: 1. git clone the `master` branch 2. Set up a Cassandra cluster 3. create a keyspace `bigben` in `Cassandra` cluster with desired replication 4. Open the file `bigben-schema.cql` and execute `cqlsh -f bigben-schema.cql` ## APIs ### cluster `GET /events/cluster` * response sample (a 3 node cluster running on single machine and three different ports (5701, 5702, 5703)): ```json { "[127.0.0.1]:5702": "Master", "[127.0.0.1]:5701": "Slave", "[127.0.0.1]:5703": "Slave" } ``` The node marked `Master` is the master node that does the scheduling. ### tenant registration A tenant can be registered by calling the following API `POST /events/tenant/register` * payload schema ```json { "$schema": "http://json-schema.org/draft-04/schema#", "type": "object", "properties": { "tenant": { "type": "string" }, "type": { "type": "string" }, "props": { "type": "object" } }, "required": [ "tenant", "type", "props" ] } ``` * `tenant` - specifies a tenant and can be any arbitrary value. * `type` - specifies the type of `tenant`. One of the three types can be used * MESSAGING - specifies that `tenant` wants events delivered via a messaging queue. Currently, `kafka` is the only supported messaging system. * HTTP - specifies that `tenant` wants events delivered via an http callback URL. * CUSTOM_CLASS - specifies a custom event processor implemented for custom processing of events * `props` - A bag of properties needed for each type of tenant. 
* kafka sample: ```json { "tenant": "TenantA/ProgramB/EnvC", "type": "MESSAGING", "props": { "topic": "some topic name", "bootstrap.servers": "node1:9092,node2:9092" } } ``` * http sample ```json { "tenant": "TenantB/ProgramB/EnvC", "type": "HTTP", "props": { "url": "http://someurl", "headers": { "header1": "value1", "header2": "value2" } } } ``` ### fetch all tenants: `GET /events/tenants` ### event scheduling `POST /events/schedule` `Payload - List` `EventRequest` schema: ```json { "$schema": "http://json-schema.org/draft-04/schema#", "type": "object", "properties": { "id": { "type": "string" }, "eventTime": { "type": "string", "description": "An ISO-8601 formatted timestamp e.g. 2018-01-31T04:00.00Z" }, "tenant": { "type": "string" }, "payload": { "type": "string", "description": "an optional event payload, must NOT be null with deliveryOption = PAYLOAD_ONLY" }, "mode": { "type": "string", "enum": ["UPSERT", "REMOVE"], "default": "UPSERT", "description": "Use REMOVE to delete an event, UPSERT to add/update an event" }, "deliveryOption": { "type": "string", "enum": ["FULL_EVENT", "PAYLOAD_ONLY"], "default": "FULL_EVENT", "description": "Use FULL_EVENT to have full event delivered via kafka/http, PAYLOAD_ONLY to have only the payload delivered" } }, "required": [ "id", "eventTime", "tenant" ] } ``` ### find an event `GET /events/find?id=?&tenant=?` ### dry run `POST /events/dryrun?id=?&tenant=?` fires an event without changing its final status ## cron APIs coming up... ================================================ FILE: app/LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2018 Sandeep Malik Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: app/pom.xml ================================================ 4.0.0 com.walmartlabs.bigben bigben 1.0.7-SNAPSHOT bigben-app BigBen:app http://maven.apache.org takari-jar 1.7.25 com.walmartlabs.bigben bigben-lib com.walmartlabs.bigben bigben-cassandra com.walmartlabs.bigben bigben-kafka com.walmartlabs.bigben bigben-cron org.slf4j slf4j-log4j12 ${slf4j-api.version} io.ktor ktor-server-core io.ktor ktor-server-netty io.netty netty-codec-http2 io.ktor ktor-jackson io.ktor ktor-client-core test io.ktor ktor-client-apache test org.testng testng test org.jetbrains.kotlin kotlin-maven-plugin org.apache.maven.plugins maven-surefire-plugin ${project.build.directory} maven-resources-plugin false org.apache.maven.plugins maven-assembly-plugin 3.1.0 bigben false jar-with-dependencies true com.walmartlabs.bigben.app.RunKt assemble-all package single ================================================ FILE: app/src/main/kotlin/com/walmartlabs/bigben/app/app.kt ================================================ /*- * #%L * BigBen:app * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben.app /** * Created by smalik3 on 2/28/18 */ import com.walmartlabs.bigben.BigBen import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.api.EventReceiver import com.walmartlabs.bigben.entities.EventRequest import com.walmartlabs.bigben.entities.EventStatus.REJECTED import com.walmartlabs.bigben.extns.bucket import com.walmartlabs.bigben.extns.nowUTC import com.walmartlabs.bigben.utils.* import com.walmartlabs.bigben.utils.commons.Module import com.walmartlabs.bigben.utils.commons.ModuleRegistry import org.slf4j.Logger import java.time.Duration import java.time.ZonedDateTime import java.time.temporal.ChronoUnit import java.util.* import java.util.concurrent.ThreadLocalRandom import java.util.concurrent.TimeUnit import kotlin.system.exitProcess class App { init { try { val lifecycle = typeRefYaml>(App::class.java.classLoader.getResource("bigben-lifecycle.yaml").readText()) initPhase("pre-init", lifecycle, null) val l = logger() l.info("phase:pre-init finished") println( "\n" + " ____ _ ____ \n" + " | _ \\(_) | _ \\ \n" + " | |_) |_ __ _| |_) | ___ _ __ \n" + " | _ <| |/ _` | _ < / _ \\ '_ \\ \n" + " | |_) | | (_| | |_) | __/ | | |\n" + " |____/|_|\\__, |____/ \\___|_| |_|\n" + " __/ | \n" + " |___/ \n" ) BigBen.init() initPhase("post-init", lifecycle, l) l.info("Bigben => successfully started") } catch (e: Exception) { try { val l: Logger = logger() l.error("Bigben:error => unknown error, system will exit", e.rootCause()!!) } catch (ignore: Exception) { } exitProcess(1) } } private fun initPhase(phase: String, lifecycle: Map, l: Logger?) 
{ l?.info("phase:$phase started") lifecycle["$phase-class"]?.run { (Class.forName(this).newInstance() as Module).init(ModuleRegistry()) } ?: lifecycle["$phase-object"]?.run { (Class.forName(this).getDeclaredField("INSTANCE").apply { isAccessible = true }.get(null) as Module).init(ModuleRegistry()) } l?.info("phase:$phase finished") } } object EventGenerator { data class EventGeneration(val offset: String, val period: String, val numEvents: Int, val tenant: String) private val l = logger() fun generateEvents(eg: EventGeneration): Map { val random = ThreadLocalRandom.current() val t1 = nowUTC().bucket() + Duration.parse(eg.offset) val t2 = t1 + Duration.parse(eg.period) val delta = ChronoUnit.MILLIS.between(t1, t2) l.info("generating ${eg.numEvents} random events between $t1 and $t2") return (1..eg.numEvents).map { val t = if(delta > 0) t1.plus(random.nextLong(delta), ChronoUnit.MILLIS) else t1 module().addEvent(EventRequest().also { it.tenant = eg.tenant it.eventTime = t.toString() it.id = UUID.randomUUID().toString() }).transform { if (it?.eventStatus == REJECTED) throw IllegalArgumentException(it.error?.message) else it } }.reduce().transform { it!!.groupBy { ZonedDateTime.parse(it!!.eventTime).bucket() }.mapValues { it.value.size }.toSortedMap() }.get(30L, TimeUnit.MINUTES) } } ================================================ FILE: app/src/main/kotlin/com/walmartlabs/bigben/app/run.kt ================================================ package com.walmartlabs.bigben.app import com.fasterxml.jackson.databind.SerializationFeature.INDENT_OUTPUT import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.api.EventService import com.walmartlabs.bigben.cron.CronService import com.walmartlabs.bigben.extns.APIResponse import com.walmartlabs.bigben.utils.stackTraceAsString import com.walmartlabs.bigben.utils.typeRefJson import io.ktor.application.Application import io.ktor.application.ApplicationCall import io.ktor.application.call import 
io.ktor.application.install import io.ktor.features.ContentNegotiation import io.ktor.features.StatusPages import io.ktor.http.HttpStatusCode import io.ktor.http.HttpStatusCode.Companion.BadRequest import io.ktor.http.HttpStatusCode.Companion.InternalServerError import io.ktor.jackson.jackson import io.ktor.request.receive import io.ktor.response.header import io.ktor.response.respond import io.ktor.routing.* import io.ktor.server.netty.EngineMain import org.apache.commons.text.StrLookup import org.apache.commons.text.StrSubstitutor import org.apache.log4j.xml.DOMConfigurator import java.io.File fun main(args: Array) = EngineMain.main(args) fun logs() { System.getProperty("bigben.log.config")?.run { val logFile = File(this) if (logFile.exists()) { println("configuring logger") StrSubstitutor(StrLookup.systemPropertiesLookup()).run { logFile.readLines().map { replace(it) } }.joinToString("\n").run { File(System.getProperty("java.io.tmpdir"), "log4j-overrides-substituted.xml").let { println("using log file from ${it.absolutePath}") it.writeText(this) DOMConfigurator.configure(it.toURI().toURL()) } } } } } fun app() = App() fun Application.routes() { routing { get("/ping") { call.respond(mapOf("status" to "OK")) } route("/events") { val es = module() get("/cluster") { call.fromAPIResponse(es.clusterStats()) } post("/schedule") { call.fromAPIResponse(es.schedule(typeRefJson(call.receive()))) } post("/tenant/register") { call.fromAPIResponse(es.registerProcessor(call.receive())) } get("/tenants") { call.fromAPIResponse(es.registeredTenants()) } get("/find") { call.fromAPIResponse(es.find(call.request.queryParameters["id"]!!, call.request.queryParameters["tenant"]!!)) } post("/dryrun") { call.fromAPIResponse(es.dryrun(call.request.queryParameters["id"]!!, call.request.queryParameters["tenant"]!!)) } } post("/generation/random") { call.respond(EventGenerator.generateEvents(call.receive())) } route("/cron") { post { call.fromAPIResponse(CronService.upsert(call.receive())) 
} get("/describe") { call.fromAPIResponse(CronService.describe(call.receive())) } get("/{tenant}/{id}") { call.fromAPIResponse( CronService.get( call.parameters["tenant"]!!, call.parameters["id"]!!, call.request.queryParameters["describe"]?.toBoolean() ) ) } delete("/{tenant}/{id}/{type}") { call.fromAPIResponse(CronService.delete(call.parameters["tenant"]!!, call.parameters["id"]!!, call.parameters["type"]!!)) } } } } fun Application.configure() { install(ContentNegotiation) { jackson { enable(INDENT_OUTPUT) } } install(StatusPages) { exception { e -> call.response.status(BadRequest) call.respond(mapOf("message" to (e.message ?: ""))) } exception { e -> call.response.status(InternalServerError) if (call.request.queryParameters["debug"] != null) { call.respond(mapOf("message" to ((e.message ?: "")), "stacktrace" to e.stackTraceAsString())) } else call.respond(mapOf("message" to (e.message ?: ""))) } } } private suspend fun ApplicationCall.fromAPIResponse(r: APIResponse) { r.headers.forEach { h -> h.value.forEach { response.header(h.key, it) } } response.status(HttpStatusCode.fromValue(r.status)) respond(r.entity) } ================================================ FILE: app/src/main/resources/application.conf ================================================ ktor { deployment { port = 8080 port = ${?app.server.port} } application { modules = [ com.walmartlabs.bigben.app.RunKt.logs com.walmartlabs.bigben.app.RunKt.configure com.walmartlabs.bigben.app.RunKt.app com.walmartlabs.bigben.app.RunKt.routes ] } } ================================================ FILE: app/src/main/resources/bigben-lifecycle.yaml ================================================ pre-init-class: null post-init-class: null ================================================ FILE: app/src/main/resources/bigben.yaml ================================================ # top level modules modules: - name: domain class: com.walmartlabs.bigben.providers.domain.cassandra.CassandraModule - name: processors 
object: com.walmartlabs.bigben.processors.ProcessorRegistry - name: hz class: com.walmartlabs.bigben.utils.hz.Hz - name: scheduler object: com.walmartlabs.bigben.SchedulerModule - name: events object: com.walmartlabs.bigben.EventModule - name: messaging object: com.walmartlabs.bigben.kafka.KafkaModule enabled: ${kafka.module.enabled:-false} - name: cron object: com.walmartlabs.bigben.cron.CronRunner enabled: ${cron.module.enabled:-false} # hazelcast properties hz: template: file://hz.template.xml group: name: bigben-dev password: bigben-dev network: autoIncrementPort: true members: 127.0.0.1 port: 5701 map: store: writeDelay: 30 # message related properties messaging.producer.factory.class: com.walmartlabs.bigben.kafka.KafkaMessageProducerFactory # cassandra related properties cassandra: keyspace: bigben cluster: contactPoints: 127.0.0.1 clusterName: bigben-cluster port: 9042 localDataCenter: null coreConnectionsPerLocalHost: 1 maxConnectionsPerLocalHost: 1 coreConnectionsPerRemoteHost: 1 maxConnectionsPerRemoteHost: 1 maxRequestsPerLocalConnection: 32768 maxRequestsPerRemoteConnection: 2048 newLocalConnectionThreshold: 3000 newRemoteConnectionThreshold: 400 poolTimeoutMillis: 0 keepTCPConnectionAlive: true connectionTimeOut: 5000 readTimeout: 12000 reconnectPeriod: 5 username: null password: null downgradingConsistency: false writeConsistency: LOCAL_ONE readConsistency: LOCAL_ONE # kafka consumer properties kafka: consumers: - num.consumers: ${num.consumers:-8} processor.impl.class: com.walmartlabs.bigben.kafka.ProcessorImpl topics: ${bigben.inbound.topic.name:-null} max.poll.wait.time: ${max.poll.wait.time:-10000} message.retry.max.count: ${message.retry.max.count:-10} config: key.deserializer: org.apache.kafka.common.serialization.StringDeserializer value.deserializer: org.apache.kafka.common.serialization.StringDeserializer bootstrap.servers: ${bigben.inbound.topic.bootstrap.servers:-null} #fetch.min.bytes: 1 group.id: ${group.id:-bigben-inbound} 
heartbeat.interval.ms: ${heartbeat.interval.ms:-3000} session.timeout.ms: 30000 auto.offset.reset: ${auto.offset.reset:-latest} fetch.max.bytes: 324000 max.poll.interval.ms: 30000 max.poll.records: 100 receive.buffer.bytes: 65536 request.timeout.ms: 60000 #send.buffer.bytes: 131072 enable.auto.commit: ${enable.auto.commit:-false} producer: config: # this is default kafka producer config, these values will be used if not supplied during the tenant registration key.serializer: org.apache.kafka.common.serialization.StringSerializer value.serializer: org.apache.kafka.common.serialization.StringSerializer acks: "1" buffer.memory: 32400 retries: 3 # system properties task: executor: #retry.thread.count: 8 retry.time.units: SECONDS delay: 1 max.retries: 3 backoff.multiplier: 2 app.server.port: 8080 generic.future.max.get.time: 60 events: scheduler.enabled: true schedule.scan.interval.minutes: 1 num.shard.submitters: 8 receiver: shard.size: 1000 lapse.offset.minutes: 0 delete: max.retries: 3 initial.delay: 1 backoff.multiplier: 1 submit: initial.delay: 1 backoff.multiplier: 1 max.retries: 3 processor: max.retries: 3 initial.delay: 1 backoff.multiplier: 2 eager.loading: true tasks: max.events.in.memory: 100000 scheduler.worker.threads: 8 # bucket manager / loader related properties buckets: backlog.check.limit: 1440 # 1 Day background: load.fetch.size: 100 load.wait.interval.seconds: 15 cron: runner: core.pool.size: 8 load: max.retries: 10 delay: 1 backoff.multiplier: 1 time.units: "SECONDS" ================================================ FILE: app/src/main/resources/log4j.xml ================================================ ================================================ FILE: app/src/test/kotlin/com/walmartlabs/bigben/tests/APITests.kt ================================================ package com.walmartlabs.bigben.tests import com.datastax.driver.core.Session import com.fasterxml.jackson.databind.SerializationFeature.INDENT_OUTPUT import com.walmartlabs.bigben.BigBen 
import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.app.EventGenerator import com.walmartlabs.bigben.app.main import com.walmartlabs.bigben.entities.EntityProvider import com.walmartlabs.bigben.entities.EventDeliveryOption.FULL_EVENT import com.walmartlabs.bigben.entities.EventDeliveryOption.PAYLOAD_ONLY import com.walmartlabs.bigben.entities.EventLoader import com.walmartlabs.bigben.entities.EventRequest import com.walmartlabs.bigben.entities.EventResponse import com.walmartlabs.bigben.entities.EventStatus.* import com.walmartlabs.bigben.extns.nowUTC import com.walmartlabs.bigben.kafka.MockMessageProducerFactory import com.walmartlabs.bigben.processors.ProcessorConfig import com.walmartlabs.bigben.processors.ProcessorConfig.Type.* import com.walmartlabs.bigben.utils.fromJson import com.walmartlabs.bigben.utils.json import com.walmartlabs.bigben.utils.stackTraceAsString import com.walmartlabs.bigben.utils.typeRefJson import io.ktor.application.call import io.ktor.application.install import io.ktor.client.HttpClient import io.ktor.client.call.call import io.ktor.client.engine.apache.Apache import io.ktor.client.request.accept import io.ktor.client.request.post import io.ktor.client.request.url import io.ktor.client.response.readText import io.ktor.content.TextContent import io.ktor.features.ContentNegotiation import io.ktor.features.StatusPages import io.ktor.http.ContentType.Application.Json import io.ktor.http.HttpMethod import io.ktor.http.HttpMethod.Companion.Get import io.ktor.http.HttpMethod.Companion.Post import io.ktor.http.HttpStatusCode import io.ktor.http.HttpStatusCode.Companion.OK import io.ktor.jackson.jackson import io.ktor.request.contentType import io.ktor.request.header import io.ktor.request.receive import io.ktor.response.respond import io.ktor.routing.post import io.ktor.routing.routing import io.ktor.server.engine.embeddedServer import io.ktor.server.netty.Netty import kotlinx.coroutines.runBlocking import 
org.testng.annotations.AfterClass import org.testng.annotations.BeforeClass import org.testng.annotations.Test import java.time.ZonedDateTime import java.util.UUID.randomUUID import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.SynchronousQueue import kotlin.concurrent.thread import kotlin.test.assertEquals import kotlin.test.assertFalse import kotlin.test.assertTrue class APITests { companion object { init { System.setProperty("bigben.configs", "file://bigben-api-test.yaml") thread { main(emptyArray()) } BigBen.init() } } private val client = HttpClient(Apache) private val server = "http://localhost:8080" private val responses = ConcurrentHashMap>() private val payloadResponse = SynchronousQueue() @BeforeClass private fun `clean up db`() { println("cleaning up the db") (module>().unwrap() as Session).apply { execute("truncate bigben.events;") execute("truncate bigben.lookups;") execute("truncate bigben.buckets;") execute("truncate bigben.kv_table;") } thread { embeddedServer(Netty, 9090) { println("starting test server") install(ContentNegotiation) { jackson { enable(INDENT_OUTPUT) } } install(StatusPages) { exception { e -> //e.printStackTrace() call.response.status(HttpStatusCode.InternalServerError) call.respond(mapOf("message" to ((e.message ?: "")), "stacktrace" to e.stackTraceAsString())) } } routing { post("/test") { if (call.request.queryParameters["error"] != null) { throw IllegalArgumentException("test error") } val er = call.receive() assertEquals(call.request.header("header1"), "value1") assertEquals(call.request.header("header2"), "value2") assertEquals(call.request.contentType().contentType, Json.contentType) assertEquals(call.request.contentType().contentSubtype, Json.contentSubtype) responses[er.id!!]!!.put(er) call.respond(OK, mapOf("status" to "OK")) } post("/payload") { if (call.request.queryParameters["error"] != null) { throw IllegalArgumentException("test error") } val payload = call.receive() 
assertEquals(call.request.header("header1"), "value1") assertEquals(call.request.header("header2"), "value2") assertEquals(call.request.contentType().contentType, Json.contentType) assertEquals(call.request.contentType().contentSubtype, Json.contentSubtype) payloadResponse.put(payload) call.respond(OK, mapOf("status" to "OK")) } } }.start(true) } } @AfterClass fun teardown() { client.close() } @Test(enabled = true) fun `test events at the same time`() { val tenant = "test" assertEquals(runBlocking { client.call { url("$server/events/tenant/register") accept(Json) method = Post body = TextContent( ProcessorConfig( tenant, CUSTOM_CLASS, mapOf("eventProcessorClass" to "com.walmartlabs.bigben.processors.NoOpCustomClassProcessor") ).json(), Json ) }.response.status.value }, 200) // schedule 1000 events at exactly same time at the start of the minute: runBlocking { client.post { url("$server/generation/random") accept(Json) body = TextContent(EventGenerator.EventGeneration("PT1M", "PT0S", 1000, tenant).json(), Json) } } // schedule 1000 events at exactly same time at the start of the minute + 30 seconds: val bucket = runBlocking { client.post { url("$server/generation/random") accept(Json) body = TextContent(EventGenerator.EventGeneration("PT1M30S", "PT0S", 1000, tenant).json(), Json) } }.run { typeRefJson>(this).run { ZonedDateTime.parse(entries.first().key) } } Thread.sleep(2 * 60 * 1000) // sleep for 2 minutes var total = 0 (0..1).forEach { // 2000 events -> 2 shards var l = module().load(bucket, 0, 400).get() while (l.second.isNotEmpty()) { l.second.forEach { assertEquals(it.bucketId, bucket) assertEquals(it.status, PROCESSED) assertTrue { it.eventTime == bucket || it.eventTime == bucket.plusSeconds(30) } total++ } l = module().load(bucket, 0, 400, l.second.last().eventTime!!, l.second.last().id!!, l.first) .get() } } } @Test fun `test unknown tenant rejection`() { val (status, content) = client.call("/events/schedule", listOf(EventRequest("id123", 
nowUTC().toString(), "ABC"))) assertEquals(status, 400) assertEquals(typeRefJson>(content)[0].eventStatus, REJECTED) } @Test fun `test missing tenant`() { val (status, content) = client.call("/events/schedule", listOf(EventRequest("id123", nowUTC().toString()))) assertEquals(status, 400) assertEquals(typeRefJson>(content)[0].eventStatus, REJECTED) } @Test fun `test missing event time`() { val (status, content) = client.call("/events/schedule", listOf(EventRequest("id123", tenant = "ABC"))) assertEquals(status, 400) assertEquals(typeRefJson>(content)[0].eventStatus, REJECTED) } @Test fun `test event time wrong format`() { val (status, content) = client.call("/events/schedule", listOf(EventRequest("id123", "time", tenant = "ABC"))) assertEquals(status, 400) assertEquals(typeRefJson>(content)[0].eventStatus, REJECTED) } @Test fun `test event time in past`() { val tenant = "http" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, HTTP, mapOf( "url" to "http://localhost:9090/test", "headers" to mapOf("header1" to "value1", "header2" to "value2") ) ) ).first, 200 ) val eventId = "id123" responses[eventId] = SynchronousQueue() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, nowUTC().minusMinutes(1).toString(), tenant))) assertEquals(status, 200) assertEquals(typeRefJson>(content)[0].eventStatus, TRIGGERED) val er = responses[eventId]!!.take() assertEquals(er.eventStatus, TRIGGERED) assertEquals(er.deliveryOption, FULL_EVENT) assertEquals(er.tenant!!, tenant) } @Test fun `test event time in past - payload only`() { val tenant = "http3" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, HTTP, mapOf( "url" to "http://localhost:9090/payload", "headers" to mapOf("header1" to "value1", "header2" to "value2") ) ) ).first, 200 ) val eventId = "id234" responses[eventId] = SynchronousQueue() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, 
nowUTC().minusMinutes(1).toString(), tenant, payload = "testP", deliveryOption = PAYLOAD_ONLY))) assertEquals(status, 200) assertEquals(typeRefJson>(content)[0].eventStatus, TRIGGERED) val payload = payloadResponse.take() assertEquals(payload, "testP") } @Test fun `test event - null payload with payload only option`() { val tenant = "http2" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, HTTP, mapOf( "url" to "http://localhost:9090/test", "headers" to mapOf("header1" to "value1", "header2" to "value2") ) ) ).first, 200 ) val eventId = randomUUID().toString() responses[eventId] = SynchronousQueue() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, nowUTC().plusMinutes(1).toString(), tenant, deliveryOption = PAYLOAD_ONLY))) assertEquals(status, 400) assertEquals(typeRefJson>(content)[0].eventStatus, REJECTED) } @Test fun `test find and dryrun APIs`() { val tenant = "http4" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, HTTP, mapOf( "url" to "http://localhost:9090/test", "headers" to mapOf("header1" to "value1", "header2" to "value2") ) ) ).first, 200 ) val eventId = randomUUID().toString() responses[eventId] = SynchronousQueue() val eventTime = nowUTC().plusMinutes(100000).toString() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, eventTime, tenant, payload = "P1"))) assertEquals(status, 200) assertEquals(typeRefJson>(content)[0].eventStatus, ACCEPTED) val (s, c) = client.call("/events/find?tenant=$tenant&id=$eventId", null, Get) assertEquals(s, 200) val er = EventResponse::class.java.fromJson(c) assertEquals(er.eventStatus, UN_PROCESSED) assertEquals(er.payload, "P1") assertEquals(er.tenant, tenant) assertFalse { er.eventId!!.startsWith("a-") } assertEquals(er.deliveryOption, FULL_EVENT) assertEquals(er.eventTime, eventTime) val (s1, c1) = client.call("/events/dryrun?tenant=$tenant&id=$eventId", null) assertEquals(s1, 200) val er1 = 
EventResponse::class.java.fromJson(c1) assertEquals(er1.eventStatus, UN_PROCESSED) assertEquals(er1.payload, "P1") assertEquals(er1.tenant, tenant) assertFalse { er1.eventId!!.startsWith("a-") } assertEquals(er1.deliveryOption, FULL_EVENT) assertEquals(er1.eventTime, eventTime) responses[eventId]!!.take().apply { assertEquals(eventStatus, TRIGGERED) assertEquals(payload, "P1") assertEquals(this.tenant, tenant) assertFalse { this.eventId!!.startsWith("a-") } assertEquals(deliveryOption, FULL_EVENT) assertEquals(eventTime, eventTime) } } @Test fun `test find and dryrun APIs - payload only`() { val tenant = "http5" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, HTTP, mapOf( "url" to "http://localhost:9090/payload", "headers" to mapOf("header1" to "value1", "header2" to "value2") ) ) ).first, 200 ) val eventId = randomUUID().toString() responses[eventId] = SynchronousQueue() val eventTime = nowUTC().plusMinutes(100000).toString() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, eventTime, tenant, payload = "P1", deliveryOption = PAYLOAD_ONLY))) assertEquals(status, 200) assertEquals(typeRefJson>(content)[0].eventStatus, ACCEPTED) val (s, c) = client.call("/events/find?tenant=$tenant&id=$eventId", null, Get) assertEquals(s, 200) val er = EventResponse::class.java.fromJson(c) assertEquals(er.eventStatus, UN_PROCESSED) assertEquals(er.payload, "P1") assertEquals(er.tenant, tenant) assertTrue { er.eventId!!.startsWith("a-") } assertEquals(er.deliveryOption, PAYLOAD_ONLY) assertEquals(er.eventTime, eventTime) val (s1, c1) = client.call("/events/dryrun?tenant=$tenant&id=$eventId", null) assertEquals(s1, 200) val er1 = EventResponse::class.java.fromJson(c1) assertEquals(er1.eventStatus, UN_PROCESSED) assertEquals(er1.payload, "P1") assertEquals(er1.tenant, tenant) assertTrue { er1.eventId!!.startsWith("a-") } assertEquals(er1.deliveryOption, PAYLOAD_ONLY) assertEquals(er1.eventTime, eventTime) 
payloadResponse.take().apply { assertEquals(this, "P1") } } @Test fun `test event kafka tenant API`() { val tenant = "kafka1" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, MESSAGING, mapOf( "topic" to "topic1", "bootstrap.servers" to "localhost:9092" ) ) ).first, 200 ) val eventId = randomUUID().toString() responses[eventId] = SynchronousQueue() val eventTime = nowUTC().minusMinutes(1).toString() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, eventTime, tenant, payload = "P1"))) assertEquals(status, 200) assertEquals(typeRefJson>(content)[0].eventStatus, TRIGGERED) Thread.sleep(2000) val er = MockMessageProducerFactory.LAST_MESSAGE.get() assertEquals(er.eventStatus, TRIGGERED) assertEquals(er.payload, "P1") assertEquals(er.tenant, tenant) assertEquals(er.deliveryOption, FULL_EVENT) assertEquals(er.eventTime, eventTime) } @Test fun `test event kafka tenant API - payload only`() { val tenant = "kafka2" assertEquals( client.call( "/events/tenant/register", ProcessorConfig( tenant, MESSAGING, mapOf( "topic" to "topic2", "bootstrap.servers" to "localhost:9092" ) ) ).first, 200 ) val eventId = randomUUID().toString() responses[eventId] = SynchronousQueue() val eventTime = nowUTC().minusMinutes(1).toString() val (status, content) = client.call("/events/schedule", listOf(EventRequest(eventId, eventTime, tenant, payload = "P2", deliveryOption = PAYLOAD_ONLY))) assertEquals(status, 200) assertEquals(typeRefJson>(content)[0].eventStatus, TRIGGERED) val er = MockMessageProducerFactory.LAST_MESSAGE.get() assertEquals(er.eventStatus, TRIGGERED) assertEquals(er.payload, "P2") assertEquals(er.tenant, tenant) assertEquals(er.deliveryOption, PAYLOAD_ONLY) assertEquals(er.eventTime, eventTime) } private fun HttpClient.call(url: String, body: Any?, method: HttpMethod = Post): Pair { return runBlocking { client.call { url("$server$url") accept(Json) this.method = method body?.let { this.body = TextContent(it.json(), 
Json) } }.response.run { status.value to this.readText().apply { println("response: $this") } } } } } ================================================ FILE: app/src/test/kotlin/com/walmartlabs/bigben/tests/BigBenTests.kt ================================================ /*- * #%L * BigBen:app * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.tests import com.datastax.driver.core.Session import com.google.common.util.concurrent.ListeningScheduledExecutorService import com.sun.net.httpserver.HttpServer import com.walmartlabs.bigben.BigBen import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.api.EventService import com.walmartlabs.bigben.core.BucketManager import com.walmartlabs.bigben.core.BucketsLoader import com.walmartlabs.bigben.core.ScheduleScanner import com.walmartlabs.bigben.entities.* import com.walmartlabs.bigben.entities.EventStatus.* import com.walmartlabs.bigben.entities.Mode.REMOVE import com.walmartlabs.bigben.extns.bucket import com.walmartlabs.bigben.extns.fetch import com.walmartlabs.bigben.extns.nowUTC import com.walmartlabs.bigben.extns.save import com.walmartlabs.bigben.processors.NoOpCustomClassProcessor import com.walmartlabs.bigben.processors.ProcessorConfig import com.walmartlabs.bigben.processors.ProcessorConfig.Type.* import com.walmartlabs.bigben.utils.commons.Props.int import 
com.walmartlabs.bigben.utils.commons.Props.map import com.walmartlabs.bigben.utils.commons.TaskExecutor import com.walmartlabs.bigben.utils.fromJson import com.walmartlabs.bigben.utils.json import org.apache.commons.text.RandomStringGenerator import org.apache.kafka.clients.consumer.KafkaConsumer import org.apache.kafka.clients.producer.KafkaProducer import org.apache.kafka.clients.producer.ProducerRecord import org.testng.annotations.BeforeClass import org.testng.annotations.BeforeMethod import org.testng.annotations.Test import java.lang.Thread.sleep import java.net.InetSocketAddress import java.time.ZonedDateTime import java.util.* import java.util.concurrent.CountDownLatch import java.util.concurrent.ScheduledThreadPoolExecutor import java.util.concurrent.TimeUnit.MINUTES import java.util.concurrent.atomic.AtomicInteger import kotlin.reflect.full.memberProperties import kotlin.reflect.jvm.isAccessible import kotlin.test.assertEquals import kotlin.test.assertTrue /** * Created by smalik3 on 4/11/18 */ class BigBenTests { companion object { init { System.setProperty("bigben.configs", "file://bigben-test.yaml,file://bigben.yaml") BigBen.init() } private val eventService = BigBen.module() } @BeforeClass fun `set up tenant`() { `clean up db`() eventService.registerProcessor( ProcessorConfig( "default", CUSTOM_CLASS, mapOf("eventProcessorClass" to NoOpCustomClassProcessor::class.java.name) ) ).apply { assertEquals(this.status, 200) } println("tenant set up done") } @BeforeMethod private fun `clean up db`() { println("cleaning up the db") (module>().unwrap() as Session).apply { execute("truncate bigben.events;") execute("truncate bigben.lookups;") execute("truncate bigben.buckets;") execute("truncate bigben.kv_table;") } } @Test fun `event service schedule and find API`() { val eventTime = nowUTC().plusMinutes(3) val tenant = "default" val xrefId = "abc" //add: eventService.schedule(listOf(EventRequest(xrefId, eventTime.toString(), tenant, "P"))).apply { 
assertEquals(status, 200) } eventService.find(xrefId, tenant).apply { assertEquals(status, 200) (entity as EventResponse).apply { assertEquals(ZonedDateTime.parse(this.eventTime), eventTime) assertEquals(payload, "P") } } //update payload: eventService.schedule(listOf(EventRequest(xrefId, eventTime.toString(), tenant, "P1"))).apply { assertEquals(status, 200) } eventService.find(xrefId, tenant).apply { assertEquals(status, 200) (entity as EventResponse).apply { assertEquals(ZonedDateTime.parse(this.eventTime), eventTime) assertEquals(payload, "P1") } } // update time: eventService.schedule(listOf(EventRequest(xrefId, eventTime.plusMinutes(1).toString(), tenant, "P2"))).apply { assertEquals(status, 200) } eventService.find(xrefId, tenant).apply { assertEquals(status, 200) (entity as EventResponse).apply { assertEquals(ZonedDateTime.parse(this.eventTime), eventTime.plusMinutes(1)) assertEquals(payload, "P2") } } //remove event: eventService.schedule(listOf(EventRequest(xrefId, eventTime.plusMinutes(1).toString(), tenant, "P2", REMOVE))) .apply { assertEquals(status, 200) } eventService.find(xrefId, tenant).apply { assertEquals(status, 404) } } @Test fun `sharding works as expected`() { val r = Random() val time = nowUTC().plusMinutes(2).bucket() (0..100).forEach { eventService.schedule(listOf(EventRequest("id_$it", time.plusSeconds(r.nextInt(60).toLong()).toString(), "default", "Payload_$it"))) } (0..100).forEach { i -> eventService.find("id_$i", "default").apply { assertEquals(status, 200) fetch { it.xrefId = "id_$i"; it.tenant = "default" }.get()!!.apply { assertEquals(shard, i / int("events.receiver.shard.size")) fetch { it.bucketId = time; it.shard = shard; it.eventTime = eventTime; it.id = eventId }.get()!!.apply { assertEquals(status, UN_PROCESSED) } } } } } @Test fun `test bucket loader`() { val bucketId = nowUTC().bucket() val toBeLoaded = (1..10).map { bucketId.minusMinutes(it.toLong()) }.toSet() save { it.bucketId = bucketId.minusMinutes(3); it.count = 100; 
it.status = PROCESSED }.get()!! val latch = CountDownLatch(10) val now = System.currentTimeMillis() BucketsLoader(10, 5, 60, bucketId) { try { assertTrue { toBeLoaded.contains(it.bucketId) } if (it.bucketId == bucketId.minusMinutes(3)) { assertEquals(it.status, PROCESSED) assertEquals(it.count, 100) } else { assertEquals(it.status, EMPTY) } latch.countDown() } catch (e: Throwable) { e.printStackTrace() } }.run() if (!latch.await(1, MINUTES)) throw IllegalStateException("buckets registry did not complete on time") assertTrue { System.currentTimeMillis() - now > 1 } } @Test fun `test bucket manager`() { val time = nowUTC().bucket() println("time : $time") val range = 0..9 val buckets = range.map { time.minusMinutes(it.toLong()) }.toSortedSet() println("buckets: $buckets") val shards = range.toList() // test back ground load range.forEach { i -> save { it.bucketId = time.minusMinutes(i.toLong()); it.count = 100L; it.status = UN_PROCESSED }.get()!! } val bm = BucketManager(10, 2 * 60, 60, module()) bm.getProcessableShardsForOrBefore(time).get()!! sleep(2000) bm.getProcessableShardsForOrBefore(time).get()!!.apply { assertEquals(this.keySet().toSortedSet(), buckets.toMutableSet().apply { add(time) }.toSortedSet()) this.keySet().forEach { assertEquals(this[it].toList(), shards) } } // test purge: (1..5).forEach { i -> save { it.bucketId = time.plusMinutes(i.toLong()); it.count = 100L; it.status = UN_PROCESSED }.get()!! } (1..5).forEach { bm.getProcessableShardsForOrBefore(time.plusMinutes(it.toLong())).get()!! } bm.purgeIfNeeded() (1..5).forEach { val b = bm.getProcessableShardsForOrBefore(time.plusMinutes(1)).get()!! assertEquals(b.keySet().size, 10) assertEquals(buckets - buckets.take(5) + (1..5).map { time.plusMinutes(it.toLong()) }.toSortedSet(), b.keySet().toSortedSet()) } } @Test fun `test http processor - ok case - past event`() { var server: HttpServer? 
= null try { val port = 8383 eventService.registerProcessor( ProcessorConfig( "http", HTTP, mapOf ( "url" to "http://localhost:$port/test", "headers" to mapOf("header" to "Header1") ) ) ).apply { assertEquals(this.status, 200) } val eReq = EventRequest("id123", nowUTC().minusSeconds(1).toString(), "http", "Payload1") println("event request: $eReq") server = HttpServer.create(InetSocketAddress(port), 0) val latch = CountDownLatch(1) server.createContext("/test") { try { val eResp = EventResponse::class.java.fromJson(String(it.requestBody.readBytes())) println("event response: $eResp") assertEquals(it.requestHeaders.getFirst("header"), "Header1") assertEquals(eReq.id, eResp.id) assertEquals(eReq.eventTime, eResp.eventTime) assertEquals(eReq.payload, eResp.payload) assertEquals(eReq.tenant, eResp.tenant) assertEquals(eReq.mode, eResp.mode) assertTrue(eResp.eventId == null) assertTrue(eResp.eventStatus == TRIGGERED) mapOf("status" to "OK").json().apply { it.sendResponseHeaders(200, length.toLong()) it.responseBody.write(toByteArray()) } } catch (e: Throwable) { e.printStackTrace() mapOf("status" to "error").json().apply { it.sendResponseHeaders(500, length.toLong()) it.responseBody.write(toByteArray()) } throw AssertionError("test failed") } finally { it.close() } latch.countDown() } server.start() eventService.schedule(listOf(eReq)).apply { assertEquals(200, status) } if (!latch.await(1, MINUTES)) throw AssertionError("latch not down") } finally { server?.run { stop(0) } } } @Test fun `test http processor - ok case - future event`() { var server: HttpServer? 
= null try { val port = 8383 eventService.registerProcessor( ProcessorConfig( "http", HTTP, mapOf ( "url" to "http://localhost:$port/test", "headers" to mapOf("header" to "Header1") ) ) ).apply { assertEquals(this.status, 200) } val time = nowUTC().plusMinutes(1).withSecond(10).withNano(0) val eReq = EventRequest("id123", time.toString(), "http", "Payload1") println("event request: $eReq") server = HttpServer.create(InetSocketAddress(port), 0) val latch = CountDownLatch(1) server.createContext("/test") { try { val eResp = EventResponse::class.java.fromJson(String(it.requestBody.readBytes())) println("event response: $eResp") assertEquals(it.requestHeaders.getFirst("header"), "Header1") assertEquals(eReq.id, eResp.id) assertEquals(eReq.eventTime, eResp.eventTime) assertEquals(eReq.payload, eResp.payload) assertEquals("Payload2", eResp.payload) assertEquals(eReq.tenant, eResp.tenant) assertEquals(eReq.mode, eResp.mode) assertTrue(eResp.eventId != null) assertTrue(eResp.eventStatus == TRIGGERED) mapOf("status" to "OK").json().apply { it.sendResponseHeaders(200, length.toLong()) it.responseBody.write(toByteArray()) } } catch (e: Throwable) { e.printStackTrace() mapOf("status" to "error").json().apply { it.sendResponseHeaders(500, length.toLong()) it.responseBody.write(toByteArray()) } throw AssertionError("test failed") } finally { it.close() } latch.countDown() } server.start() eventService.schedule(listOf(eReq)).apply { assertEquals(200, status) } eventService.schedule(listOf(eReq.apply { payload = "Payload2" })).apply { assertEquals(200, status) } val bm = BucketManager(1, 2 * 60, 60, module()) println("manually scheduling $time") ScheduleScanner(module()).scan(time.withSecond(0).withNano(0), bm) if (!latch.await(2, MINUTES)) throw AssertionError("latch not down") } finally { server?.run { stop(0) } } } @Test fun `test http processor - error case`() { var server: HttpServer? 
= null try { val port = 8383 eventService.registerProcessor( ProcessorConfig( "http", HTTP, mapOf ( "url" to "http://localhost:$port/test", "header" to "Header1" ) ) ).apply { assertEquals(this.status, 200) } val eReq = EventRequest("id123", nowUTC().minusSeconds(1).toString(), "http", "Payload1") server = HttpServer.create(InetSocketAddress(port), 0) val latch = CountDownLatch(1) val tries = AtomicInteger() server.createContext("/test") { try { tries.incrementAndGet() mapOf("status" to "error").json().apply { it.sendResponseHeaders(500, length.toLong()) it.responseBody.write(toByteArray()) } } finally { it.close() } latch.countDown() } server.start() eventService.schedule(listOf(eReq)).apply { assertEquals(200, status) } if (!latch.await(1, MINUTES)) throw AssertionError("latch not down") var passed = false loop@ for (i in (1..10)) { if (tries.get() != 4) sleep(1000) else { passed = true; break@loop } } assertTrue(passed) } finally { server?.run { stop(0) } } } @Test fun `test http processor - bad request case`() { var server: HttpServer? 
= null try { val port = 8383 eventService.registerProcessor( ProcessorConfig( "http", HTTP, mapOf ( "url" to "http://localhost:$port/test", "header" to "Header1" ) ) ).apply { assertEquals(this.status, 200) } val eReq = EventRequest("id123", nowUTC().minusSeconds(1).toString(), "http", "Payload1") server = HttpServer.create(InetSocketAddress(port), 0) val latch = CountDownLatch(1) val tries = AtomicInteger() server.createContext("/test") { try { tries.incrementAndGet() mapOf("status" to "error").json().apply { it.sendResponseHeaders(400, length.toLong()) it.responseBody.write(toByteArray()) } } finally { it.close() } latch.countDown() } server.start() eventService.schedule(listOf(eReq)).apply { assertEquals(200, status) } if (!latch.await(1, MINUTES)) throw AssertionError("latch not down") var passed = false loop@ for (i in (1..5)) { if (tries.get() != 1) sleep(1000) else { passed = true; break@loop } } sleep(2000) assertTrue(passed) } finally { server?.run { stop(0) } } } @Test fun `test kafka integration - ok case`() { eventService.registerProcessor( ProcessorConfig( "kafka", MESSAGING, mapOf ( "topic" to "test", "brokers.url" to "" ) ) ).apply { assertEquals(this.status, 200) } val eReq = EventRequest("id123", nowUTC().minusSeconds(1).toString(), "kafka", "Payload1") eventService.schedule(listOf(eReq)).apply { assertEquals(200, status) } } @Test fun `test kafka integration - error case`() { val x: ScheduledThreadPoolExecutor = (TaskExecutor.Companion::class.memberProperties .filter { it.name == "RETRY_POOL" }[0] .apply { isAccessible = true }.get(TaskExecutor.Companion) as ListeningScheduledExecutorService) .let { it::class.java.getDeclaredField("delegate").apply { isAccessible = true }.get(it) } as ScheduledThreadPoolExecutor val current = x.completedTaskCount eventService.registerProcessor( ProcessorConfig( "kafka", MESSAGING, mapOf ( "topic" to "test", "brokers.url" to "", "fail" to true ) ) ).apply { assertEquals(this.status, 200) } val eReq = 
EventRequest("id123", nowUTC().minusSeconds(1).toString(), "kafka", "Payload1") eventService.schedule(listOf(eReq)).apply { assertEquals(200, status) } sleep(10000) assertTrue(x.completedTaskCount - current >= 3.toLong()) } @Test(enabled = false) fun `test kafka consumer`() { eventService.registerProcessor( ProcessorConfig( "kafka", MESSAGING, mapOf ( "topic" to "outbound", "brokers.url" to "localhost:9092" ) ) ).apply { assertEquals(this.status, 200) } /*val consumer = (BigBen.messageProcessors[0] as MockKafkaProcessor).consumer consumer.rebalance(setOf(TopicPartition("inbound", 1))) consumer.updateBeginningOffsets(mapOf(TopicPartition("inbound", 0) to 1.toLong())) consumer.updateBeginningOffsets(mapOf(TopicPartition("inbound", 0) to Long.MAX_VALUE))*/ val eReq = EventRequest("id123", nowUTC().minusSeconds(1).toString(), "kafka", "Payload1") println(eReq.json()) //sleep(Long.MAX_VALUE) } @Test(enabled = false) fun `end to end kafka`() { eventService.registerProcessor( ProcessorConfig( "kafka", MESSAGING, mapOf ( "topic" to "outbound", "bootstrap.servers" to "localhost:9092" ) ) ).apply { assertEquals(this.status, 200) } val producer = KafkaProducer(map("kafka.producer.config").mapKeys { it.key.removePrefix("kafka.producer.config.") } + mapOf ( "topic" to "outbound", "bootstrap.servers" to "localhost:9092" )) (1..100).forEach { println("sending $it") val eReq = EventRequest("id123", nowUTC().minusSeconds(1).toString(), "kafka", RandomStringGenerator.Builder().build().generate(1024)) producer.send(ProducerRecord("outbound", eReq.json())).get() } sleep(3000) } @Test(enabled = false) fun `test consumer`() { val consumer = KafkaConsumer(map("kafka.consumer.config") + mapOf("group.id" to UUID.randomUUID().toString())) consumer.subscribe(setOf("outbound")) while (true) { println("polling outbound") val records = consumer.poll(3000) println(records.count()) consumer.commitSync() } } } ================================================ FILE: 
app/src/test/kotlin/com/walmartlabs/bigben/tests/KafkaTests.kt ================================================ /*- * #%L * BigBen:commons * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.tests import com.datastax.driver.core.Session import com.google.common.util.concurrent.Futures import com.google.common.util.concurrent.ListenableFuture import com.walmartlabs.bigben.BigBen import com.walmartlabs.bigben.BigBen.entityProvider import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.api.EventService import com.walmartlabs.bigben.entities.EntityProvider import com.walmartlabs.bigben.entities.Event import com.walmartlabs.bigben.entities.EventRequest import com.walmartlabs.bigben.entities.EventResponse import com.walmartlabs.bigben.kafka.KafkaMessageProcessor import com.walmartlabs.bigben.kafka.ProcessorImpl import com.walmartlabs.bigben.processors.NoOpCustomClassProcessor import com.walmartlabs.bigben.processors.ProcessorConfig import com.walmartlabs.bigben.processors.ProcessorConfig.Type.CUSTOM_CLASS import com.walmartlabs.bigben.processors.ProcessorConfig.Type.MESSAGING import com.walmartlabs.bigben.processors.ProcessorRegistry import com.walmartlabs.bigben.utils.commons.PropsLoader import com.walmartlabs.bigben.utils.fromJson import com.walmartlabs.bigben.utils.json import 
org.apache.kafka.clients.consumer.ConsumerRecord import org.testng.annotations.BeforeMethod import org.testng.annotations.Test import java.time.ZonedDateTime import java.util.concurrent.CountDownLatch import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicInteger /** * Created by smalik3 on 9/19/18 */ class KafkaTests { companion object { val latch = CountDownLatch(1) init { System.setProperty("bigben.props", "file://bigben-kafka-test.yaml") BigBen.init() } } @BeforeMethod private fun `clean up db`() { println("cleaning up the db") try { (BigBen.module>().unwrap() as Session).apply { execute("truncate bigben.events;") execute("truncate bigben.lookups;") execute("truncate bigben.buckets;") execute("truncate bigben.kv_table;") } } catch (e: Throwable) { e.printStackTrace() throw e } println("setting up tenant") module().registerProcessor(ProcessorConfig("tenant1", CUSTOM_CLASS, mapOf("eventProcessorClass" to NoOpCustomClassProcessor::class.java.name))) } @Test(enabled = false) fun `test kafka integration`() { println("in kafka tests") module().registerProcessor(ProcessorConfig("tenant2", MESSAGING, mapOf( "topic" to "topic1", "bootstrap.servers" to "localhost:9092" ))) (1..1).forEach { entityProvider().let { it.raw(it.selector(Event::class.java)) }.apply { id = "id_$it" eventTime = ZonedDateTime.parse("2018-09-19T20:42Z") payload = "Payload_$it" tenant = "tenant2" xrefId = "xref_$id" }.apply { module().invoke(this).get() } } if (!latch.await(2, TimeUnit.MINUTES)) throw AssertionError("test failed") } } class MockProcessorImpl(props: PropsLoader) : KafkaMessageProcessor(props) { private val impl = ProcessorImpl(props) private val counter = AtomicInteger(0) init { println("---> starting the kafka consumer") println(EventRequest("id123", "2018-09-19T20:42Z", "tenant1", "Payload1").json()) } override fun process(cr: ConsumerRecord): ListenableFuture { println("got a new record: ${cr.value()}") val er = EventResponse::class.java.fromJson(cr.value()) 
val cr2 = ConsumerRecord(cr.topic(), cr.partition(), cr.offset(), cr.key(), er.run { EventRequest(id, eventTime, tenant, payload).json() }) impl.process(cr2).get() counter.incrementAndGet() if (counter.get() == 10) KafkaTests.latch.countDown() return Futures.immediateFuture(cr) } } ================================================ FILE: app/src/test/resources/bigben-api-test.yaml ================================================ # top level modules modules: - name: domain class: com.walmartlabs.bigben.providers.domain.cassandra.CassandraModule - name: processors object: com.walmartlabs.bigben.processors.ProcessorRegistry - name: hz class: com.walmartlabs.bigben.utils.hz.Hz - name: scheduler object: com.walmartlabs.bigben.SchedulerModule - name: events object: com.walmartlabs.bigben.EventModule - name: messaging object: com.walmartlabs.bigben.kafka.KafkaModule enabled: false - name: cron object: com.walmartlabs.bigben.cron.CronRunner enabled: false # hazelcast properties hz: template: file://hz.template.xml group: name: bigben-dev password: bigben-dev network: autoIncrementPort: true members: 127.0.0.1 port: 5701 map: store: writeDelay: 30 # cassandra related properties cassandra: keyspace: bigben cluster: contactPoints: 127.0.0.1 clusterName: bigben-cluster port: 9042 localDataCenter: null coreConnectionsPerHost: 8 maxHostsPerConnection: 32768 keepTCPConnectionAlive: true connectionTimeOut: 5000 readTimeout: 12000 reconnectPeriod: 5 username: null password: null downgradingConsistency: false writeConsistency: "LOCAL_QUORUM" readConsistency: "LOCAL_QUORUM" # kafka related properties kafka: consumers: - num.consumers: 8 processor.class: com.walmartlabs.bigben.kafka.ProcessorImpl topics: null max.poll.wait.time: 10000 message.retry.max.count: 10 unknown.exception.retries: 3 config: key.deserializer: org.apache.kafka.common.serialization.StringDeserializer value.deserializer: org.apache.kafka.common.serialization.StringDeserializer bootstrap.servers: null 
#fetch.min.bytes: 1 group.id: bigben-inbound #heartbeat.interval.ms: 3000 session.timeout.ms: 30000 auto.offset.reset: earliest fetch.max.bytes: 324000 max.poll.interval.ms: 30000 max.poll.records: 100 receive.buffer.bytes: 65536 request.timeout.ms: 60000 #send.buffer.bytes: 131072 enable.auto.commit: false producer: config: # this is default kafka producer config, these values will be used if not supplied during the tenant registration key.serializer: org.apache.kafka.common.serialization.StringSerializer value.serializer: org.apache.kafka.common.serialization.StringSerializer acks: "1" buffer.memory: 32400 retries: 3 # system properties task: executor: #retry.thread.count: 8 retry.time.units: SECONDS delay: 1 max.retries: 3 backoff.multiplier: 2 # scheduler / event related properties events: scheduler.enabled: true schedule.scan.interval.minutes: 1 num.shard.submitters: 8 receiver: shard.size: 1000 lapse.offset.minutes: 0 delete: max.retries: 3 initial.delay: 1 backoff.multiplier: 1 submit: initial.delay: 1 backoff.multiplier: 1 max.retries: 3 processor: max.retries: 3 initial.delay: 1 backoff.multiplier: 2 eager.loading: true tasks: max.events.in.memory: 100000 scheduler.worker.threads: 8 # bucket manager / loader related properties buckets: backlog.check.limit: 300 background: load.fetch.size: 100 load.wait.interval.seconds: 15 # cron related properties cron: runner: core.pool.size: 8 load: max.retries: 10 delay: 1 backoff.multiplier: 1 time.units: "SECONDS" messaging.producer.factory.class: com.walmartlabs.bigben.kafka.MockMessageProducerFactory generic.future.max.get.time: 60 ================================================ FILE: app/src/test/resources/bigben-kafka-test.yaml ================================================ # top level modules modules: - name: domain class: com.walmartlabs.bigben.providers.domain.cassandra.CassandraModule - name: processors object: com.walmartlabs.bigben.processors.ProcessorRegistry - name: hz class: 
com.walmartlabs.bigben.utils.hz.Hz - name: scheduler object: com.walmartlabs.bigben.SchedulerModule - name: events object: com.walmartlabs.bigben.EventModule - name: kafka object: com.walmartlabs.bigben.kafka.KafkaModule # hazelcast properties hz: template: /hz.template.xml group: name: bigben-dev password: bigben-dev network: autoIncrementPort: true members: 127.0.0.1 port: 5701 map: store: writeDelay: 30 # cassandra related properties cassandra: keyspace: bigben cluster: contactPoints: 127.0.0.1 clusterName: bigben-cluster port: 9042 localDataCenter: null coreConnectionsPerHost: 8 maxHostsPerConnection: 32768 keepTCPConnectionAlive: true connectionTimeOut: 5000 readTimeout: 12000 reconnectPeriod: 5 username: null password: null downgradingConsistency: false writeConsistency: "LOCAL_QUORUM" readConsistency: "LOCAL_QUORUM" # system properties task: executor: #retry.thread.count: 8 retry.time.units: SECONDS delay: 1 max.retries: 3 backoff.multiplier: 2 # kafka related properties kafka: consumers: - num.consumers: 1 processor.impl.class: com.walmartlabs.bigben.tests.MockProcessorImpl topics: topic1 max.poll.wait.time: 10000 message.retry.max.count: 10 unknown.exception.retries: 3 config: key.deserializer: org.apache.kafka.common.serialization.StringDeserializer value.deserializer: org.apache.kafka.common.serialization.StringDeserializer bootstrap.servers: localhost:9092 #fetch.min.bytes: 1 group.id: bigben-kafka-test #heartbeat.interval.ms: 3000 session.timeout.ms: 30000 auto.offset.reset: earliest fetch.max.bytes: 324000 max.poll.interval.ms: 30000 max.poll.records: 100 receive.buffer.bytes: 65536 request.timeout.ms: 60000 #send.buffer.bytes: 131072 enable.auto.commit: false producer: config: # this is default kafka producer config, these values will be used if not supplied during the tenant registration key.serializer: org.apache.kafka.common.serialization.StringSerializer value.serializer: org.apache.kafka.common.serialization.StringSerializer acks: "1" 
buffer.memory: 32400 retries: 3 messaging.producer.factory.class: com.walmartlabs.bigben.kafka.KafkaMessageProducerFactory generic.future.max.get.time: 60 # scheduler / event related properties events: scheduler.enabled: false schedule.scan.interval.minutes: 1 num.shard.submitters: 8 receiver: shard.size: 10 lapse.offset.minutes: 0 delete: max.retries: 3 initial.delay: 1 backoff.multiplier: 1 submit: initial.delay: 1 backoff.multiplier: 1 max.retries: 3 processor: max.retries: 3 initial.delay: 1 backoff.multiplier: 2 eager.loading: true tasks: max.events.in.memory: 100000 scheduler.worker.threads: 8 # bucket manager / loader related properties buckets: backlog.check.limit: 1 background: load.fetch.size: 10 load.wait.interval.seconds: 1 checkpoint: interval: 60 interval.units: SECONDS ================================================ FILE: app/src/test/resources/bigben-test.yaml ================================================ events: receiver: shard.size: 10 processor.eager.loading: false ================================================ FILE: app/src/test/resources/log4j.xml ================================================ ================================================ FILE: build/configs/log4j.xml ================================================ ================================================ FILE: build/configs/overrides.yaml ================================================ hz: network.members: ${HZ_MEMBER_IPS} cassandra.cluster.contactPoints: ${CASSANDRA_SEED_IPS} bigben.inbound.topic: name: some_topic bootstrap.servers: some_servers #events.scheduler.enabled: false ================================================ FILE: build/docker/Dockerfile ================================================ FROM openjdk:8-jre-alpine ENV APPLICATION_USER bigben RUN adduser -D -g '' $APPLICATION_USER ENV APP_ROOT /dist RUN if [ -d "$APP_ROOT" ]; then rm -Rf $APP_ROOT; fi RUN mkdir $APP_ROOT RUN chown -R $APPLICATION_USER $APP_ROOT USER $APPLICATION_USER COPY 
./build/bin/bigben.jar $APP_ROOT/bigben.jar COPY ./build/docker/start.sh $APP_ROOT/start.sh USER root RUN chmod +x $APP_ROOT/start.sh WORKDIR $APP_ROOT EXPOSE 8080 5701 CMD ["sh", "-c", "$APP_ROOT/start.sh"] ================================================ FILE: build/docker/app_run.sh ================================================ #!/usr/bin/env bash set -e APP_CONTAINER_NAME=${APP_CONTAINER_NAME:-bigben_app} SERVER_PORT=${SERVER_PORT:-8080} HZ_PORT=5701 NUM_INSTANCES=${NUM_INSTANCES:-1} APP_ROOT=/dist BUILD_DIR=${PWD}/.. LOGS_DIR=${LOGS_DIR:-${BUILD_DIR}/../../bigben_logs} HOST_IP=${HOST_IP:-`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'`} CASSANDRA_SEED_IPS=${CASSANDRA_SEED_IPS:-${HOST_IP}} HZ_MEMBER_IPS=${HZ_MEMBER_IPS:-${HOST_IP}} DEFAULT_JAVA_OPTS="-server -XX:+UnlockExperimentalVMOptions -XX:InitialRAMFraction=2 -XX:MinRAMFraction=2 -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+UseStringDeduplication" JAVA_OPTS=${JAVA_OPTS} if [[ "x${JAVA_OPTS}" != "x" ]]; then JAVA_OPTS="${DEFAULT_JAVA_OPTS} ${JAVA_OPTS}" else JAVA_OPTS="${DEFAULT_JAVA_OPTS}" fi echo HOST_IP: ${HOST_IP}, SERVER_PORT: ${SERVER_PORT}, \ HZ_MEMBER_IPS: ${HZ_MEMBER_IPS}, CASSANDRA_SEED_IPS: ${CASSANDRA_SEED_IPS}, \ HZ_PORT: ${HZ_PORT}, NUM_INSTANCES: ${NUM_INSTANCES} function stop() { echo "stopping app servers, if any" i=1 while [[ ${i} -lt $(($NUM_INSTANCES + 1)) ]]; do app_port=$((${SERVER_PORT} + 101 * $((i - 1)))) echo "stopping ${APP_CONTAINER_NAME}_$app_port" docker stop "${APP_CONTAINER_NAME}_$app_port" || true let i=i+1 done } function start() { echo "starting ${NUM_INSTANCES} app node(s)" i=1 while [[ ${i} -lt $(($NUM_INSTANCES + 1)) ]]; do app_port=$((${SERVER_PORT} + 101 * $((i - 1)))) hz_port=$((${HZ_PORT} + i - 1)) echo "starting ${APP_CONTAINER_NAME}_$app_port at app port: $app_port, hz port: $hz_port" docker run -d --rm \ -p ${app_port}:${SERVER_PORT} \ -p ${hz_port}:${HZ_PORT} \ -v 
${BUILD_DIR}/bin/bigben.yaml:${APP_ROOT}/bigben.yaml \ -v ${BUILD_DIR}/configs/overrides.yaml:${APP_ROOT}/overrides.yaml \ -v ${BUILD_DIR}/configs/log4j.xml:${APP_ROOT}/log4j.xml \ -v ${LOGS_DIR}:${APP_ROOT}/logs \ -e HOST_IP="${HOST_IP}" \ -e CASSANDRA_SEED_IPS="${CASSANDRA_SEED_IPS}" \ -e HZ_MEMBER_IPS="${HZ_MEMBER_IPS}" \ -e JAVA_OPTS="${JAVA_OPTS} -Dbigben.configs=uri://${APP_ROOT}/overrides.yaml,uri://${APP_ROOT}/bigben.yaml \ -Dapp.server.port=${SERVER_PORT} \ -Dbigben.log.file=${APP_ROOT}/logs/bigben_app_${app_port}.log \ -Dbigben.log.config=${APP_ROOT} \ -Dhazelcast.local.publicAddress=${HOST_IP}:${hz_port}" \ --name "${APP_CONTAINER_NAME}_$app_port" sandeepmalik/bigben:1 let i=i+1 done echo "waiting for app servers to boot up" i=1 while [[ ${i} -lt $(($NUM_INSTANCES + 1)) ]]; do app_server_docker_ip=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${APP_CONTAINER_NAME}_$app_port"` echo "waiting for app server ${APP_CONTAINER_NAME}_$app_port, docker ip: $app_server_docker_ip" docker run --rm dadarek/wait-for-dependencies ${app_server_docker_ip}:${SERVER_PORT} let i=i+1 done } if [[ $1 == "start" ]]; then start elif [[ $1 == "stop" ]]; then stop else stop start fi ================================================ FILE: build/docker/cassandra_run.sh ================================================ #!/usr/bin/env bash set -e CASSANDRA_CONTAINER_NAME=${CASSANDRA_CONTAINER_NAME:-bigben_cassandra} CASSANDRA_PORT=${CASSANDRA_PORT:-9042} CASSANDRA_GOSSIP_PORT=${CASSANDRA_GOSSIP_PORT:-7000} HOST_IP=${HOST_IP:-`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'`} echo "determined host ip: $HOST_IP" echo "stopping ${CASSANDRA_CONTAINER_NAME}, if running" docker stop ${CASSANDRA_CONTAINER_NAME} || true echo "starting ${CASSANDRA_CONTAINER_NAME}" docker run -d --rm \ -p ${CASSANDRA_PORT}:${CASSANDRA_PORT} \ -e CASSANDRA_BROADCAST_ADDRESS=${HOST_IP} \ -p 
${CASSANDRA_GOSSIP_PORT}:${CASSANDRA_GOSSIP_PORT} \
    -v ${PWD}/../bin/bigben-schema.cql:/tmp/bigben-schema.cql \
    --name ${CASSANDRA_CONTAINER_NAME} cassandra
# resolve the container-internal IP so the readiness probe can reach Cassandra directly
CASSANDRA_DOCKER_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${CASSANDRA_CONTAINER_NAME}`
echo "${CASSANDRA_CONTAINER_NAME} docker ip: ${CASSANDRA_DOCKER_IP}"
echo "waiting for ${CASSANDRA_CONTAINER_NAME} to boot up"
docker run --rm dadarek/wait-for-dependencies ${CASSANDRA_DOCKER_IP}:${CASSANDRA_PORT}
echo "creating bigben schema"
# schema file was mounted into the container above; idempotent (CREATE ... IF NOT EXISTS)
docker exec -it ${CASSANDRA_CONTAINER_NAME} cqlsh -f /tmp/bigben-schema.cql

================================================ FILE: build/docker/cleanup.sh ================================================
#!/usr/bin/env bash
# Stops and removes ALL docker containers on this host (not just BigBen's) — use with care.
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)

================================================ FILE: build/docker/deploy.sh ================================================
#!/usr/bin/env bash
# Build the image and push it to the registry.
set -e
./docker_build.sh
docker push sandeepmalik/bigben:1

================================================ FILE: build/docker/docker-compose.yml ================================================
version: '3'
services:
  cassandra:
    image: cassandra:3
    container_name: cassandra
    hostname: cassandra
    restart: on-failure
    volumes:
      - ../cassandra/src/main/resources/bigben-schema.cql:/tmp/bigben-schema.cql
  bigben:
    image: sandeepmalik/bigben:1
    hostname: bigben
    container_name: bigben
    volumes:
      - ../app/src/main/resources/bigben.yaml:/dist/bigben-config.yaml
      - ./configs/overrides.yaml:/dist/bigben-overrides.yaml
      - ./configs/log4j.xml:/dist/log4j-overrides.xml
      - ./configs/hz.xml:/dist/hz.xml
  # gate service: exits only once cassandra accepts connections on 9042
  setup_cassandra:
    image: dadarek/wait-for-dependencies
    container_name: setup_cassandra
    depends_on:
      - cassandra
    command: cassandra:9042

================================================ FILE: build/docker/docker_build.sh ================================================
#!/usr/bin/env bash
# Build the repo artifacts, then build the BigBen docker image from the repo root.
set -e
../exec/build.sh
cd ../..
docker build -f build/docker/Dockerfile -t sandeepmalik/bigben:1 . cd build/docker ================================================ FILE: build/docker/single_node_run.sh ================================================ #!/usr/bin/env bash set -e ./cassandra_run.sh export NUM_INSTANCES=1 ./app_run.sh ================================================ FILE: build/docker/start.sh ================================================ #!/bin/sh DEFAULT_JAVA_OPTS="-server -XX:+UnlockExperimentalVMOptions -XX:InitialRAMFraction=2 -XX:MinRAMFraction=2 -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+UseStringDeduplication" if [[ "x${JAVA_OPTS}" != "x" ]]; then export JAVA_OPTS="${DEFAULT_JAVA_OPTS} ${JAVA_OPTS}"; else export JAVA_OPTS="${DEFAULT_JAVA_OPTS}"; fi echo "using JAVA_OPTS: ${JAVA_OPTS}" java ${JAVA_OPTS} -jar bigben.jar ================================================ FILE: build/exec/app_run.sh ================================================ #!/usr/bin/env bash export HOST_IP=${HOST_IP:-`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'`} export SERVER_PORT=${SERVER_PORT:-8080} APP_ROOT=${PWD}/../configs export HZ_MEMBER_IPS=${HZ_MEMBER_IPS:-${HOST_IP}} export CASSANDRA_SEED_IPS=${CASSANDRA_SEED_IPS:-${HOST_IP}} export LOGS_DIR=${LOGS_DIR:-${APP_ROOT}/../../../bigben_logs} NUM_INSTANCES=${NUM_INSTANCES:-1} HZ_PORT=${HZ_PORT:-5701} echo HOST_IP: ${HOST_IP}, SERVER_PORT: ${SERVER_PORT}, \ HZ_MEMBER_IPS: ${HZ_MEMBER_IPS}, CASSANDRA_SEED_IPS: ${CASSANDRA_SEED_IPS}, \ HZ_PORT: ${HZ_PORT}, NUM_INSTANCES: ${NUM_INSTANCES} DEFAULT_JAVA_OPTS="-server -XX:+UnlockExperimentalVMOptions -XX:InitialRAMFraction=2 -XX:MinRAMFraction=2 -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+UseStringDeduplication" JAVA_OPTS=${JAVA_OPTS} echo "using JAVA_OPTS: ${JAVA_OPTS}" if [[ "x${JAVA_OPTS}" != "x" ]]; then export JAVA_OPTS="${DEFAULT_JAVA_OPTS} ${JAVA_OPTS}" else export JAVA_OPTS="${DEFAULT_JAVA_OPTS}" fi echo "starting ${NUM_INSTANCES} 
app node(s)"
i=1
while [[ ${i} -lt $(($NUM_INSTANCES + 1)) ]]; do
    # node i: HTTP port SERVER_PORT + 101*(i-1), Hazelcast port HZ_PORT + (i-1)
    app_port=$((${SERVER_PORT} + 101 * $((i - 1))))
    hz_port=$((${HZ_PORT} + i - 1))
    echo "starting node $i at app port: $app_port, hz port: $hz_port, logs: ${LOGS_DIR}/bigben_app_${app_port}.log"
    LOG_FILE="${LOGS_DIR}/bigben_app_${app_port}.log"
    # run in the background; stdout discarded, app logs go to LOG_FILE via log4j
    java ${JAVA_OPTS} \
        -Dbigben.log.config=${APP_ROOT}/log4j.xml \
        -Dbigben.log.file=${LOG_FILE} \
        -Dapp.server.port=${app_port} \
        -Dbigben.configs="uri://${APP_ROOT}/overrides.yaml,uri://${APP_ROOT}/../bin/bigben.yaml" \
        -Dhz.network.port=${hz_port} \
        -jar ../bin/bigben.jar > /dev/null &
    # single-node mode: follow the log in the foreground (blocks here)
    if [[ ${NUM_INSTANCES} == 1 ]]; then
        tail -f ${LOG_FILE}
    fi
    let i=i+1
done

================================================ FILE: build/exec/build.sh ================================================
#!/usr/bin/env bash
# Maven-build all modules and stage the runnable artifacts under build/bin.
set -e
cd ../..
mvn clean install
rm -rf build/bin || true
mkdir build/bin
cp app/target/bigben.jar build/bin/
cp cassandra/src/main/resources/bigben-schema.cql ./build/bin/bigben-schema.cql
cp app/src/main/resources/bigben.yaml ./build/bin/bigben.yaml

================================================ FILE: build/exec/cleanup.sh ================================================
#!/usr/bin/env bash
# Kill all locally-running bigben.jar processes (forcefully).
set -e
ps aux | grep bigben.jar | grep -v grep | awk '{print $2}' | xargs kill -9

================================================ FILE: cassandra/LICENSE.txt ================================================
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2018 Sandeep Malik Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: cassandra/pom.xml ================================================ 4.0.0 bigben com.walmartlabs.bigben 1.0.7-SNAPSHOT bigben-cassandra takari-jar BigBen:cassandra 3.2.0 com.walmartlabs.bigben bigben-lib com.datastax.cassandra cassandra-driver-core ${cassandra-driver.version} com.datastax.cassandra cassandra-driver-mapping ${cassandra-driver.version} org.testng testng test org.jetbrains.kotlin kotlin-maven-plugin org.apache.maven.plugins maven-surefire-plugin ${project.build.directory} ================================================ FILE: cassandra/src/main/kotlin/com/walmartlabs/bigben/providers/domain/cassandra/CassandraModule.kt ================================================ /*- * #%L * BigBen:cassandra * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */
package com.walmartlabs.bigben.providers.domain.cassandra

import com.datastax.driver.core.*
import com.datastax.driver.core.HostDistance.LOCAL
import com.datastax.driver.core.HostDistance.REMOTE
import com.datastax.driver.core.policies.*
import com.datastax.driver.mapping.Mapper
import com.datastax.driver.mapping.Mapper.Option.consistencyLevel
import com.datastax.driver.mapping.Mapper.Option.saveNullFields
import com.datastax.driver.mapping.MappingManager
import com.google.common.util.concurrent.ListenableFuture
import com.walmartlabs.bigben.entities.*
import com.walmartlabs.bigben.extns.nowUTC
import com.walmartlabs.bigben.utils.commons.Module
import com.walmartlabs.bigben.utils.commons.ModuleRegistry
import com.walmartlabs.bigben.utils.commons.Props.map
import com.walmartlabs.bigben.utils.commons.Props.string
import com.walmartlabs.bigben.utils.fromJson
import com.walmartlabs.bigben.utils.json
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.transform
import java.time.ZonedDateTime

/**
 * Created by smalik3 on 3/2/18
 */
// Cassandra-backed provider: maps BigBen entities (Event/Bucket/EventLookup/KV)
// onto the datastax object mapper and exposes async fetch/save/remove/load.
// NOTE(review): generic type arguments appear to have been stripped by the file
// extraction throughout this class (e.g. 'logger>()', 'Class', 'ListenableFuture>')
// — restore them from version control before compiling.
open class CassandraModule : EntityProvider, ClusterFactory, EventLoader, Module {
    companion object {
        private val l = logger>()
        private val cluster: Cluster
        val mappingManager: MappingManager
        private val loaderQuery: PreparedStatement
        private val kvAllQuery: PreparedStatement
        private val session: Session
        // cluster settings come from the "cassandra.cluster" config section (JSON round-trip)
        private val clusterConfig = ClusterConfig::class.java.fromJson(map("cassandra.cluster").json())
        private val writeConsistency = consistencyLevel(clusterConfig.writeConsistency)
        private val readConsistency = consistencyLevel(clusterConfig.readConsistency)

        init {
            l.info("initialing the Cassandra module")
            // the cluster factory class is pluggable; defaults to this class's own create()
            cluster = (Class.forName(string("domain.cluster.factory.class", CassandraModule::class.java.name)).newInstance() as ClusterFactory).create()
            session = cluster.connect(string("cassandra.keyspace"))
            mappingManager = MappingManager(session)
            // pages events of one (bucket, shard) partition after a given (event_time, id) cursor
            loaderQuery = mappingManager.session.prepare("SELECT * FROM ${session.loggedKeyspace}.events WHERE bucket_id = ? AND shard = ? AND (event_time, id) > (?,?) LIMIT ?;")
            kvAllQuery = mappingManager.session.prepare("SELECT * FROM ${session.loggedKeyspace}.kv_table WHERE key = ?;")
        }
    }

    // no registry wiring needed; all state is initialized in the companion init block
    override fun init(registry: ModuleRegistry) { }

    // maps an entity interface to its Cassandra-mapped implementation
    @Suppress("UNCHECKED_CAST")
    override fun selector(type: Class): T {
        return when (type) {
            Event::class.java -> EventC() as T
            Bucket::class.java -> BucketC() as T
            EventLookup::class.java -> EventLookupC() as T
            KV::class.java -> KVC() as T
            else -> throw IllegalArgumentException("unknown entity $type")
        }
    }

    // entities are used directly as their own raw representation
    override fun raw(selector: T) = selector

    // fetch all columns for a KV partition key; empty list when the key has no rows
    override fun kvs(selector: KV): ListenableFuture> {
        require(selector.key != null) { "key must be provided" }
        return session.executeAsync(kvAllQuery.bind(selector.key)).transform {
            it?.run { mappingManager.mapper(KVC::class.java).map(this).map { it } } ?: emptyList()
        }
    }

    // async point lookup; validates that the full primary key is present per entity type
    override fun fetch(selector: T): ListenableFuture {
        return mappingManager.mapper(selector::class.java).let {
            when (selector) {
                is EventC -> {
                    require(
                        selector.eventTime != null && selector.id != null && selector.shard != null && selector.shard!!
>= 0
                    ) { "event keys not provided: $selector" }
                    it.getAsync(selector.bucketId, selector.shard, selector.eventTime, selector.id, readConsistency).transform { it }
                }
                is BucketC -> {
                    require(selector.bucketId != null) { "bucket id not provided: $selector" }
                    it.getAsync(selector.bucketId, readConsistency).transform { it }
                }
                is EventLookupC -> {
                    require(selector.tenant != null && selector.xrefId != null) { "look up keys not provided: $selector" }
                    it.getAsync(selector.tenant, selector.xrefId, readConsistency).transform { it }
                }
                is KVC -> {
                    require(selector.key != null && selector.column != null) { "kv keys not provided: $selector" }
                    it.getAsync(selector.key, selector.column, readConsistency).transform { it }
                }
                else -> throw IllegalArgumentException("unknown selector: $selector")
            }
            // NOTE(review): this only attaches a debug-log side listener via apply{};
            // the returned future is the one produced by the when-branch above
        }.apply { transform { if (l.isDebugEnabled) l.debug("fetched entity: {}", it) } }
    }

    // async upsert; validates keys per entity type and stamps lastModified where the
    // entity carries it. saveNullFields(false) leaves absent columns untouched.
    override fun save(selector: T): ListenableFuture {
        return mappingManager.mapper(selector::class.java).let {
            @Suppress("UNCHECKED_CAST") val m = it as Mapper
            when (selector) {
                is EventC -> {
                    require(
                        selector.eventTime != null && selector.id != null && selector.bucketId != null && selector.shard != null && selector.shard!!
                        >= 0
                    ) { "event keys not provided: $selector" }
                }
                is BucketC -> {
                    require(selector.bucketId != null) { "bucket id not provided: $selector" }
                }
                is EventLookupC -> {
                    require(selector.tenant != null && selector.xrefId != null) { "look up keys not provided: $selector" }
                    selector.lastModified = nowUTC()
                }
                is KVC -> {
                    require(selector.key != null && selector.column != null) { "kv keys not provided: $selector" }
                    selector.lastModified = nowUTC()
                }
                else -> throw IllegalArgumentException("unknown selector: $selector")
            }
            if (l.isDebugEnabled) l.debug("saving entity {}", selector)
            m.saveAsync(selector, saveNullFields(false), writeConsistency).transform { _ -> if (l.isDebugEnabled) l.debug("saved entity {}", selector); selector }
        }
    }

    // async delete by primary key; same per-type key validation as fetch/save
    override fun remove(selector: T): ListenableFuture {
        return mappingManager.mapper(selector::class.java).let {
            @Suppress("UNCHECKED_CAST") val m = it as Mapper
            when (selector) {
                is EventC -> {
                    require(
                        selector.eventTime != null && selector.id != null && selector.shard != null && selector.shard!!
                        >= 0
                    ) { "event keys not provided: $selector" }
                }
                is BucketC -> {
                    require(selector.bucketId != null) { "bucket id not provided: $selector" }
                }
                is EventLookupC -> {
                    require(selector.tenant != null && selector.xrefId != null) { "look up keys not provided: $selector" }
                }
                is KVC -> {
                    require(selector.key != null && selector.column != null) { "kv keys not provided: $selector" }
                }
                else -> throw IllegalArgumentException("unknown selector: $selector")
            }
            if (l.isDebugEnabled) l.debug("deleting entity: {}", selector)
            m.deleteAsync(selector, writeConsistency).transform { _ -> if (l.isDebugEnabled) l.debug("deleted entity {}", selector); selector }
        }
    }

    // default ClusterFactory implementation: builds the driver Cluster from ClusterConfig;
    // subclasses may tweak the builder via decorate()
    override fun create(): Cluster {
        return Cluster.builder()
            // custom codecs: enum-as-varchar for EventStatus, epoch-millis timestamp for ZonedDateTime
            .withCodecRegistry(CodecRegistry().register(EnumCodec(EventStatus.values().toSet())).register(ZdtCodec()))
            .withClusterName(clusterConfig.clusterName)
            .withPort(clusterConfig.port)
            .also { clusterConfig.compression?.run { it.withCompression(ProtocolOptions.Compression.valueOf(this)) } }
            .withRetryPolicy(if (clusterConfig.downgradingConsistency) DowngradingConsistencyRetryPolicy.INSTANCE else DefaultRetryPolicy.INSTANCE)
            // when a local DC is configured, route token-aware and never use remote-DC hosts
            .also { clusterConfig.localDataCenter?.run { it.withLoadBalancingPolicy(TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().withLocalDc(this).withUsedHostsPerRemoteDc(0).build())) } }
            .withReconnectionPolicy(ConstantReconnectionPolicy(clusterConfig.reconnectPeriod))
            .withSocketOptions(SocketOptions().apply {
                connectTimeoutMillis = clusterConfig.connectionTimeOut
                readTimeoutMillis = clusterConfig.readTimeout
                keepAlive = clusterConfig.keepTCPConnectionAlive
            })
            .withPoolingOptions(PoolingOptions().apply {
                clusterConfig.apply {
                    setConnectionsPerHost(LOCAL, coreConnectionsPerLocalHost, maxConnectionsPerLocalHost)
                    setConnectionsPerHost(REMOTE, coreConnectionsPerRemoteHost, maxConnectionsPerRemoteHost)
                }
                heartbeatIntervalSeconds = 60
            })
            .also { clusterConfig.username?.run { it.withCredentials(this, clusterConfig.password) } }
.addContactPoints(*clusterConfig.contactPoints.split(",").toTypedArray()) .apply { decorate(this) } .build() } protected open fun decorate(builder: Cluster.Builder) { } override fun unwrap() = session override fun load(bucketId: ZonedDateTime, shard: Int, fetchSize: Int, eventTime: ZonedDateTime, eventId: String, context: Any?): ListenableFuture>> { return mappingManager.session.executeAsync(loaderQuery.bind(bucketId, shard, eventTime, eventId, fetchSize)).transform { null to mappingManager.mapper(EventC::class.java).map(it!!).toList() } } } ================================================ FILE: cassandra/src/main/kotlin/com/walmartlabs/bigben/providers/domain/cassandra/ClusterConfig.kt ================================================ /*- * #%L * BigBen:cassandra * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */
package com.walmartlabs.bigben.providers.domain.cassandra

import com.datastax.driver.core.Cluster
import com.datastax.driver.core.ConsistencyLevel
import com.datastax.driver.core.ConsistencyLevel.LOCAL_ONE
import com.datastax.driver.core.SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS
import com.datastax.driver.core.SOCKET_OPTIONS_PLACEHOLDER
import com.datastax.driver.core.SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS

/**
 * Created by smalik3 on 3/2/18
 */
// Immutable snapshot of the Cassandra cluster settings, deserialized from the
// "cassandra.cluster" config section (see CassandraModule.clusterConfig).
data class ClusterConfig(
    val clusterName: String = "bigben",
    val contactPoints: String,            // comma-separated host list (split in CassandraModule.create())
    val port: Int = 9042,
    val localDataCenter: String?,         // when set, enables token-aware DC-aware load balancing
    val compression: String?,             // name of a ProtocolOptions.Compression constant, if any
    val keepTCPConnectionAlive: Boolean = true,
    val coreConnectionsPerLocalHost: Int = 1,
    val maxConnectionsPerLocalHost: Int = 1,
    val coreConnectionsPerRemoteHost: Int = 1,
    val maxConnectionsPerRemoteHost: Int = 1,
    val maxRequestsPerLocalConnection: Int = 32768,
    val maxRequestsPerRemoteConnection: Int = 2048,
    val newLocalConnectionThreshold: Int = 3000,
    val newRemoteConnectionThreshold: Int = 400,
    val poolTimeoutMillis: Int = 0,
    val connectionTimeOut: Int = DEFAULT_CONNECT_TIMEOUT_MILLIS,
    val readTimeout: Int = DEFAULT_READ_TIMEOUT_MILLIS,
    // NOTE(review): ConstantReconnectionPolicy takes its delay in milliseconds;
    // a default of 5 ms looks aggressively low — confirm the intended unit
    val reconnectPeriod: Long = 5L,
    val username: String?,
    val password: String?,
    val downgradingConsistency: Boolean = false,
    val writeConsistency: ConsistencyLevel = LOCAL_ONE,
    val readConsistency: ConsistencyLevel = LOCAL_ONE
)

// pluggable factory so deployments can supply a customized driver Cluster
interface ClusterFactory {
    fun create(): Cluster
}

================================================ FILE: cassandra/src/main/kotlin/com/walmartlabs/bigben/providers/domain/cassandra/Entities.kt ================================================
/*-
 * #%L
 * BigBen:cassandra
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.providers.domain.cassandra import com.datastax.driver.mapping.annotations.* import com.hazelcast.nio.ObjectDataInput import com.hazelcast.nio.ObjectDataOutput import com.walmartlabs.bigben.entities.* import com.walmartlabs.bigben.extns.utc import com.walmartlabs.bigben.hz.HzObjectFactory.Companion.BIGBEN_FACTORY_ID import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.BUCKET import java.time.ZonedDateTime import java.util.* /** * Created by smalik3 on 2/26/18 */ @Table(name = "buckets") data class BucketC(@PartitionKey @Column(name = "id") override var bucketId: ZonedDateTime? = null, override var status: EventStatus? = null, override var count: Long? = null, @Column(name = "processed_at") override var processedAt: ZonedDateTime? = null, @Column(name = "modified_at") override var updatedAt: ZonedDateTime? = null, @Column(name = "failed_shards", codec = FailedShardsCodec::class) override var failedShards: Set? = null) : Bucket { @Transient override fun getFactoryId() = BIGBEN_FACTORY_ID @Transient override fun getId() = BUCKET.ordinal override fun writeData(out: ObjectDataOutput) { BitSet(4).apply { set(0, bucketId != null) set(1, status != null) set(2, count != null) set(3, processedAt != null) set(4, updatedAt != null) }.also { out.writeByteArray(it.toByteArray()) }.apply { if (get(0)) out.writeLong(bucketId!!.toInstant().toEpochMilli()) if (get(1)) out.writeByte(status!!.ordinal) if (get(2)) out.writeLong(count!!) 
if (get(3)) out.writeLong(processedAt!!.toInstant().toEpochMilli()) if (get(4)) out.writeLong(updatedAt!!.toInstant().toEpochMilli()) } } override fun readData(ins: ObjectDataInput) { BitSet.valueOf(ins.readByteArray()).apply { if (get(0)) bucketId = utc(ins.readLong()) if (get(1)) status = EventStatus.values()[ins.readByte().toInt()] if (get(2)) count = ins.readLong() if (get(3)) processedAt = utc(ins.readLong()) if (get(4)) updatedAt = utc(ins.readLong()) } } } @Table(name = "events") data class EventC(@ClusteringColumn @Column(name = "event_time") override var eventTime: ZonedDateTime? = null, @ClusteringColumn(1) override var id: String? = null, @PartitionKey @Column(name = "bucket_id") override var bucketId: ZonedDateTime? = null, @PartitionKey(1) override var shard: Int? = null, override var status: EventStatus? = null, override var error: String? = null, override var tenant: String? = null, @Column(name = "xref_id") override var xrefId: String? = null, @Column(name = "processed_at") override var processedAt: ZonedDateTime? = null, override var payload: String? = null, @Transient override var eventResponse: EventResponse? = null, @Transient override var deliveryOption: EventDeliveryOption? = null) : Event @Table(name = "lookups") data class EventLookupC(@PartitionKey override var tenant: String? = null, @PartitionKey(1) @Column(name = "xref_id") override var xrefId: String? = null, @Column(name = "bucket_id") override var bucketId: ZonedDateTime? = null, override var shard: Int? = null, @Column(name = "event_time") override var eventTime: ZonedDateTime? = null, @Column(name = "event_id") override var eventId: String? = null, override var payload: String? = null, @Column(name = "l_m") var lastModified: ZonedDateTime? = null) : EventLookup @Table(name = "kv_table") data class KVC(@PartitionKey override var key: String? = null, @ClusteringColumn override var column: String? = null, override var value: String? 
= null, @Column(name = "l_m") var lastModified: ZonedDateTime? = null ) : KV ================================================ FILE: cassandra/src/main/kotlin/com/walmartlabs/bigben/providers/domain/cassandra/codecs.kt ================================================ /*- * #%L * BigBen:cassandra * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.providers.domain.cassandra import com.datastax.driver.core.DataType import com.datastax.driver.core.ProtocolVersion import com.datastax.driver.core.TypeCodec import com.google.common.reflect.TypeToken import com.walmartlabs.bigben.extns.utc import com.walmartlabs.bigben.utils.json import com.walmartlabs.bigben.utils.typeRefJson import java.nio.ByteBuffer import java.time.ZonedDateTime /** * Created by smalik3 on 3/2/18 */ class EnumCodec>(values: Set) : TypeCodec(DataType.varchar(), @Suppress("UNCHECKED_CAST") (values.first()::class.java as Class)) { private val forward = values.associate { it.name to it } override fun format(value: T) = value.name override fun parse(value: String?) = value?.let { forward[it] } override fun serialize(value: T?, protocolVersion: ProtocolVersion?) = value?.let { ByteBuffer.wrap(format(it).toByteArray()) } override fun deserialize(bytes: ByteBuffer?, protocolVersion: ProtocolVersion?) 
= bytes?.let { parse(String(bytes.duplicate().array())) } } // NOTE(review): array() ignores position/limit — assumes a full, array-backed buffer; confirm against driver usage

/**
 * Maps [ZonedDateTime] <-> CQL `timestamp`.
 * Wire format: 8-byte big-endian epoch milliseconds (UTC), matching the native timestamp encoding.
 * NOTE(review): generic type arguments were stripped by the markup extraction; reconstructed below — verify against upstream repo.
 */
class ZdtCodec : TypeCodec<ZonedDateTime>(DataType.timestamp(), ZonedDateTime::class.java) {
    override fun format(value: ZonedDateTime?) = value?.toInstant()?.toEpochMilli()?.toString()
    override fun parse(value: String?): ZonedDateTime? = value?.let { utc(it.toLong()) }
    // asLongBuffer() writes through to the backing buffer while leaving the returned
    // buffer's position at 0 / limit at 8, which is what the driver expects.
    override fun serialize(value: ZonedDateTime?, protocolVersion: ProtocolVersion?) =
        value?.let { ByteBuffer.allocate(8).apply { asLongBuffer().put(value.toInstant().toEpochMilli()) } }
    override fun deserialize(bytes: ByteBuffer?, protocolVersion: ProtocolVersion?) =
        bytes?.let { utc(bytes.duplicate().asLongBuffer().get()) }
}

/**
 * Maps a `Set` (serialized as a JSON string) <-> CQL `text`; used for the `buckets.failed_shards` column.
 * NOTE(review): the element type parameter was stripped by extraction — presumed `<T>`; TODO confirm against upstream repo.
 */
class FailedShardsCodec<T> : TypeCodec<Set<T>>(DataType.text(), object : TypeToken<Set<T>>() {}) {
    override fun format(value: Set<T>?) = value?.json()
    override fun parse(value: String?): Set<T>? = value?.let { typeRefJson<Set<T>>(it) }
    override fun serialize(value: Set<T>?, protocolVersion: ProtocolVersion?) =
        value?.let { ByteBuffer.wrap(it.json().toByteArray()) }
    override fun deserialize(bytes: ByteBuffer?, protocolVersion: ProtocolVersion?)
= bytes?.let { typeRefJson>(String(it.array())) } } ================================================ FILE: cassandra/src/main/resources/bigben-schema.cql ================================================ -- DROP KEYSPACE IF EXISTS bigben; CREATE KEYSPACE IF NOT EXISTS bigben WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }; -- DROP TABLE IF EXISTS bigben.buckets; CREATE TABLE IF NOT EXISTS bigben.buckets ( id timestamp PRIMARY KEY, count bigint, failed_shards text, modified_at timestamp, processed_at timestamp, status text ); -- DROP TABLE IF EXISTS bigben.lookups; CREATE TABLE IF NOT EXISTS bigben.lookups ( tenant text, xref_id text, bucket_id timestamp, event_id text, event_time timestamp, l_m timestamp, payload text, shard int, PRIMARY KEY ((tenant, xref_id)) ); -- DROP TABLE IF EXISTS bigben.events; CREATE TABLE IF NOT EXISTS bigben.events ( bucket_id timestamp, shard int, event_time timestamp, id text, error text, payload text, processed_at timestamp, status text, tenant text, xref_id text, PRIMARY KEY ((bucket_id, shard), event_time, id) ) WITH CLUSTERING ORDER BY (event_time ASC, id ASC); -- DROP TABLE IF EXISTS bigben.kv_table; CREATE TABLE IF NOT EXISTS bigben.kv_table ( key text, column text, l_m timestamp, value text, PRIMARY KEY (key, column) ) WITH CLUSTERING ORDER BY (column ASC); ================================================ FILE: cassandra/src/test/kotlin/com/walmartlabs/bigben/cassandra/tests/IntegrationTests.kt ================================================ /*- * #%L * BigBen:cassandra * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.cassandra.tests import com.datastax.driver.core.Session import com.walmartlabs.bigben.BigBen import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.entities.* import com.walmartlabs.bigben.extns.bucket import com.walmartlabs.bigben.extns.fetch import com.walmartlabs.bigben.extns.nowUTC import com.walmartlabs.bigben.extns.save import org.testng.annotations.BeforeMethod import org.testng.annotations.Test import kotlin.test.assertEquals import kotlin.test.assertTrue /** * Created by smalik3 on 4/12/18 */ class IntegrationTests { companion object { init { System.setProperty("bigben.configs", "file://bigben-test.yaml") BigBen.init() } } @BeforeMethod private fun `clean up db`() { println("cleaning up the db") try { (module>().unwrap() as Session).apply { execute("truncate bigben.events;") execute("truncate bigben.lookups;") execute("truncate bigben.buckets;") execute("truncate bigben.kv_table;") } } catch (e: Throwable) { e.printStackTrace() throw e } } @Test fun `test bucket`() { val nowUTC = nowUTC().bucket() save { it.bucketId = nowUTC; it.count = 10 }.get() val bucket = fetch { it.bucketId = nowUTC }.get()!! 
assertEquals(bucket.count, 10) } @Test fun `test event loader`() { val bucket = nowUTC().bucket() val events = (0..99).map { i -> save { it.bucketId = bucket; it.shard = i / 10; it.eventTime = bucket.plusSeconds(10) it.id = "e_$i"; it.status = EventStatus.UN_PROCESSED }.get() }.associate { "${it.eventTime}-${it.id}" to it }.toMutableMap() val fetchSize = 20 (0..10).forEach { var l = module().load(bucket, it, fetchSize).get() while (l.second.isNotEmpty()) { l.second.forEach { assertEquals(events["${it.eventTime}-${it.id}"], it) events.remove("${it.eventTime}-${it.id}") } l = module().load(bucket, it, fetchSize, l.second.last().eventTime!!, l.second.last().id!!, l.first) .get() } } assertTrue { events.isEmpty() } } @Test fun `event added successfully`() { val bucket = nowUTC().bucket() save { it.bucketId = bucket; it.shard = 1; it.eventTime = bucket.plusSeconds(10) it.id = "e1"; it.status = EventStatus.UN_PROCESSED }.get() val event = fetch { it.bucketId = bucket; it.shard = 1; it.eventTime = bucket.plusSeconds(10); it.id = "e1" }.get()!! assertEquals(event.status, EventStatus.UN_PROCESSED) } } ================================================ FILE: cassandra/src/test/kotlin/com/walmartlabs/bigben/cassandra/tests/ORMTests.kt ================================================ /*- * #%L * BigBen:cassandra * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben.cassandra.tests import com.walmartlabs.bigben.BigBen import com.walmartlabs.bigben.entities.EventStatus.PROCESSED import com.walmartlabs.bigben.entities.KV import com.walmartlabs.bigben.extns.* import com.walmartlabs.bigben.providers.domain.cassandra.BucketC import com.walmartlabs.bigben.providers.domain.cassandra.CassandraModule.Companion.mappingManager import com.walmartlabs.bigben.providers.domain.cassandra.EventC import com.walmartlabs.bigben.providers.domain.cassandra.EventLookupC import org.testng.annotations.Test import java.util.* import java.util.concurrent.TimeUnit.MINUTES import kotlin.test.assertEquals import kotlin.test.assertNotNull /** * Created by smalik3 on 3/2/18 */ class ORMTests { companion object { init { System.setProperty("bigben.configs", "file://bigben-test.yaml") BigBen.init() } } @Test fun `test bucket orm`() { val b = BucketC(nowUTC(), PROCESSED, 10, nowUTC(), nowUTC()) val mapper = mappingManager.mapper(BucketC::class.java) mapper.save(b) val newBucket = mapper[b.bucketId] assertEquals(b, newBucket) } @Test fun `test event orm`() { val eventTime = nowUTC() val e = EventC(eventTime, UUID.randomUUID().toString(), eventTime.bucket(), 1, PROCESSED, null, "default", processedAt = eventTime.plusSeconds(1), xrefId = "xref_1", payload = "{payload}") val mapper = mappingManager.mapper(EventC::class.java) mapper.save(e) val newEventC = mapper[eventTime.bucket(), 1, eventTime, e.id] assertEquals(e, newEventC) } @Test fun `test event look up orm`() { val el = EventLookupC("default", UUID.randomUUID().toString(), nowUTC().bucket(), 2, nowUTC(), "event_1", "payload1") val mapper = mappingManager.mapper(EventLookupC::class.java) mapper.save(el) val newEventLookupC = mapper[el.tenant, el.xrefId] assertEquals(el, newEventLookupC) } @Test fun `test kv`() { val key = UUID.randomUUID().toString() save { it.key = key; it.column = 1.toString(); it.value = "Value1" }.get(1, MINUTES) save { it.key = key; it.column = 
2.toString(); it.value = "Value2" }.get(1, MINUTES) val kv = fetch { it.key = key; it.column = 1.toString() }.get(1, MINUTES) assertNotNull(kv) assertEquals(kv.value, "Value1") val kvs = kvs { it.key = key }.get(1, MINUTES) assertEquals(kvs.size, 2) kvs.associate { it.column to it.value }.apply { assertEquals(this[1.toString()], "Value1") assertEquals(this[2.toString()], "Value2") } } } ================================================ FILE: cassandra/src/test/resources/bigben-test.yaml ================================================ # top level modules modules: - name: domain class: com.walmartlabs.bigben.providers.domain.cassandra.CassandraModule - name: processors object: com.walmartlabs.bigben.processors.ProcessorRegistry - name: hz class: com.walmartlabs.bigben.utils.hz.Hz - name: scheduler object: com.walmartlabs.bigben.SchedulerModule - name: events object: com.walmartlabs.bigben.EventModule # hazelcast properties hz: template: file://hz.template.xml group: name: bigben-dev password: bigben-dev network: autoIncrementPort: true members: 127.0.0.1 port: 5701 map: store: writeDelay: 30 # cassandra related properties cassandra: keyspace: bigben cluster: contactPoints: 127.0.0.1 clusterName: bigben-cluster port: 9042 localDataCenter: null coreConnectionsPerHost: 8 maxHostsPerConnection: 32768 keepTCPConnectionAlive: true connectionTimeOut: 5000 readTimeout: 12000 reconnectPeriod: 5 username: null password: null downgradingConsistency: false writeConsistency: "LOCAL_QUORUM" readConsistency: "LOCAL_QUORUM" # system properties task: executor: #retry.thread.count: 8 retry.time.units: SECONDS delay: 1 max.retries: 3 backoff.multiplier: 2 messaging.producer.factory.class: com.walmartlabs.bigben.processors.NoOpMessageProducerFactory generic.future.max.get.time: 60 # scheduler / event related properties events: scheduler.enabled: true schedule.scan.interval.minutes: 1 num.shard.submitters: 8 receiver: shard.size: 10 lapse.offset.minutes: 0 delete: max.retries: 3 
initial.delay: 1 backoff.multiplier: 1 submit: initial.delay: 1 backoff.multiplier: 1 max.retries: 3 processor: max.retries: 3 initial.delay: 1 backoff.multiplier: 2 eager.loading: true tasks: max.events.in.memory: 100000 scheduler.worker.threads: 8 # bucket manager / loader related properties buckets: backlog.check.limit: 30 background: load.fetch.size: 10 load.wait.interval.seconds: 1 checkpoint: interval: 60 interval.units: SECONDS # kafka related properties kafka: producer: config: # this is default kafka producer config, these values will be used if not supplied during the tenant registration key.serializer: org.apache.kafka.common.serialization.StringSerializer value.serializer: org.apache.kafka.common.serialization.StringSerializer acks: "1" buffer.memory: 32400 retries: 3 ================================================ FILE: cassandra/src/test/resources/log4j.xml ================================================ ================================================ FILE: cassandra/src/test/resources/testng.xml ================================================ ================================================ FILE: commons/LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2018 Sandeep Malik Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: commons/pom.xml ================================================ 4.0.0 bigben com.walmartlabs.bigben 1.0.7-SNAPSHOT bigben-commons takari-jar BigBen:commons org.slf4j slf4j-api io.github.microutils kotlin-logging com.google.guava guava com.fasterxml.jackson.core jackson-databind com.fasterxml.jackson.module jackson-module-kotlin org.apache.commons commons-text com.hazelcast hazelcast org.jetbrains.kotlin kotlin-reflect com.fasterxml.jackson.dataformat jackson-dataformat-yaml 2.9.5 org.testng testng test org.slf4j slf4j-log4j12 1.7.25 test org.jetbrains.kotlin kotlin-maven-plugin org.apache.maven.plugins maven-surefire-plugin ${project.build.directory} ================================================ FILE: commons/src/main/kotlin/com/walmartlabs/bigben/utils/_extns.kt ================================================ /*- * #%L * BigBen:commons * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.utils import com.fasterxml.jackson.core.JsonGenerator import com.fasterxml.jackson.core.JsonParser import com.fasterxml.jackson.core.Version import com.fasterxml.jackson.core.type.TypeReference import com.fasterxml.jackson.databind.* import com.fasterxml.jackson.databind.module.SimpleModule import com.fasterxml.jackson.dataformat.yaml.YAMLFactory import com.fasterxml.jackson.module.kotlin.KotlinModule import com.google.common.base.Throwables import mu.KotlinLogging import org.slf4j.LoggerFactory import java.time.ZonedDateTime /** * Created by smalik3 on 2/21/18 */ inline fun logger() = KotlinLogging.logger(unwrapCompanionClass(T::class.java).name) //LoggerFactory.getLogger(unwrapCompanionClass(T::class.java).name)!! fun logger(name: String) = LoggerFactory.getLogger(name)!! 
fun unwrapCompanionClass(ofClass: Class): Class<*> { return if (ofClass.enclosingClass != null && ofClass.enclosingClass.kotlin.isCompanion) { ofClass.enclosingClass } else { ofClass } } fun Throwable?.rootCause() = this?.let { Throwables.getRootCause(this) } fun Throwable?.stackTraceAsString() = this?.let { Throwables.getStackTraceAsString(this) } fun zdtModule() = SimpleModule("ZDT", Version(1, 0, 0, null, null, null)).also { it.addSerializer(ZonedDateTime::class.java, object : JsonSerializer() { override fun serialize(p0: ZonedDateTime?, p1: JsonGenerator, p2: SerializerProvider) { p0?.let { p1.writeString(it.toString()) } ?: p1.writeNull() } }) it.addDeserializer(ZonedDateTime::class.java, object : JsonDeserializer() { override fun deserialize(jp: JsonParser, dc: DeserializationContext): ZonedDateTime? { return jp.codec.readValue(jp, String::class.java)?.let { ZonedDateTime.parse(it) } } }) } typealias Json = Map val om = ObjectMapper().registerModule(KotlinModule()).registerModule(zdtModule())!! fun Any.json(): String = om.writeValueAsString(this) fun Any.yaml(): String = omYaml.writeValueAsString(this) fun Class.fromJson(s: String) = om.readValue(s, this)!! fun TypeReference.fromJson(s: String): T = om.readValue(s, this) inline fun typeRefJson(s: String) = object : TypeReference() {}.fromJson(s) val omYaml = ObjectMapper(YAMLFactory()).registerModule(KotlinModule()).registerModule(zdtModule())!! fun Class.fromYaml(s: String) = omYaml.readValue(s, this)!! fun TypeReference.fromYaml(s: String): T = omYaml.readValue(s, this) inline fun typeRefYaml(s: String) = object : TypeReference() {}.fromYaml(s) ================================================ FILE: commons/src/main/kotlin/com/walmartlabs/bigben/utils/_future_extns.kt ================================================ /*- * #%L * BigBen:commons * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. 
* ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.utils import com.google.common.base.Function import com.google.common.base.Throwables.getStackTraceAsString import com.google.common.util.concurrent.* import com.google.common.util.concurrent.MoreExecutors.directExecutor import com.hazelcast.core.ICompletableFuture import com.walmartlabs.bigben.utils.commons.ListenableFutureAdapter import com.walmartlabs.bigben.utils.commons.Props import com.walmartlabs.bigben.utils.commons.TaskExecutor import org.slf4j.LoggerFactory import java.util.UUID.randomUUID import java.util.concurrent.ExecutorService import java.util.concurrent.Future import java.util.concurrent.ScheduledExecutorService import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit.SECONDS /** * Created by smalik3 on 2/21/18 */ val _l = LoggerFactory.getLogger("com.walmartlabs.bigben.utils")!! fun onError(t: Throwable?) = _l.error("error in processing: ${getStackTraceAsString(t.rootCause()!!)}", t.rootCause()) fun ListenableFuture.done(onError: (Throwable?) -> Unit = ::onError, onSuccess: (T?) -> Unit): ListenableFuture { return also { Futures.addCallback(it, object : FutureCallback { override fun onFailure(t: Throwable?) { onError(t) } override fun onSuccess(result: T?) { onSuccess(result) } }, directExecutor()) } } fun List>.done(onError: (Throwable?) -> Unit = ::onError, onSuccess: (List?) 
-> Unit): ListenableFuture> { return reduce().done(onError, onSuccess) } fun List>.reduce(): ListenableFuture> { return Futures.allAsList(this) } fun ListenableFuture.transform(t: (T?) -> R): ListenableFuture { return Futures.transform(this, Function { t(it) }, directExecutor()) } fun ListenableFuture.catching(t: (Throwable?) -> T): ListenableFuture { return Futures.catching(this, Exception::class.java, Function { t(it) }, directExecutor()) } fun ListenableFuture.catchingAsync(t: (Throwable?) -> ListenableFuture): ListenableFuture { return Futures.catchingAsync(this, Exception::class.java, AsyncFunction { t(it) }, directExecutor()) } fun ListenableFuture.transformAsync(t: (T?) -> ListenableFuture): ListenableFuture { return Futures.transformAsync(this, AsyncFunction { t(it) }, directExecutor()) } fun AsyncCallable.scheduleAsync(delay: Long, units: TimeUnit, scheduledExecutor: ScheduledExecutorService): ListenableFuture { return Futures.scheduleAsync(this, delay, units, scheduledExecutor) } fun AsyncCallable.submitAsync(executorService: ExecutorService): ListenableFuture { return Futures.submitAsync(this, executorService) } private val te = TaskExecutor(setOf(Exception::class.java)) fun (() -> ListenableFuture).retriable(taskId: String = randomUUID().toString(), maxRetries: Int = Props.int("task.executor.max.retries"), delay: Int = Props.int("task.executor.delay"), backoffMultiplier: Int = Props.int("task.executor.backoff.multiplier"), timeUnit: TimeUnit = SECONDS, taskExecutor: TaskExecutor = te): ListenableFuture = taskExecutor.async(taskId, maxRetries, delay, backoffMultiplier, timeUnit, this) @Suppress("UNCHECKED_CAST") fun > F.listenable(): ListenableFutureAdapter { require(this is ICompletableFuture<*>) { "future must be instance of ICompletableFuture" } return ListenableFutureAdapter(this as ICompletableFuture) } fun Future.result(waitTime: Long = Props.long("generic.future.max.get.time"), errorHandler: (Exception) -> T?): T { return try { get(waitTime, 
SECONDS) } catch (e: Exception) { errorHandler(e) ?: throw e } } ================================================ FILE: commons/src/main/kotlin/com/walmartlabs/bigben/utils/commons/ListenableFutureAdapter.kt ================================================ /*- * #%L * BigBen:commons * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.utils.commons import com.google.common.util.concurrent.AbstractFuture import com.hazelcast.core.ExecutionCallback import com.hazelcast.core.ICompletableFuture import java.util.concurrent.ExecutionException import java.util.concurrent.Executor import java.util.concurrent.TimeUnit import java.util.concurrent.TimeoutException /** * Created by smalik3 on 2/23/18 */ class ListenableFutureAdapter(private val delegate: ICompletableFuture) : AbstractFuture() { override fun addListener(listener: Runnable, executor: Executor) { super.addListener(listener, executor) delegate.andThen(object : ExecutionCallback { override fun onResponse(response: T) { set(response) } override fun onFailure(t: Throwable) { if (t is ExecutionException && t.cause == null) { t.initCause(RuntimeException(t.message)) setException(RuntimeException(t)) } else setException(t) } }, executor) } override fun cancel(mayInterruptIfRunning: Boolean) = super.cancel(mayInterruptIfRunning).run { delegate.cancel(mayInterruptIfRunning) } 
package com.walmartlabs.bigben.utils.commons

import com.fasterxml.jackson.core.type.TypeReference
import com.google.common.cache.CacheBuilder
import com.walmartlabs.bigben.utils.Json
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.omYaml
import com.walmartlabs.bigben.utils.yaml
import org.apache.commons.text.StrLookup
import org.apache.commons.text.StrSubstitutor
import java.io.File
import java.util.concurrent.atomic.AtomicReference
import java.util.function.Supplier

/**
 * Created by smalik3 on 2/21/18
 *
 * Global, process-wide property registry. Use [parse] to create an isolated
 * loader backed by an already-parsed json map.
 */
object Props : PropsLoader() {
    fun parse(props: Json) = PropsLoader(props)
}

/**
 * Layered property loader: yaml resources (later arguments are lower priority),
 * overridden by environment variables and system properties, with ${...}
 * substitution from system properties, the environment, and the props themselves.
 *
 * NOTE(review): generic type arguments were stripped in the extracted source;
 * they are reconstructed here (Json is the project's Map<String, Any> alias).
 */
open class PropsLoader(preloaded: Json? = null) {

    companion object {
        private val l = logger()

        // cache sentinels: key entirely absent vs. key present with an explicit null value
        private val NULL: Any = Any()
        private val NULL_PRESENT: Any = Any()

        /** Flattens a nested map into dotted keys, e.g. {a:{b:1}} -> {"a.b":1}. */
        @Suppress("UNCHECKED_CAST")
        fun flatten(json: Json): Json {
            return json.entries.map { e ->
                if (e.value !is Map<*, *>) listOf(e.key to e.value)
                else flatten(e.value as Json).entries.map { "${e.key}.${it.key}" to it.value }
            }.flatten().associate { it.first to it.second }
        }

        /** Inverse of [flatten]: re-nests dotted keys that share a prefix with more than one entry. */
        private fun unflatten(json: Json): Json {
            val result = mutableMapOf<String, Any>()
            val multiple = json.entries.filter { it.key.contains(".") }
                .map { it.key.split(".")[0] to 1 }
                .groupBy { it.first }
                .mapValues { it.value.size }
                .filter { it.value > 1 }
            json.filter { it.key.split(".")[0] !in multiple.keys }.run { result.putAll(this) }
            multiple.forEach { e ->
                result[e.key.split(".")[0]] = unflatten(
                    json.filterKeys { it.startsWith(e.key + ".") }.mapKeys { it.key.split(".", limit = 2)[1] }
                )
            }
            return result
        }

        /**
         * Merges [override] on top of [base] (both flattened first).
         * @param onlyUpdates when true, keys new in [override] are ignored
         */
        fun merge(base: Json, override: Json, onlyUpdates: Boolean = false): Json {
            return merge0(flatten(base), flatten(override), onlyUpdates)
        }

        private fun merge0(base: Json, override: Json, onlyUpdates: Boolean = false): Json {
            val result = mutableMapOf<String, Any>().apply { putAll(base) }
            val added = override.keys - base.keys
            val updates = base.keys.intersect(override.keys)
            // fixed: the original wrapped this loop in try { ... } catch (e: Exception) { println(e) },
            // a debug leftover that silently swallowed merge failures; violations now propagate.
            if (!onlyUpdates) added.forEach {
                if (override[it] != null) require(result.putIfAbsent(it, override[it]!!) == null)
            }
            updates.forEach {
                val baseVal = base[it]
                val overrideVal = override[it]!!
                when (baseVal) {
                    is Map<*, *> -> {
                        require(overrideVal is Map<*, *>) { "incompatible values for key $it: $baseVal, $overrideVal" }
                        @Suppress("UNCHECKED_CAST")
                        result[it] = merge(baseVal as Json, overrideVal as Json)
                    }
                    is Collection<*> -> {
                        // collections are appended set-wise, preserving insertion order
                        require(overrideVal is Collection<*>) { "incompatible values for key $it: $baseVal, $overrideVal" }
                        result[it] = LinkedHashSet(baseVal) + LinkedHashSet(overrideVal)
                    }
                    else -> result[it] = overrideVal
                }
            }
            return result
        }

        /** Applies ${...} substitution from system properties, then env vars, then the props map itself. */
        private fun substitute(json: Json): Json {
            val sysProps = StrSubstitutor(StrLookup.systemPropertiesLookup())
            val env = StrSubstitutor(object : StrLookup<String>() {
                override fun lookup(key: String): String? = System.getenv(key)
            })
            val self = StrSubstitutor(StrLookup.mapLookup(json))
            @Suppress("UNCHECKED_CAST")
            return substitute0(substitute0(substitute0(json, sysProps), env), self) as Json
        }

        /** Recursively applies [substitutor] to every string in a map/collection tree. */
        private fun substitute0(obj: Any?, substitutor: StrSubstitutor): Any? {
            return when (obj) {
                null -> null
                is Map<*, *> -> obj.entries.associate { substitute0(it.key, substitutor) to substitute0(it.value, substitutor) }
                is Collection<*> -> obj.map { substitute0(it, substitutor) }
                is String -> substitutor.replace(obj)
                else -> obj
            }
        }
    }

    // current resolved property tree; swapped atomically on every load()
    private val props = AtomicReference<Json>().apply { preloaded?.let { set(it) } }

    // memoizes resolved lookups; invalidated on every load()
    private val cache = CacheBuilder.newBuilder().build<String, Any>()

    fun load(supplier: Supplier<String>) = load(supplier.get())

    /** Loads and resolves the given resources; earlier arguments override later ones. */
    fun load(vararg props: String): PropsLoader {
        l.info("loading props")
        props.reversed().map {
            val x: Json = omYaml.readValue(ResourceLoader.load(it), object : TypeReference<Json>() {})
            x to false
        }.run {
            @Suppress("UNCHECKED_CAST")
            this + (System.getenv() to true) + (System.getProperties() as Json to true)
        }
            .fold(emptyMap<String, Any>()) { r, e -> merge(r, e.first, e.second) }
            .run { substitute(this) }
            .run { unflatten(this) }
            .let { this.props.set(it) }
        cache.invalidateAll()
        l.info("loaded props successfully")
        if (l.isDebugEnabled) l.debug("resolved props:\n ${this.props.get().yaml()}")
        return this
    }

    fun exists(name: String): Boolean = get(name) != null

    // defaulting accessors: absent values fall back to defaultValue
    fun int(name: String, defaultValue: Int = 0) = get(name)?.toString()?.toInt() ?: defaultValue
    fun long(name: String, defaultValue: Long = 0) = get(name)?.toString()?.toLong() ?: defaultValue
    fun string(name: String, defaultValue: String = "") = get(name)?.toString() ?: defaultValue
    fun boolean(name: String, defaultValue: Boolean = false) = get(name)?.toString()?.toBoolean() ?: defaultValue

    // strict accessors: throw when the property is absent or explicitly null
    fun int(name: String) = get(name, true)!!.toString().toInt()
    fun long(name: String) = get(name, true)!!.toString().toLong()
    fun string(name: String) = get(name, true)!!.toString()
    fun boolean(name: String) = get(name, true)!!.toString().toBoolean()

    @Suppress("UNCHECKED_CAST")
    fun map(name: String) = get(name, true) as Json

    fun root() = props.get()!!

    @Suppress("UNCHECKED_CAST")
    fun list(name: String) = get(name, true) as List<Any>

    private fun get(name: String, required: Boolean = false): Any? {
        val value = cache.get(name) { resolver(name) }
        // NOTE(review): a non-required lookup of a present-but-null key falls through
        // to 'else' and returns the NULL_PRESENT sentinel (so exists() is true);
        // preserved as-is — confirm this is intentional before changing.
        return when {
            value == NULL && required -> throw IllegalArgumentException("no property with name: $name")
            value == NULL -> null
            value == NULL_PRESENT && required -> throw IllegalArgumentException("property '$name' has a 'null' value")
            else -> value
        }
    }

    /** Resolves a (possibly dotted) key against the nested tree; returns the NULL/NULL_PRESENT sentinels. */
    private fun resolver(name: String, p: Json = props.get()): Any {
        if (p.containsKey(name)) return p[name] ?: NULL_PRESENT
        else if (name.contains(".")) {
            val parts = name.split(".", limit = 2)
            return if (p.containsKey(parts[0]) && p[parts[0]] is Map<*, *>) {
                @Suppress("UNCHECKED_CAST")
                resolver(parts[1], p[parts[0]] as Json)
            } else NULL
        }
        return NULL
    }
}

/**
 * Loads textual content from either:
 *  - "uri://<path>"  : a file-system path
 *  - "file://<path>" : a classpath resource
 */
object ResourceLoader {
    private val l = logger()

    fun load(location: String): String {
        return when {
            location.startsWith("uri://") -> {
                val f = File(location.substring("uri://".length))
                l.info("reading data from the resource: $f")
                require(f.exists()) { "could not resolve $f to a location" }
                f.readText()
            }
            location.startsWith("file://") -> {
                val f = location.substring("file://".length)
                l.info("reading data from the resource: $f")
                val ins = Props::class.java.classLoader.getResourceAsStream(f)
                require(ins != null) { "could not resolve $f to a location" }
                String(ins.readBytes())
            }
            else -> throw IllegalArgumentException("unknown resource format: $location")
        }
    }
}
package com.walmartlabs.bigben.utils.commons

import com.fasterxml.jackson.databind.util.ClassUtil.getRootCause
import com.google.common.util.concurrent.Futures.immediateFailedFuture
import com.google.common.util.concurrent.ListenableFuture
import com.google.common.util.concurrent.MoreExecutors.listeningDecorator
import com.walmartlabs.bigben.utils.catchingAsync
import com.walmartlabs.bigben.utils.commons.Props.int
import com.walmartlabs.bigben.utils.commons.Props.string
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.transformAsync
import java.lang.Runtime.getRuntime
import java.util.UUID.randomUUID
import java.util.concurrent.*
import java.util.concurrent.atomic.AtomicInteger
import java.util.function.Supplier

/**
 * Created by smalik3 on 2/22/18
 *
 * Runs asynchronous tasks with bounded, exponentially backed-off retries.
 * Retries are scheduled on a shared pool; whether a failure is retried is
 * decided by [isRetriable] against the failure's root cause.
 *
 * NOTE(review): generic type parameters were stripped in the extracted source
 * and are reconstructed here.
 */
class TaskExecutor(private val logErrorStackDuringIntermediateRetries: Boolean = false, private val isRetriable: (t: Throwable) -> Boolean) {

    /** Convenience: retry when the root cause is assignable to any of [retriableExceptions]. */
    constructor(retriableExceptions: Set<Class<out Throwable>>, logErrorStackDuringIntermediateRetries: Boolean = false) :
            this(logErrorStackDuringIntermediateRetries, fun(t: Throwable): Boolean { return isRetriable(t, retriableExceptions) })

    companion object {
        private val l = logger()
        private val serial = AtomicInteger()

        // shared scheduler for delayed retries across all TaskExecutor instances
        private val RETRY_POOL = listeningDecorator(
            ScheduledThreadPoolExecutor(
                int("task.executor.retry.thread.count", getRuntime().availableProcessors()),
                ThreadFactory { r -> Thread(r, "task-executor-retry-worker#" + serial.getAndIncrement()) },
                ThreadPoolExecutor.CallerRunsPolicy()
            )
        )

        /** True when [cause] is assignable to any class in [retriableExceptions]; null cause is not retriable. */
        fun isRetriable(cause: Throwable?, retriableExceptions: Set<Class<out Throwable>>): Boolean {
            return cause?.let { retriableExceptions.find { t -> t.isAssignableFrom(cause::class.java) } != null } ?: false
        }
    }

    /** Entry point for a plain future-producing function. */
    fun <T> async(
        taskId: String = randomUUID().toString(),
        maxRetries: Int = int("task.executor.max.retries"),
        delay: Int = int("task.executor.delay"),
        backoffMultiplier: Int = int("task.executor.backoff.multiplier"),
        timeUnit: TimeUnit = TimeUnit.valueOf(string("task.executor.retry.time.units")),
        task: () -> ListenableFuture<T>
    ): ListenableFuture<T> {
        return async(taskId, maxRetries, delay, backoffMultiplier, timeUnit, Supplier { Callable { task() } })
    }

    /** Entry point for a supplier that yields a fresh callable per attempt. */
    fun <T> async(
        taskId: String = randomUUID().toString(),
        maxRetries: Int = int("task.executor.max.retries"),
        delay: Int = int("task.executor.delay"),
        backoffMultiplier: Int = int("task.executor.backoff.multiplier"),
        timeUnit: TimeUnit = TimeUnit.valueOf(string("task.executor.retry.time.units")),
        supplier: Supplier<Callable<ListenableFuture<T>>>
    ): ListenableFuture<T> {
        return async(taskId, 0, maxRetries, delay, backoffMultiplier, timeUnit, supplier)
    }

    // one attempt: both synchronous throws and failed futures funnel into mayBeRetry
    private fun <T> async(
        taskId: String, retryCount: Int, maxRetries: Int, delay: Int,
        backoffMultiplier: Int, timeUnit: TimeUnit, task: Supplier<Callable<ListenableFuture<T>>>
    ): ListenableFuture<T> {
        return try {
            task.get().call().catchingAsync { mayBeRetry(task, taskId, retryCount, maxRetries, delay, backoffMultiplier, timeUnit, it!!) }
        } catch (t: Throwable) {
            mayBeRetry(task, taskId, retryCount, maxRetries, delay, backoffMultiplier, timeUnit, t)
        }
    }

    // schedules the next attempt (delay grows by backoffMultiplier) or fails the chain
    private fun <T> mayBeRetry(
        task: Supplier<Callable<ListenableFuture<T>>>, taskId: String, retryCount: Int, maxRetries: Int,
        delay: Int, backoffMultiplier: Int, timeUnit: TimeUnit, t: Throwable
    ): ListenableFuture<T> {
        val cause = getRootCause(t)
        return if (isRetriable(cause)) {
            if (retryCount < maxRetries) {
                if (l.isWarnEnabled) {
                    if (logErrorStackDuringIntermediateRetries)
                        l.warn("operation failed, taskId='{}', retrying after {} {}, retry={}, maxRetry={}, exception='{}'",
                            taskId, delay, timeUnit, retryCount, maxRetries,
                            if (cause.message == null) cause::class.java.name else cause.message, cause)
                    else
                        l.warn("operation failed, taskId='{}', retrying after {} {}, retry={}, maxRetry={}, exception='{}'",
                            taskId, delay, timeUnit, retryCount, maxRetries,
                            if (cause.message == null) cause::class.java.name else cause.message)
                }
                RETRY_POOL.schedule(
                    Callable { async(taskId, retryCount + 1, maxRetries, backoffMultiplier * delay, backoffMultiplier, timeUnit, task) },
                    delay.toLong(), timeUnit
                ).transformAsync { it!! }
            } else {
                l.error("operation failed, taskId='{}', after {} retries, will not be retried anymore, exception='{}'",
                    taskId, maxRetries, if (cause.message == null) cause::class.java.name else cause.message, cause)
                immediateFailedFuture(cause)
            }
        } else {
            l.error("operation failed, taskId='{}', unexpected exception", taskId, cause)
            immediateFailedFuture(cause)
        }
    }
}
* #L% */ package com.walmartlabs.bigben.utils.commons import com.walmartlabs.bigben.utils.Json import com.walmartlabs.bigben.utils.logger import java.util.concurrent.ConcurrentHashMap /** * Created by smalik3 on 9/17/18 */ interface Module { fun init(registry: ModuleRegistry) } open class NoOpModule : Module { override fun init(registry: ModuleRegistry) { } } class ModuleRegistry { private val l = logger() val cache = ConcurrentHashMap, Any>() @Suppress("UNCHECKED_CAST") inline fun module() = cache.computeIfAbsent(T::class.java) { cache.values.firstOrNull { T::class.java.isAssignableFrom(it::class.java) } ?: throw IllegalArgumentException("no module found with type: ${T::class.java}") } as T @Suppress("UNCHECKED_CAST") inline fun register(t: T) = cache.put(t::class.java, t as Any) fun loadModules(props: PropsLoader) { l.info("loading modules") props.list("modules").forEach { @Suppress("UNCHECKED_CAST") val p = Props.parse(it as Json) val enabled = p.boolean("enabled", true) if (!enabled) { l.info("skipping disabled module ${p.string("name")}") } else { l.info("initializing module: ${p.string("name")}") createModule(it).also { it.init(this) l.info("registering module: ${p.string("name")}") register(it) } } } } private fun createModule(m: Json): Module { return try { (if (m.containsKey("class")) { (Class.forName(m["class"].toString()).newInstance() as Module) } else Class.forName(m["object"].toString()).getDeclaredField("INSTANCE").apply { isAccessible = true }.get(null) as Module) } catch (e: Exception) { e.printStackTrace() throw ExceptionInInitializerError(e) } } } ================================================ FILE: commons/src/main/kotlin/com/walmartlabs/bigben/utils/hz/ClusterSingleton.kt ================================================ /*- * #%L * BigBen:commons * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. 
package com.walmartlabs.bigben.utils.hz

import com.google.common.collect.Sets.newConcurrentHashSet
import com.hazelcast.core.HazelcastInstanceNotActiveException
// fixed: the original imported javax.print.attribute.standard.PrinterStateReason.SHUTDOWN,
// a wrong enum whose comparison against LifecycleState could never be true
import com.hazelcast.core.LifecycleEvent.LifecycleState.SHUTDOWN
import com.hazelcast.core.LifecycleEvent.LifecycleState.SHUTTING_DOWN
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.rootCause
import java.lang.Thread.currentThread
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.Executors.newSingleThreadExecutor
import java.util.concurrent.atomic.AtomicInteger

/**
 * Created by smalik3 on 3/1/18
 *
 * Elects a single cluster-wide owner for [service] via a hazelcast distributed
 * lock. The winning node initializes and executes the service, destroys it on
 * node shutdown, and re-claims ownership after retriable failures.
 */
class ClusterSingleton(private val service: Service, private val hz: Hz) {

    // remembers the lifecycle-listener registration id so the hook is added only once
    private val listenerId = ConcurrentHashMap<String, String>()
    private val index = AtomicInteger()

    companion object {
        val ACTIVE_SERVICES: MutableSet<String> = newConcurrentHashSet<String>()!!
        private val l = logger()
        // failures that indicate the local instance is gone; no point retrying
        private val nonRetriables = setOf(HazelcastInstanceNotActiveException::class.java)
    }

    private val executor = newSingleThreadExecutor()

    init {
        executor.submit(task())
    }

    private fun task(): Runnable = Runnable {
        val lockName = "${service.name}_lock"
        try {
            currentThread().name = "${service.name}_service_thread"
            val clusterSingletonLock = hz.hz.getLock(lockName)
            clusterSingletonLock.lock() // blocks until this node wins ownership
            l.info("cluster singleton elected, '${hz.hz.cluster.localMember.address}/${currentThread().name}' is the new owner for: ${service.name}")
            listenerId.computeIfAbsent("listenerId") {
                hz.hz.apply { l.info("Adding the shutdown hook for cluster singleton: ${service.name}") }
                    .lifecycleService.addLifecycleListener { event ->
                        if (event.state == SHUTDOWN || event.state == SHUTTING_DOWN) {
                            if (l.isInfoEnabled) l.info("node is shutting down, destroying the service: {}", service.name)
                            try {
                                service.destroy()
                                ACTIVE_SERVICES.remove(service.name)
                            } catch (e: Exception) {
                                l.error("error in destroying the service: ${service.name}", e.rootCause())
                            }
                        }
                    }.also {
                        l.info("initing the cluster singleton service: ${service.name}")
                        service.init()
                        ACTIVE_SERVICES.add(service.name)
                    }
            }
            l.info("executing the cluster singleton service: ${service.name}")
            service.execute()
        } catch (e: Exception) {
            // NOTE(review): the brace structure of this handler was garbled in the
            // extracted source; reconstructed as: non-retriable -> log only,
            // retriable -> release the lock and resubmit the claim task.
            if (e.rootCause()!!::class.java in nonRetriables) {
                l.error("error in running the service: ${service.name}", e.rootCause())
            } else {
                l.error("error in running the service: ${service.name}, retrying...", e.rootCause())
                try {
                    hz.hz.getLock(lockName).unlock()
                } catch (e: Exception) {
                    l.error("error in unlocking cluster singleton", e.rootCause())
                } finally {
                    l.info("resubmitting ownership claim task: attempt: ${index.incrementAndGet()}")
                    executor.submit(task())
                }
            }
        }
    }
}
package com.walmartlabs.bigben.utils.hz

import com.hazelcast.config.XmlConfigBuilder
import com.hazelcast.core.Hazelcast.newHazelcastInstance
import com.hazelcast.core.HazelcastInstance
import com.walmartlabs.bigben.utils.Json
import com.walmartlabs.bigben.utils.commons.Module
import com.walmartlabs.bigben.utils.commons.ModuleRegistry
import com.walmartlabs.bigben.utils.commons.Props.map
import com.walmartlabs.bigben.utils.commons.Props.string
import com.walmartlabs.bigben.utils.commons.PropsLoader
import com.walmartlabs.bigben.utils.commons.ResourceLoader
import com.walmartlabs.bigben.utils.json
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.typeRefJson
import org.apache.commons.text.StrLookup
import org.apache.commons.text.StrSubstitutor
import java.io.ByteArrayInputStream

/**
 * Created by smalik3 on 2/23/18
 *
 * Boots the hazelcast instance: renders the XML template named by the
 * "hz.template" prop by substituting values from the "hz" props subtree,
 * then creates the instance from the rendered config.
 *
 * NOTE(review): generic type arguments (typeRefJson<Json>, StrLookup<String>)
 * were stripped in the extracted source and are reconstructed here.
 */
class Hz : Module {
    companion object {
        private val l = logger()
    }

    val hz: HazelcastInstance

    init {
        val config = typeRefJson<Json>(PropsLoader.flatten(map("hz")).json()).let { map ->
            if (l.isDebugEnabled) l.debug("using the hazelcast config from: ${Hz::class.java.getResource(string("hz.template"))}")
            val template = ResourceLoader.load(string("hz.template"))
            StrSubstitutor(object : StrLookup<String>() {
                override fun lookup(key: String): String? {
                    return if (map.containsKey(key)) map[key]!!.toString()
                    else {
                        // fall back to walking the tree for dotted keys
                        var current: Any? = map
                        for (it in key.split(".")) {
                            if (current is Map<*, *>) current = current[it] else break
                        }
                        current?.toString()
                    }
                }
            }).apply { setValueDelimiter(' ') }.replace(template)
        }
        hz = newHazelcastInstance(XmlConfigBuilder(ByteArrayInputStream(config.toByteArray())).build())
        if (l.isDebugEnabled) l.debug("hazelcast config file: {}", config)
    }

    override fun init(registry: ModuleRegistry) {
    }
}

/**
 * Created by smalik3 on 2/23/18
 * (Service.kt) Contract for a service managed as a cluster singleton by [ClusterSingleton].
 */
interface Service {
    val name: String
    fun init()
    fun execute()
    fun destroy()
}
import com.walmartlabs.bigben.utils.Json
import com.walmartlabs.bigben.utils.commons.Props
import com.walmartlabs.bigben.utils.commons.Props.boolean
import com.walmartlabs.bigben.utils.commons.Props.exists
import com.walmartlabs.bigben.utils.commons.Props.int
import com.walmartlabs.bigben.utils.commons.Props.list
import com.walmartlabs.bigben.utils.commons.Props.long
import com.walmartlabs.bigben.utils.commons.Props.map
import com.walmartlabs.bigben.utils.commons.Props.string
import com.walmartlabs.bigben.utils.commons.PropsLoader
import com.walmartlabs.bigben.utils.commons.ResourceLoader
import com.walmartlabs.bigben.utils.typeRefYaml
import org.testng.annotations.Test
import java.util.function.Supplier
import kotlin.test.assertEquals
import kotlin.test.assertTrue

/**
 * Created by smalik3 on 7/6/18
 *
 * Tests for Props/PropsLoader: loading, layered overrides, flatten/unflatten
 * and ${...} substitution. NOTE(review): generic type arguments were stripped
 * in the extracted source; reconstructed here (e.g. typeRefYaml<Json>).
 */
class PropsTests {

    @Test(priority = 1)
    fun `props test - file`() {
        Props.load("file://props.yaml")
        asserts()
    }

    @Test(priority = 2)
    fun `prop test - supplier`() {
        Props.load(Supplier { "file://props.yaml" })
        asserts()
    }

    @Test
    fun `test overrides`() {
        // overrides.yaml layered on top of props.yaml
        val props = PropsLoader().load("file://overrides.yaml", "file://props.yaml")
        assertTrue(props.exists("a"))
        assertTrue(props.exists("a.b"))
        assertTrue(props.exists("a.c.d"))
        assertEquals(props.string("a.c.d"), "y1") // override
        assertEquals(props.string("a.b"), "x")
        assertEquals(props.int("a.e"), 12)
        assertEquals(props.boolean("a.f"), true)
        assertEquals(props.list("a.g"), listOf(1, 2, 3)) // override => list append
        assertEquals(props.long("a.i", 10), 10)
        val actual = props.map("a")
        val expected = mapOf(
            "b" to "x",
            "c.d" to "y1",
            "e" to 12,
            "f" to true,
            "g" to listOf(1, 2, 3),
            "h" to mapOf("h1" to "abc", "h2" to "H2", "h3" to System.getProperty("user.home")),
            "j" to 1
        )
        println(expected)
        println(actual)
        assertEquals(expected, actual)
        val actualFlattened = PropsLoader.flatten(props.map("a"))
        val expectedFlattened = mapOf(
            "b" to "x",
            "c.d" to "y1",
            "e" to 12,
            "f" to true,
            "g" to listOf(1, 2, 3),
            "j" to 1,
            "h.h1" to "abc",
            "h.h2" to "H2",
            "h.h3" to System.getProperty("user.home")
        )
        println(actualFlattened)
        println(expectedFlattened)
        assertEquals(expectedFlattened, actualFlattened)
    }

    /*@Test
    fun `test flatten and unflatten`() {
        //val merged = Props.load("file://sub1-overrides.yaml", "file://sub1.yaml").root()
        val expected = mapOf(
            "a" to "b",
            "c" to listOf(
                "4", "5",
                mapOf("i1" to "I1"), mapOf("d1" to "D1"), mapOf("G" to "H1"), mapOf("g" to "h"),
                mapOf(
                    "d" to mapOf(
                        "d11" to System.getProperty("java.home1", "acc"),
                        "d22" to "D22", "e" to "E22",
                        "l" to listOf(
                            mapOf("a" to System.getProperty("java.io.tmpdir1", "Aaa")),
                            mapOf("a1" to "b1"), mapOf("c" to "d"), mapOf("e" to mapOf("f" to "F1"))
                        )
                    )
                ),
                mapOf("i" to mapOf("j" to "k11", "l" to "m", "j1" to "J1"))
            )
        )
        val flattened = PropsLoader.flatten(expected)
        val unflattened = PropsLoader.unflatten(flattened)
        assertEquals(expected, unflattened)
    }

    @Test
    fun `test list substitutions`() {
        val comparator = Comparator<Any> { o1, o2 -> o1.toString().compareTo(o2.toString()) }
        val merged = Props.load("file://sub1-overrides.yaml", "file://sub1.yaml").root()
        val expected = mapOf(
            "a" to "b",
            "c" to sortedSetOf(
                comparator, "4", "5",
                mapOf("i1" to "I1"), mapOf("d1" to "D1"), mapOf("G" to "H1"), mapOf("g" to "h"),
                mapOf(
                    "d" to mapOf(
                        "d11" to System.getProperty("java.home1", "acc"),
                        "d22" to "D22", "e" to "E22",
                        "l" to sortedSetOf(
                            comparator,
                            mapOf("a" to System.getProperty("java.io.tmpdir1", "Aaa")),
                            mapOf("a1" to "b1"), mapOf("c" to "d"), mapOf("e" to mapOf("f" to "F1"))
                        )
                    )
                ),
                mapOf("i" to mapOf("j" to "k11", "l" to "m", "j1" to "J1"))
            )
        )
        val flattened = PropsLoader.flatten(merged) as Json
        val unflattened = PropsLoader.unflatten(flattened)
        println("merged: $merged")
        println("flatte: $flattened")
        println("unflat: $unflattened")
        println("expect: $expected")
        TODO("complete the asserts")
    }*/

    @Test
    fun `test substitutions in list`() {
        val s = ResourceLoader.load("file://a.yaml")
        val yaml = typeRefYaml<Json>(s) // reconstructed generic; original extract had it stripped
        val merged = Props.load("file://b.yaml", "file://a.yaml").root()
        println(merged)
        //val unflatten = PropsLoader.unflatten(merged.root())
        //println(unflatten.yaml())
    }

    // shared assertions for a plain (non-overridden) props.yaml load
    private fun asserts() {
        assertTrue(exists("a"))
        assertTrue(exists("a.b"))
        assertTrue(exists("a.c.d"))
        assertEquals(string("a.c.d"), "y")
        assertEquals(string("a.b"), "x")
        assertEquals(int("a.e"), 12)
        assertEquals(boolean("a.f"), true)
        assertEquals(list("a.g"), listOf(1, 2))
        assertEquals(long("a.i", 10), 10)
        assertEquals(
            PropsLoader.flatten(map("a")),
            mapOf(
                "b" to "x",
                "c.d" to "y",
                "e" to 12,
                "f" to true,
                "g" to listOf(1, 2),
                "h.h1" to "H1",
                "h.h2" to "H2"
            )
        )
    }
}
properties cassandra: keyspace: bigben cluster: contactPoints: 127.0.0.1 clusterName: bigben-cluster port: 9042 localDataCenter: null coreConnectionsPerHost: 8 maxHostsPerConnection: 32768 keepTCPConnectionAlive: true connectionTimeOut: 5000 readTimeout: 12000 reconnectPeriod: 5 username: null password: null downgradingConsistency: false writeConsistency: "LOCAL_QUORUM" readConsistency: "LOCAL_QUORUM" # kafka consumer properties kafka: consumers: - num.consumers: 8 processor.impl.class: com.walmartlabs.bigben.kafka.ProcessorImpl topics: ${inbound.topics.1} max.poll.wait.time: 10000 message.retry.max.count: 10 config: key.deserializer: org.apache.kafka.common.serialization.StringDeserializer value.deserializer: org.apache.kafka.common.serialization.StringDeserializer bootstrap.servers: ${inbound.bootstrap.servers.1} #fetch.min.bytes: 1 group.id: bigben-inbound #heartbeat.interval.ms: 3000 session.timeout.ms: 30000 auto.offset.reset: latest fetch.max.bytes: 324000 max.poll.interval.ms: 30000 max.poll.records: 100 receive.buffer.bytes: 65536 request.timeout.ms: 60000 #send.buffer.bytes: 131072 enable.auto.commit: false producer: config: # this is default kafka producer config, these values will be used if not supplied during the tenant registration key.serializer: org.apache.kafka.common.serialization.StringSerializer value.serializer: org.apache.kafka.common.serialization.StringSerializer acks: "1" buffer.memory: 32400 retries: 3 # system properties task: executor: #retry.thread.count: 8 retry.time.units: SECONDS delay: 1 max.retries: 3 backoff.multiplier: 2 app.server.port: 8080 generic.future.max.get.time: 60 events: scheduler.enabled: true schedule.scan.interval.minutes: 1 num.shard.submitters: 8 receiver: shard.size: 1000 lapse.offset.minutes: 0 delete: max.retries: 3 initial.delay: 1 backoff.multiplier: 1 submit: initial.delay: 1 backoff.multiplier: 1 max.retries: 3 processor: max.retries: 3 initial.delay: 1 backoff.multiplier: 2 eager.loading: true tasks: 
max.events.in.memory: 100000 scheduler.worker.threads: 8 # bucket manager / loader related properties buckets: backlog.check.limit: 300 background: load.fetch.size: 100 load.wait.interval.seconds: 15 cron: runner: core.pool.size: 8 load: max.retries: 10 delay: 1 backoff.multiplier: 1 time.units: "SECONDS" ================================================ FILE: commons/src/test/resources/b.yaml ================================================ inbound.topics.1: my_topic inbound.bootstrap.servers.1: my_servers ================================================ FILE: commons/src/test/resources/log4j.xml ================================================ ================================================ FILE: commons/src/test/resources/overrides.yaml ================================================ --- a: c.d: y1 g: - 1 - 3 h: h1: ${user.homE:-abc} h3: ${user.home:-abc} j: 1 ================================================ FILE: commons/src/test/resources/props.yaml ================================================ --- a: b: x c.d: y e: 12 f: true g: - 1 - 2 h: h1: H1 h2: H2 ================================================ FILE: commons/src/test/resources/sub1-overrides.yaml ================================================ a: b c: - d: d11: ${java.home1:-acc} d22: D22 e: E22 l: - a: ${java.io.tmpdir1:-Aaa} - c: d - e: f: F1 d1: D1 G: H1 - i: j: k11 j1: J1 - i1: I1 - 5 ================================================ FILE: commons/src/test/resources/sub1.yaml ================================================ a: b c: - d: e: E1 l: - a: b - a1: b1 - g: h - G: H - i: j: k l: m - 4 ================================================ FILE: cron/pom.xml ================================================ 4.0.0 bigben com.walmartlabs.bigben 1.0.7-SNAPSHOT bigben-cron takari-jar Bigben:cron com.walmartlabs.bigben bigben-lib com.cronutils cron-utils 7.0.2 com.datastax.cassandra cassandra-driver-extras 3.3.0 org.jetbrains.kotlin kotlin-maven-plugin 
================================================ FILE: cron/src/main/kotlin/com/walmartlabs/bigben/cron/cron-hz.kt ================================================
/*-
 * #%L
 * Bigben:cron
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.cron

import com.hazelcast.core.MapStore
import com.hazelcast.nio.ObjectDataInput
import com.hazelcast.nio.ObjectDataOutput
import com.hazelcast.nio.serialization.DataSerializable
import com.walmartlabs.bigben.entities.KV
import com.walmartlabs.bigben.extns.kvs
import com.walmartlabs.bigben.extns.save
import com.walmartlabs.bigben.utils.*
import com.walmartlabs.bigben.utils.commons.Props.int

/**
 * Created by smalik3 on 7/3/18
 */

/**
 * Hazelcast-serializable bundle of all crons hashed to one partition key (cronId -> Cron).
 * Wire format: an entry count followed by one JSON document per cron; the cron id is
 * re-derived from the deserialized Cron on read.
 */
data class Crons @JvmOverloads constructor(var crons: MutableMap<String, Cron> = HashMap()) : DataSerializable {
    override fun writeData(out: ObjectDataOutput) = out.run {
        writeInt(crons.size)
        crons.forEach { writeUTF(it.value.json()) }
    }

    override fun readData(ins: ObjectDataInput) = ins.run {
        (1..readInt()).forEach { Cron::class.java.fromJson(readUTF()).apply { crons[cronId()] = this } }
    }
}

/**
 * Hazelcast [MapStore] persisting the distributed 'crons' map into the KV entity store.
 * Keys are partition indices (1..cron.partitions.count); values are the [Crons] bundle
 * of that partition, stored as a single KV row (key = index as string, value = yaml).
 */
class CronMapStore : MapStore<Int, Crons> {
    private val l = logger()

    override fun storeAll(map: Map<Int, Crons>) {
        // persist each partition bundle as one KV row; the whole batch is retried as a unit
        { map.entries.map { e -> save { it.key = e.key.toString(); it.column = ""; it.value = e.value.yaml() } }.reduce() }
            .retriable("cron-map-store:store-all")
            .result { l.error("error in storing / updating crons for keys: ${map.keys}", it.rootCause()!!); throw it.rootCause()!! }
    }

    override fun store(key: Int, value: Crons) = storeAll(mapOf(key to value))

    // the key space is fixed up front: one entry per cron partition
    override fun loadAllKeys(): Iterable<Int> = (1..int("cron.partitions.count", 271)).toList()

    override fun loadAll(keys: Collection<Int>): Map<Int, Crons> {
        if (l.isInfoEnabled) l.info("bulk-loading cron keys: $keys, thread: ${Thread.currentThread().name}")
        return {
            keys.map { k ->
                kvs { it.key = k.toString(); it.column = "" }.transform { k to it!! }
                    // BUGFIX: a failed key used to be mapped to key 0 (losing the real key in the
                    // result map) and the failure was reported via println(it.stackTrace), which
                    // prints the array's identity, not the trace; keep the key and log properly
                    .catching { l.warn("error in loading crons for key: $k", it!!); k to emptyList<KV>() }
            }.reduce()
        }
            .retriable("cron-map-store:load-all")
            .result { l.error("error in loading crons for keys: $keys", it.rootCause()); throw it.rootCause()!! }
            // an empty row set means the partition has no persisted crons yet
            .associate { if (it.second.isEmpty()) it.first to Crons() else it.second[0].key!!.toInt() to typeRefYaml<Crons>(it.second[0].value!!) }
            .apply {
                if (l.isInfoEnabled) this.filter { it.value.crons.isNotEmpty() }
                    // BUGFIX: stray '}' in the log message ("...: $this}")
                    .apply { if (this.isNotEmpty()) l.info("crons loaded: $this") }
            }
    }

    override fun deleteAll(keys: Collection<Int>) = throw UnsupportedOperationException("this must never have happened, keys: $keys")
    override fun load(key: Int) = loadAll(listOf(key))[key]
    override fun delete(key: Int) = throw UnsupportedOperationException("this must never have happened, key: $key")
}

================================================ FILE: cron/src/main/kotlin/com/walmartlabs/bigben/cron/cron-processors.kt ================================================
/*-
 * #%L
 * Bigben:cron
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.cron import com.cronutils.model.CronType import com.cronutils.model.definition.CronDefinitionBuilder import com.cronutils.model.time.ExecutionTime import com.cronutils.parser.CronParser import com.hazelcast.map.AbstractEntryProcessor import com.hazelcast.nio.ObjectDataInput import com.hazelcast.nio.ObjectDataOutput import com.hazelcast.nio.serialization.DataSerializable import com.walmartlabs.bigben.extns.utc import com.walmartlabs.bigben.utils.fromJson import com.walmartlabs.bigben.utils.json import com.walmartlabs.bigben.utils.typeRefJson import java.io.Serializable import java.time.ZoneId import java.time.ZonedDateTime /** * Created by smalik3 on 7/6/18 */ abstract class DataSerializableEntryProcessor(protected var value: String? = null, applyOnBackup: Boolean) : AbstractEntryProcessor(applyOnBackup), DataSerializable { override fun writeData(out: ObjectDataOutput) = out.run { writeUTF(value) } override fun readData(`in`: ObjectDataInput) = `in`.run { value = readUTF() } } class CronDeleteEntryProcessor(cronId: String? = null) : DataSerializableEntryProcessor(cronId, true), Serializable { override fun process(entry: MutableMap.MutableEntry): Any? { return entry.setValue(entry.value.apply { this!!.crons.remove(value) }).let { null } } } class CronEntryProcessor(c: String? = null) : DataSerializableEntryProcessor(c, true) { override fun process(entry: MutableMap.MutableEntry): Any? { val cron = Cron::class.java.fromJson(value!!) 
return entry.setValue(entry.value.apply { CronRunner.crons.values.forEach { this!!.crons[cron.cronId()] = cron } }).let { null } } } class CronMatchExecutionTimeProcessor(millis: Long? = null) : DataSerializableEntryProcessor(millis?.toString(), true) { override fun process(entry: MutableMap.MutableEntry): List { val zdt = utc(value!!.toLong()) return ArrayList(entry.value.crons.filter { it.value.executionTime().isMatch(zdt) }.values.map { it.json() }) } } class CronUpdateExecutionTimeEntryProcessor(cronId: String? = null, lastExecution: String? = null) : DataSerializableEntryProcessor((cronId to lastExecution).json(), true) { override fun process(entry: MutableMap.MutableEntry): Any? { val (cronId, lastExecution) = typeRefJson>(value!!) return entry.setValue(entry.value.apply { this!!.crons[cronId]?.let { it.lastExecutionTime = ZonedDateTime.parse(lastExecution) } }).let { null } } } fun main(args: Array) { val c = CronParser(CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX)).parse("* * * * *") val et = ExecutionTime.forCron(c) var zdt = ZonedDateTime.now(ZoneId.of("UTC")) var z = zdt println(zdt) (1..10).forEach { val match = et.isMatch(z) z = z.plusSeconds(1) zdt = et.nextExecution(zdt).get() println("match = $match, zdt = $zdt, z = $z") } } ================================================ FILE: cron/src/main/kotlin/com/walmartlabs/bigben/cron/cron.kt ================================================ /*- * #%L * Bigben:cron * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.cron import com.cronutils.descriptor.CronDescriptor import com.cronutils.model.CronType import com.cronutils.model.definition.CronDefinitionBuilder import com.cronutils.model.time.ExecutionTime import com.cronutils.parser.CronParser import com.fasterxml.jackson.annotation.JsonInclude import com.fasterxml.jackson.annotation.JsonInclude.Include.NON_EMPTY import com.google.common.util.concurrent.ListenableFuture import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.cron.CronRunner.crons import com.walmartlabs.bigben.entities.Event import com.walmartlabs.bigben.entities.EventResponse import com.walmartlabs.bigben.entities.EventStatus import com.walmartlabs.bigben.entities.KV import com.walmartlabs.bigben.extns.* import com.walmartlabs.bigben.processors.ProcessorRegistry import com.walmartlabs.bigben.utils.* import com.walmartlabs.bigben.utils.commons.Module import com.walmartlabs.bigben.utils.commons.ModuleRegistry import com.walmartlabs.bigben.utils.commons.Props.int import com.walmartlabs.bigben.utils.hz.Hz import java.time.ZonedDateTime import java.time.temporal.ChronoUnit import java.time.temporal.ChronoUnit.* import java.util.* import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.Executors.newScheduledThreadPool import java.util.concurrent.TimeUnit.SECONDS import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.atomic.AtomicReference /** * Created by smalik3 on 6/29/18 */ @JsonInclude(NON_EMPTY) data class Cron( val id: String, val expression: String, val type: 
CronType, val tenant: String, var lastExecutionTime: ZonedDateTime?, val lastUpdated: ZonedDateTime?, val tracingEnabled: Boolean = false, val tracingGranularity: ChronoUnit = DAYS ) { private val computed = ConcurrentHashMap() private fun parsed() = computed.computeIfAbsent(0) { CronRunner.parser(type).parse(expression)!! } as com.cronutils.model.Cron internal fun executionTime() = computed.computeIfAbsent(1) { println("computing"); ExecutionTime.forCron(parsed()) } as ExecutionTime init { require(tracingGranularity in supportedGranularities) { "only $supportedGranularities granularities are supported" } } fun cronId() = cronId(tenant, id, type) override fun toString() = "${cronId()}:$expression" companion object { fun cronId(tenant: String, id: String, type: CronType) = "$tenant/$id/$type" private val supportedGranularities = setOf(ChronoUnit.SECONDS, MINUTES, HOURS, DAYS, WEEKS, MONTHS, YEARS) } fun toGranularity(zdt: ZonedDateTime): String { return when (tracingGranularity) { YEARS -> zdt.year.toString() MONTHS -> "${zdt.year}/${zdt.monthValue}" WEEKS -> "${zdt.year}/${zdt.monthValue}/${WEEKS.between(zdt.withDayOfMonth(1), zdt)}" DAYS -> "${zdt.year}/${zdt.dayOfYear}" HOURS -> "${zdt.year}/${zdt.dayOfYear}/${zdt.hour}" MINUTES -> "${zdt.year}/${zdt.dayOfYear}/${zdt.hour}/${zdt.minute}" SECONDS -> "${zdt.year}/${zdt.dayOfYear}/${zdt.hour}/${zdt.minute}/${zdt.second}" else -> throw IllegalArgumentException("unsupported unit: $tracingGranularity") } } fun describe(locale: Locale = Locale.US) = CronDescriptor.instance(locale).run { describe(parsed()) }!! 
} object CronRunner : Module { private val l = logger() internal val crons = module().hz.getMap("crons") override fun init(registry: ModuleRegistry) { l.info("initializing the cron module: starting the cron runner(s)") val lastRun = AtomicReference() workers.scheduleAtFixedRate({ try { val now = nowUTC().withNano(0) if (lastRun.get() == null || now > lastRun.get()) { lastRun.set(now) val nowString = now.toString() @Suppress("UNCHECKED_CAST") val matches = (crons.executeOnKeys( crons.localKeySet(), CronMatchExecutionTimeProcessor( now.toInstant().toEpochMilli() ) ) as MutableMap>).values.flatten() .map { Cron::class.java.fromJson(it) } if (matches.isNotEmpty()) { matches.map { c -> val e = EventResponse( c.id, nowString, c.tenant, eventId = "${c.type}/$nowString", triggeredAt = nowString, eventStatus = EventStatus.TRIGGERED, payload = c.expression ).event() if (l.isDebugEnabled) l.debug("triggering event for cron: ${c.cronId()} at $nowString") module()(e).transformAsync { updateCronExecutionTime(c, now, it!!) } }.reduce() .done({ l.error("cron-failed: time: $nowString, crons: ${matches.map { it.cronId() }}") }) { if (l.isDebugEnabled) l.debug("cron-successful: time: $nowString, crons: ${matches.map { it.cronId() }}") } } } } catch (e: Exception) { l.error("error in running cron", e.rootCause()!!) 
} }, 0, 1, SECONDS) } private val parsers = ConcurrentHashMap() internal fun parser(type: CronType) = parsers.computeIfAbsent(type) { CronParser(CronDefinitionBuilder.instanceDefinitionFor(type)) } private val index = AtomicInteger() private val workers = newScheduledThreadPool(int("cron.runner.core.pool.size")) { Thread(it, "cron-runner#${index.incrementAndGet()}") } private fun updateCronExecutionTime( cron: Cron, executionTime: ZonedDateTime, event: Event ): ListenableFuture { val f = crons.submitToKey(cron.partition(), CronUpdateExecutionTimeEntryProcessor(cron.cronId(), executionTime.toString())) .listenable().transform { cron } return if (cron.tracingEnabled) { f.transformAsync { save { it.key = "${cron.cronId()}:${cron.toGranularity(executionTime)}" it.column = executionTime.toString(); it.value = event.toResponse().yaml() }.transform { cron } } } else f } } private fun Cron.partition() = module().hz.partitionService.getPartition(cronId()).partitionId private fun String.partition() = module().hz.partitionService.getPartition(this).partitionId object CronService { private val l = logger() fun upsert(cron: Cron) = response { if (l.isInfoEnabled) l.info("creating/updating cron: $cron") val cronId = cron.cronId() val pId = cron.partition() if (l.isDebugEnabled) l.debug("cron: $cronId hashed to partition: $pId") crons.executeOnKey(pId, CronEntryProcessor(cron.copy(lastUpdated = nowUTC(), lastExecutionTime = null).json())) if (l.isDebugEnabled) l.debug("cron: $cronId updated successfully") mapOf("status" to "OK") } fun delete(tenant: String, id: String, type: String) = response { val types = if (type == "*") CronType.values().toSet() else setOf(CronType.valueOf(type)) if (l.isInfoEnabled) l.info("deleting cron: $tenant/$id, types: $types") types.forEach { val cronId = Cron.cronId(tenant, id, it) val pId = cronId.partition() if (l.isDebugEnabled) l.debug("cron: $cronId hashed to partition: $pId") crons.executeOnKey(pId, CronDeleteEntryProcessor(cronId)) if 
(l.isDebugEnabled) l.debug("cron: $cronId deleted successfully") } mapOf("status" to "OK") } @JsonInclude(NON_EMPTY) data class CronDescription(val cron: Cron, val description: String?) fun get(tenant: String, id: String, describe: Boolean?) = response { crons.values.flatMap { it.crons.values.filter { it.tenant == tenant && it.id == id } } .map { CronDescription(it, describe?.run { it.describe() }) } } fun describe(cron: Cron) = response { CronDescription(cron, cron.describe()) } } ================================================ FILE: kafka/LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2018 Sandeep Malik

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

================================================ FILE: kafka/pom.xml ================================================
4.0.0 bigben com.walmartlabs.bigben 1.0.7-SNAPSHOT bigben-kafka takari-jar Bigben:kafka com.walmartlabs.bigben bigben-lib org.apache.kafka kafka-clients 1.0.0 org.jetbrains.kotlin kotlin-maven-plugin

================================================ FILE: kafka/src/main/kotlin/com/walmartlabs/bigben/kafka/kafka-mocks.kt ================================================
/*-
 * #%L
 * bigben-kafka
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.kafka

import com.google.common.util.concurrent.Futures
import com.google.common.util.concurrent.Futures.immediateFailedFuture
import com.google.common.util.concurrent.ListenableFuture
import com.walmartlabs.bigben.entities.EventResponse
import com.walmartlabs.bigben.processors.MessageProducerFactory
import com.walmartlabs.bigben.utils.Json
import com.walmartlabs.bigben.utils.commons.PropsLoader
import org.apache.kafka.clients.consumer.Consumer
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.consumer.MockConsumer
import org.apache.kafka.clients.consumer.OffsetResetStrategy.EARLIEST
import org.apache.kafka.clients.producer.MockProducer
import org.apache.kafka.common.serialization.StringSerializer
import java.util.concurrent.atomic.AtomicReference

/**
 * Created by smalik3 on 6/28/18
 */

/**
 * Test factory backed by Kafka's MockProducer; remembers the last event that was
 * 'sent' successfully so tests can assert on it.
 * NOTE(review): generic arguments were erased by the extraction; restored — confirm.
 */
class MockMessageProducerFactory : MessageProducerFactory {
    companion object {
        val LAST_MESSAGE = AtomicReference<EventResponse>()
    }

    override fun create(tenant: String, props: Json) = object : KafkaMessageProducer(tenant, props) {
        override fun createProducer(props: Json) = MockProducer(true, StringSerializer(), StringSerializer())

        // a 'fail' key in the props simulates a produce failure
        override fun produce(e: EventResponse): ListenableFuture<*> {
            return if (props.containsKey("fail")) {
                immediateFailedFuture<Any>(Exception())
            } else super.produce(e).apply { LAST_MESSAGE.set(e) }
        }
    }
}

/** Test processor backed by MockConsumer; every record 'processes' successfully and immediately. */
class MockKafkaProcessor(props: PropsLoader) : KafkaMessageProcessor(props) {
    lateinit var consumer: MockConsumer<String, String>

    override fun createConsumer(): Consumer<String, String> = MockConsumer<String, String>(EARLIEST).apply { consumer = this }

    override fun process(cr: ConsumerRecord<String, String>) = Futures.immediateFuture("" as Any)!!
}

================================================ FILE: kafka/src/main/kotlin/com/walmartlabs/bigben/kafka/kafka-module.kt ================================================
/*-
 * #%L
 * bigben-kafka
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
* ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.kafka import com.google.common.util.concurrent.ListenableFuture import com.walmartlabs.bigben.utils.Json import com.walmartlabs.bigben.utils.commons.Module import com.walmartlabs.bigben.utils.commons.ModuleRegistry import com.walmartlabs.bigben.utils.commons.Props import com.walmartlabs.bigben.utils.commons.PropsLoader import com.walmartlabs.bigben.utils.done import com.walmartlabs.bigben.utils.logger import com.walmartlabs.bigben.utils.reduce import com.walmartlabs.bigben.utils.retriable import com.walmartlabs.bigben.utils.rootCause import com.walmartlabs.bigben.utils.transform import org.apache.kafka.clients.consumer.CommitFailedException import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.consumer.ConsumerRebalanceListener import org.apache.kafka.clients.consumer.ConsumerRecord import org.apache.kafka.clients.consumer.KafkaConsumer import org.apache.kafka.clients.consumer.OffsetAndMetadata import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.WakeupException import java.lang.Thread.currentThread import java.util.concurrent.Executors.newFixedThreadPool import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.locks.Lock import java.util.concurrent.locks.ReentrantLock import 
kotlin.concurrent.withLock object KafkaModule : Module { private val l = logger() override fun init(registry: ModuleRegistry) { l.info("initializing kafka processor(s)") Props.list("kafka.consumers").forEach { @Suppress("UNCHECKED_CAST") val p = Props.parse(it as Json) require(p.exists("config.group.id")) { "group.id is required" } val index = AtomicInteger(0) val numConsumers = p.int("num.consumers") l.info("creating $numConsumers kafka consumers") newFixedThreadPool(numConsumers) { Thread(it, "kafkaProcessor[${p.string("config.group.id")}]#${index.getAndIncrement()}") }.apply { (1..numConsumers).forEach { l.debug("creating kafka consumer: $it") submit(try { "processor.impl.class".run { require(p.exists(this)) { "$this is required" } Class.forName(p.string(this)).let { require(KafkaMessageProcessor::class.java.isAssignableFrom(it)) { "processor class must extend ${KafkaMessageProcessor::class.java.simpleName}" } it.getConstructor(PropsLoader::class.java).newInstance(p) as Runnable } } } catch (e: Exception) { l.error("unexpected error in starting kafka processor", e.rootCause()) throw IllegalArgumentException(e) }) } } } } } abstract class KafkaMessageProcessor(private val props: PropsLoader) : Runnable { private val topics = props.string("topics").split(",") private val closed = AtomicBoolean() private val autoCommit = props.boolean("config.enable.auto.commit") private var numUnknownExceptionRetries = props.int("unknown.exception.retries", 5) companion object { private val l = logger() } abstract fun process(cr: ConsumerRecord): ListenableFuture open fun createConsumer(): Consumer = KafkaConsumer(props.map("config")) override fun run() { try { process() } catch (e: Exception) { l.error("error in running kafka processor", e.rootCause()) } } private fun process() { val consumer = createConsumer() if (l.isInfoEnabled) { l.info("starting the kafka consumer ${currentThread().name} for topic(s): $topics") if (!autoCommit) l.info("offsets will be committed manually") } 
// Tracks the set of partitions currently owned by this consumer; a rebalance
// revocation resets it to null so no commits/resumes happen on lost partitions.
// NOTE(review): type argument restored — the extracted source read
// "AtomicReference?>()"; "<Set<TopicPartition>" was eaten by markup stripping.
val owned = AtomicReference<Set<TopicPartition>?>()
consumer.subscribe(topics, object : ConsumerRebalanceListener {
    override fun onPartitionsAssigned(partitions: MutableCollection<TopicPartition>) {
        if (l.isDebugEnabled) l.debug("partitions assigned: ${partitions.groupBy { it.topic() }.mapValues { it.value.map { it.partition() }.toSortedSet() }.toSortedMap()}")
        owned.set(partitions.toSet())
    }

    override fun onPartitionsRevoked(partitions: MutableCollection<TopicPartition>) {
        if (l.isDebugEnabled) l.debug("partitions revoked: ${partitions.groupBy { it.topic() }.mapValues { it.value.map { it.partition() }.toSortedSet() }.toSortedMap()}")
        owned.set(null)
    }
})
// Deferred actions (partition resume + offset commit) handed back by the async
// processing pipeline; KafkaConsumer is not thread-safe, so they are queued here
// and drained on this (the polling) thread at the top of each loop iteration.
val tasks = mutableListOf<() -> Unit>()
val inPoll = AtomicBoolean(false)
val taskLock: Lock = ReentrantLock()
while (!closed.get()) {
    try {
        // drain any tasks queued by a previously completed batch
        taskLock.withLock {
            if (l.isDebugEnabled) l.debug("processing pending tasks")
            tasks.forEach { it() }
            tasks.clear()
            if (l.isDebugEnabled) l.debug("pending tasks processed successfully")
        }
        inPoll.set(true)
        if (l.isDebugEnabled) l.debug("starting the poll for topic(s): $topics")
        val records = consumer.poll(props.long("max.poll.wait.time"))
        inPoll.set(false)
        if (l.isDebugEnabled) l.debug("fetched ${records.count()} messages from topic(s): $topics")
        if (records.count() > 0) {
            // offsets: per-partition commit position (highest fetched offset + 1)
            // range:   per-partition "[min-max]" strings, used only for logging
            val (offsets, range) = records.groupBy { TopicPartition(it.topic(), it.partition()) }.run {
                mapValues { OffsetAndMetadata(it.value.maxBy { it.offset() }!!.offset() + 1) } to
                        mapValues { "[${it.value.minBy { it.offset() }!!.offset()}-${it.value.maxBy { it.offset() }!!.offset()}]" }
                            .mapKeys { "${it.key.topic()}[${it.key.partition()}]" }.toSortedMap()
            }
            // pause every partition of this batch so subsequent polls deliver
            // nothing new until the in-flight batch is fully processed
            val partitions = records.partitions().apply {
                if (l.isDebugEnabled) l.debug("pausing the partitions ${groupBy { it.topic() }.mapValues { it.value.map { it.partition() }.toSortedSet() }.toSortedMap()}")
                consumer.pause(this)
            }
            // Runs later on the polling thread (via 'tasks'): resume only the
            // partitions still owned, then commit their offsets (manual mode only).
            // (Named lambda replaces the original anonymous '{ … }.apply { … }' idiom.)
            val resumeAndCommit: () -> Unit = {
                if (l.isDebugEnabled) l.debug("resuming the partitions ${partitions.groupBy { it.topic() }.mapValues { it.value.map { it.partition() }.toSortedSet() }.toSortedMap()}")
                consumer.resume(partitions intersect (owned.get() ?: emptySet()))
                val ownedSnapshot = owned.get()
                if (!autoCommit && ownedSnapshot != null) {
                    // commit only partitions this consumer still owns
                    val filtered = offsets.filterKeys { it in ownedSnapshot }
                    if (l.isDebugEnabled) l.debug("committing offsets $filtered")
                    try {
                        consumer.commitSync(filtered)
                    } catch (e: CommitFailedException) {
                        l.warn("bulk commit failed for offsets: $filtered, trying to each owned partition commit one by one")
                        offsets.forEach {
                            // no snapshot here: ownership is re-read per entry, so it may
                            // change between the check and the commit (preserved as-is)
                            if (owned.get() != null && it.key in owned.get()!!) {
                                try {
                                    consumer.commitSync(mapOf(it.key to it.value))
                                } catch (e: Exception) {
                                    l.warn("error in committing offset for ${it.key}, ignoring")
                                }
                            } else l.info("partition ${it.key} is no more owned by this consumer, ignoring the offset commit")
                        }
                    }
                }
            }
            if (l.isDebugEnabled) l.debug("submitting records for processing: $range")
            // process each record (with per-record retries), reduce to one future,
            // then hand 'resumeAndCommit' back to the polling thread on completion
            records.map {
                { process(it) }.retriable(
                    "${it.topic()}/${it.partition()}/${it.offset()}/${it.key()}",
                    maxRetries = props.int("message.retry.max.count")
                )
            }.reduce().transform { resumeAndCommit }.done({
                l.error("error in processing messages: $range", it.rootCause())
                taskLock.withLock { tasks += resumeAndCommit }
                if (l.isDebugEnabled) l.debug("adding tasks for partition resume and offset commits")
                if (inPoll.get()) {
                    if (l.isDebugEnabled) l.debug("waking up consumer stuck in poll")
                    consumer.wakeup()
                }
            }) {
                if (l.isDebugEnabled) {
                    l.debug("messages processed successfully: $range")
                    l.debug("adding tasks for partition resume and offset commits")
                }
                taskLock.withLock { tasks += resumeAndCommit }
                if (inPoll.get()) {
                    if (l.isDebugEnabled) l.debug("waking up consumer stuck in poll")
                    consumer.wakeup()
                }
            }
        }
    } catch (e: Exception) {
        val rc = e.rootCause()
        if (rc is WakeupException) {
            // wakeup() is used both for task scheduling and for shutdown; only the
            // shutdown case is worth logging
            if (closed.get()) {
                l.info("consumer has been closed for topic(s): $topics")
            }
        } else {
            if (numUnknownExceptionRetries-- > 0) l.warn("unknown exception, ignoring", rc)
            // NOTE(review): the counter was just decremented, so this message logs a
            // stale (negative) retry count — confirm intended value
            else l.error("unknown exception, giving up after $numUnknownExceptionRetries retries, closing the consumer", rc)
closed.set(true) } } } } } ================================================ FILE: kafka/src/main/kotlin/com/walmartlabs/bigben/kafka/kafka-processor.kt ================================================ /*- * #%L * bigben-kafka * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.kafka import com.google.common.util.concurrent.Futures.immediateFailedFuture import com.google.common.util.concurrent.Futures.immediateFuture import com.google.common.util.concurrent.ListenableFuture import com.walmartlabs.bigben.BigBen import com.walmartlabs.bigben.api.EventReceiver import com.walmartlabs.bigben.entities.EventRequest import com.walmartlabs.bigben.entities.EventStatus.* import com.walmartlabs.bigben.entities.Mode.UPSERT import com.walmartlabs.bigben.extns.event import com.walmartlabs.bigben.processors.ProcessorRegistry import com.walmartlabs.bigben.utils.commons.PropsLoader import com.walmartlabs.bigben.utils.fromJson import com.walmartlabs.bigben.utils.logger import com.walmartlabs.bigben.utils.rootCause import com.walmartlabs.bigben.utils.transformAsync import org.apache.kafka.clients.consumer.ConsumerRecord class ProcessorImpl(props: PropsLoader) : KafkaMessageProcessor(props) { companion object { private val l = logger() } private val badMessageMarker = immediateFuture(null) private val eventReceiver = BigBen.module() private val 
processorRegistry = BigBen.module() override fun process(cr: ConsumerRecord): ListenableFuture { return ((try { EventRequest::class.java.fromJson(cr.value()) } catch (e: Exception) { l.warn("bad message format, dropping: ${cr.value()}, error: ${e.rootCause()?.message}"); null })?.run { if (l.isDebugEnabled) l.debug("received audit event: $this") try { if (mode == UPSERT) eventReceiver.addEvent(this).transformAsync { if (it!!.eventStatus == TRIGGERED) processorRegistry.invoke(it.event()) else if (it.eventStatus == ERROR || it.eventStatus == REJECTED) { l.warn("event request is rejected or had error, event response: $it") } immediateFuture(it) } else eventReceiver.removeEvent(id!!, tenant!!) } catch (e: Exception) { val rc = e.rootCause()!! l.error("failed to process message: $cr", rc) immediateFailedFuture(rc) } } ?: badMessageMarker).run { @Suppress("UNCHECKED_CAST") this as ListenableFuture } } } ================================================ FILE: kafka/src/main/kotlin/com/walmartlabs/bigben/kafka/kafka-producer.kt ================================================ /*- * #%L * bigben-kafka * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben.kafka import com.google.common.util.concurrent.ListenableFuture import com.google.common.util.concurrent.SettableFuture import com.walmartlabs.bigben.entities.EventDeliveryOption.FULL_EVENT import com.walmartlabs.bigben.entities.EventResponse import com.walmartlabs.bigben.processors.MessageProducer import com.walmartlabs.bigben.processors.MessageProducerFactory import com.walmartlabs.bigben.utils.Json import com.walmartlabs.bigben.utils.commons.Module import com.walmartlabs.bigben.utils.commons.ModuleRegistry import com.walmartlabs.bigben.utils.json import com.walmartlabs.bigben.utils.logger import com.walmartlabs.bigben.utils.rootCause import org.apache.kafka.clients.producer.KafkaProducer import org.apache.kafka.clients.producer.Producer import org.apache.kafka.clients.producer.ProducerRecord /** * Created by smalik3 on 6/25/18 */ class KafkaMessageProducerFactory : MessageProducerFactory, Module { override fun init(registry: ModuleRegistry) { } override fun create(tenant: String, props: Json) = KafkaMessageProducer(tenant, props) } open class KafkaMessageProducer(private val tenant: String, props: Json) : MessageProducer { companion object { val l = logger() } private val kafkaProducer = this.createProducer(props) private val topic = require(props.containsKey("topic")) { "no topic in props" }.run { props["topic"]!!.toString() } protected open fun createProducer(props: Json): Producer = KafkaProducer(props).apply { if (l.isInfoEnabled) l.info("kafka producer for tenant $tenant created successfully") } override fun produce(e: EventResponse): ListenableFuture<*> { if (l.isDebugEnabled) l.debug("producer:begin: tenant: $tenant, topic: $topic, event: ${e.id}") return SettableFuture.create().apply { val content = if (e.deliveryOption == null || e.deliveryOption == FULL_EVENT) e.json() else e.payload kafkaProducer.send(ProducerRecord(topic, e.id, content)) { r, exception -> if (exception != null) { l.error("producer:error: 
tenant: $tenant, topic: $topic, event: ${e.id}, failure", exception.rootCause()) setException(exception.rootCause()!!) } else { if (l.isDebugEnabled) l.debug("successfully published, event: ${e.tenant}/${e.id}, topic: ${r.topic()}, partition: ${r.partition()}, offset: ${r.offset()}") set(e) } } } } } ================================================ FILE: lib/LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2018 Sandeep Malik Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: lib/pom.xml ================================================ 4.0.0 com.walmartlabs.bigben bigben 1.0.7-SNAPSHOT bigben-lib takari-jar BigBen:lib com.walmartlabs.bigben bigben-commons com.ning async-http-client org.jetbrains.kotlin kotlin-reflect org.jetbrains.kotlin kotlin-maven-plugin ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/BigBen.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben import com.walmartlabs.bigben.entities.EntityProvider import com.walmartlabs.bigben.utils.commons.ModuleRegistry import com.walmartlabs.bigben.utils.commons.Props import com.walmartlabs.bigben.utils.commons.Props.load import com.walmartlabs.bigben.utils.logger import com.walmartlabs.bigben.utils.rootCause /** * Created by smalik3 on 6/24/18 */ object BigBen { private val l = logger() val registry = ModuleRegistry() inline fun module() = registry.module() inline fun entityProvider() = registry.module>() fun init() { Initializer } private object Initializer { init { System.getProperty("bigben.configs")?.run { val configs = this.split(",") l.info("using configs: $configs") load(*configs.toTypedArray()) } ?: { l.warn("no 'bigben.configs' system property set, using the default: file://bigben.yaml") load("file://bigben.yaml") }() l.info("initiating module registration") try { BigBen.registry.loadModules(Props) } catch (e: Throwable) { l.error("error in loading modules, system will exit now", e.rootCause()) //exitProcess(1) throw ExceptionInInitializerError(e.rootCause()) } l.info("module registration is complete") l.info("BigBen initialized successfully") } } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/api/EventReceiver.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.api import com.google.common.util.concurrent.Futures.immediateFuture import com.google.common.util.concurrent.ListenableFuture import com.hazelcast.map.EntryBackupProcessor import com.hazelcast.map.EntryProcessor import com.hazelcast.nio.ObjectDataInput import com.hazelcast.nio.ObjectDataOutput import com.walmartlabs.bigben.BigBen.entityProvider import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.core.ScheduleScanner.Companion.BUCKET_CACHE import com.walmartlabs.bigben.entities.* import com.walmartlabs.bigben.entities.EventDeliveryOption.PAYLOAD_ONLY import com.walmartlabs.bigben.entities.EventStatus.* import com.walmartlabs.bigben.extns.* import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.EVENT_RECEIVER_ADD_EVENT import com.walmartlabs.bigben.processors.ProcessorRegistry import com.walmartlabs.bigben.utils.* import com.walmartlabs.bigben.utils.commons.Props import com.walmartlabs.bigben.utils.hz.Hz import java.time.ZonedDateTime import kotlin.collections.MutableMap.MutableEntry /** * Created by smalik3 on 2/26/18 */ class EventReceiver(val hz: Hz) { companion object { private val l = logger() internal val CACHED_PROCESSOR = CountIncrementer() } private val scanInterval = Props.int("events.schedule.scan.interval.minutes") private val lapseOffset = Props.int("events.receiver.lapse.offset.minutes") init { if (l.isInfoEnabled) l.info("using event lapseOffset: {} minutes", lapseOffset) } fun addEvent(eventRequest: EventRequest): ListenableFuture { return validate(eventRequest)?.let { it } ?: { val eventTime = ZonedDateTime.parse(eventRequest.eventTime) val bucketId = utc(bucketize(eventTime.toInstant().toEpochMilli(), scanInterval)) fetch { it.xrefId = eventRequest.id; it.tenant = eventRequest.tenant } .transformAsync { if (it != null) { if (it.eventTime == eventTime) { if (l.isDebugEnabled) 
l.debug("{}, event update received, no change in event time", eventRequest.id) save { e -> e.bucketId = it.bucketId; e.shard = it.shard; e.eventTime = it.eventTime; e.id = it.eventId; e.payload = eventRequest.payload }.transform { if (l.isDebugEnabled) l.debug("{}, event updated successfully", eventRequest.id) eventRequest.toResponse().apply { eventId = it!!.id; eventStatus = UPDATED } } } else { if (l.isDebugEnabled) l.debug("event update received, event time changed, add new event -> update existing look up -> delete old event") val oldLookup = it addEvent0(eventRequest, bucketId, eventTime).transformAsync { addLookup0(eventRequest, bucketId, it!!.shard!!, it.id!!, eventTime).transformAsync { removeEvent0(oldLookup) }.transform { eventRequest.toResponse().apply { eventId = it!!.eventId; eventStatus = UPDATED } } } } } else { addEvent0(eventRequest, bucketId, eventTime).transformAsync { addLookup0(eventRequest, it!!.bucketId!!, it.shard!!, it.id!!, it.eventTime!!).transform { if (l.isDebugEnabled) l.debug("{}, add-event: successful", it!!.xrefId) eventRequest.toResponse().apply { eventId = it!!.eventId; eventStatus = ACCEPTED } } } } }.catching { l.error("failed to add event: {}", eventRequest.id, it.rootCause()) eventRequest.toResponse().apply { eventStatus = ERROR } } }() } private fun addLookup0(eventRequest: EventRequest, bucketId: ZonedDateTime, shard: Int, eventId: String, eventTime: ZonedDateTime): ListenableFuture { return save { it.tenant = eventRequest.tenant it.xrefId = eventRequest.id it.bucketId = bucketId it.shard = shard it.eventTime = eventTime it.eventId = eventId if (l.isDebugEnabled) l.debug("{}, add-event: event-lookup-table: insert", eventRequest.id) } } private fun addEvent0(eventRequest: EventRequest, bucketId: ZonedDateTime, eventTime: ZonedDateTime): ListenableFuture { return hz.hz.getMap(BUCKET_CACHE).let { it.submitToKey(bucketId, CACHED_PROCESSOR).listenable().transformAsync { val count = it as Long save { if (l.isDebugEnabled) 
l.debug("{}, add-event: event-table: insert", eventRequest.id)
                it.id = eventId(eventRequest)
                it.eventTime = eventTime
                // one shard per 'events.receiver.shard.size' events; count is 1-based
                it.shard = ((count - 1) / Props.int("events.receiver.shard.size")).toInt()
                it.status = UN_PROCESSED
                it.tenant = eventRequest.tenant
                it.xrefId = eventRequest.id
                it.bucketId = eventTime.bucket()
                it.payload = eventRequest.payload
            }
        }
    }
}
// NOTE(review): the closing brace above was restored — the extracted source opened
// four scopes in addEvent0 (fun/let/transformAsync/save) but carried only three
// closers, so one '}' was lost in extraction.

// Deletes the event row referenced by the given lookup, retrying per the
// 'events.receiver.delete.*' properties; the future resolves to the lookup itself.
private fun removeEvent0(eventLookup: EventLookup): ListenableFuture<EventLookup> {
    return {
        remove<Event> { it.eventTime = eventLookup.eventTime; it.id = eventLookup.eventId; it.shard = eventLookup.shard; it.bucketId = eventLookup.bucketId }
    }.retriable(
        "delete-event-${eventLookup.xrefId}",
        Props.int("events.receiver.delete.max.retries"),
        Props.int("events.receiver.delete.initial.delay"),
        Props.int("events.receiver.delete.backoff.multiplier")
    ).transform { eventLookup }
}

// Removes an event (event row first, then its lookup row) by caller id + tenant.
// Resolves to an EventResponse with status DELETED on success or ERROR (with error
// details) on failure; an unknown id resolves to a bare response.
// NOTE(review): type arguments on remove/fetch/ListenableFuture restored — they
// were stripped from the extracted source; inferred from the fields each selector
// sets (id/shard => Event, xrefId/tenant => EventLookup).
fun removeEvent(id: String, tenant: String): ListenableFuture<EventResponse> {
    val eventResponse = EventResponse().apply { this.id = id; this.tenant = tenant }
    return fetch<EventLookup> { it.xrefId = id; it.tenant = tenant }.transformAsync { el ->
        if (el == null) immediateFuture(eventResponse)
        else {
            if (l.isDebugEnabled) l.debug("removing event: {}/{}", tenant, id)
            remove<Event> { it.eventTime = el.eventTime; it.shard = el.shard; it.id = el.eventId; it.bucketId = el.bucketId }.transformAsync {
                if (l.isDebugEnabled) l.debug("removing event look up: {}/{}", tenant, id)
                remove<EventLookup> { it.tenant = el.tenant; it.xrefId = el.xrefId }.transform {
                    if (l.isDebugEnabled) l.debug("event removed successfully : {}/{}", tenant, id)
                    eventResponse.apply { eventStatus = DELETED; eventId = it?.eventId }
                }
            }
        }
    }.catching {
        l.error("error in removing the event: {}/{}", tenant, id, it.rootCause())
        eventResponse.error = Error(500, it.rootCause()?.message)
        eventResponse.apply { eventStatus = ERROR }
    }
}

// Returns a rejection/short-circuit response future when the request is invalid
// or already lapsed, or null when the request should proceed.
private fun validate(eventRequest: EventRequest): ListenableFuture<EventResponse>?
{ if (eventRequest.tenant == null) { val eventResponse = eventRequest.toResponse() eventResponse.eventStatus = REJECTED eventResponse.error = Error(400, "tenant not present") l.error("event rejected, tenant missing, {}", eventRequest.json()) return immediateFuture(eventResponse) } if (eventRequest.eventTime == null) { val eventResponse = eventRequest.toResponse() eventResponse.eventStatus = REJECTED eventResponse.error = Error(400, "event time not present") l.error("event rejected, event time not present, {} ", eventRequest.json()) return immediateFuture(eventResponse) } if (eventRequest.tenant!! !in module().registeredTenants()) { val eventResponse = eventRequest.toResponse() eventResponse.eventStatus = REJECTED eventResponse.error = Error(400, "tenant not registered / unknown tenant: ${eventRequest.tenant}") l.error("event rejected, unknown tenant. Did you register one in the processors.config?, {}", eventRequest.json()) return immediateFuture(eventResponse) } if (eventRequest.deliveryOption == PAYLOAD_ONLY && eventRequest.payload == null) { val eventResponse = eventRequest.toResponse() eventResponse.eventStatus = REJECTED eventResponse.error = Error(400, "payload must not be null for deliveryOption $PAYLOAD_ONLY") l.error("event rejected, null payload for '$PAYLOAD_ONLY' option: $eventRequest") return immediateFuture(eventResponse) } try { ZonedDateTime.parse(eventRequest.eventTime) } catch (e: Exception) { val eventResponse = eventRequest.toResponse() eventResponse.eventStatus = REJECTED eventResponse.error = Error(400, "event time can not be parsed. 
Must be in ISO 8601 format.") l.error("event rejected, bad event time format, {}", eventRequest.json()) return immediateFuture(eventResponse) } if (ZonedDateTime.parse(eventRequest.eventTime).isBefore(nowUTC().plusMinutes(lapseOffset.toLong()))) { val eventResponse = eventRequest.toResponse() eventResponse.eventStatus = TRIGGERED eventResponse.triggeredAt = nowUTC().toString() if (l.isDebugEnabled) l.debug("lapsed event received, marking it {}, eventRequest: {}", TRIGGERED, eventRequest.json()) return immediateFuture(eventResponse) } return null } internal class CountIncrementer : Idso(EVENT_RECEIVER_ADD_EVENT), EntryProcessor, EntryBackupProcessor { companion object { private val l = logger() } override fun getBackupProcessor() = this override fun processBackup(entry: MutableEntry?) { process(entry!!) } override fun process(entry: MutableEntry): Long? { val b = if (entry.value == null) entityProvider().let { it.raw(it.selector(Bucket::class.java)) } else entry.value!! b.count = (b.count ?: 0) + 1L b.updatedAt = nowUTC() if (b.status == null) b.status = UN_PROCESSED entry.setValue(b) if (l.isDebugEnabled) l.debug("bucket-id: {}, old-count: {}, new-count: {} ", entry.key, b.count!! - 1, b.count) return b.count } override fun writeData(out: ObjectDataOutput) { } override fun readData(`in`: ObjectDataInput) { } } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/api/EventService.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.api

import com.google.common.base.Throwables.getStackTraceAsString
import com.walmartlabs.bigben.BigBen.module
import com.walmartlabs.bigben.entities.*
import com.walmartlabs.bigben.entities.EventStatus.*
import com.walmartlabs.bigben.extns.*
import com.walmartlabs.bigben.processors.ProcessorConfig
import com.walmartlabs.bigben.processors.ProcessorRegistry
import com.walmartlabs.bigben.tasks.StatusTask
import com.walmartlabs.bigben.utils.*
import com.walmartlabs.bigben.utils.hz.Hz
import com.walmartlabs.bigben.utils.hz.Service
import java.io.Serializable
import java.util.concurrent.Callable

/**
 * Created by smalik3 on 2/27/18
 *
 * Service facade over event scheduling: cluster status, bulk schedule/remove,
 * tenant processor registration, and event lookup / dry-run firing.
 * NOTE(review): generic type parameters appear stripped by extraction throughout this file
 * (e.g. 'events: List' was presumably 'List<EventRequest>') -- confirm against the original source.
 */
class EventService(
    private val hz: Hz,
    private val service: Service,
    private val receiver: EventReceiver
) {
    companion object {
        private val l = logger()
    }

    /** Fans a StatusTask out to every cluster member; a member's failure is reported as its stack trace. */
    fun clusterStats() = response {
        hz.hz.getExecutorService("default").submitToAllMembers(StatusTask(service.name))
            .mapKeys { it.key.address.toString() }
            .mapValues { it.value.result { "Error: ${getStackTraceAsString(it)}" } }
    }

    /**
     * Bulk schedule: UPSERT requests are added, REMOVE requests are deleted.
     * Events that validated as already lapsed (TRIGGERED) are fired immediately via module().
     * HTTP status: 400 if every event was rejected, 206 on partial rejection, 200 otherwise.
     */
    fun schedule(events: List) = response {
        events.map { if (it.mode == Mode.UPSERT) receiver.addEvent(it) else receiver.removeEvent(it.id!!, it.tenant!!) }
            .reduce().result { emptyList() }.run {
                // fire lapsed events right away; failures are only logged (best-effort)
                filter { it.eventStatus == TRIGGERED }.map { module()(it.event()) }
                    .done({ l.error("error in triggering lapsed events:", it.rootCause()) }) {
                        it!!.forEach {
                            l.warn(
                                "event was triggered immediately (likely lapsed), event bucketId: {}, tenant: {}, " +
                                        "eventTime: {}, currentTime: {}", it.xrefId, it.tenant, it.eventTime, nowUTC()
                            )
                        }
                    }
                count { it.eventStatus == REJECTED }.let {
                    when {
                        it == events.size -> APIResponse(this, 400)
                        it > 0 -> APIResponse(this, 206)
                        else -> APIResponse(this)
                    }
                }
            }
    }

    /**
     * Persists a tenant's ProcessorConfig under the "tenants" KV key, then broadcasts a
     * ProcessRegisterTask to every member so each node registers the processor locally.
     */
    fun registerProcessor(config: ProcessorConfig) = response {
        if (l.isInfoEnabled) l.info("saving the tenant config: $config")
        require(config.tenant != null) { "tenant must not be null" }
        save { it.key = "tenants"; it.column = config.tenant; it.value = config.json() }
        if (l.isInfoEnabled) l.info("broadcasting the tenant config to all members: $config")
        hz.hz.getExecutorService("default").submitToAllMembers(ProcessRegisterTask(config))
            .mapValues { it.value.listenable() }.values.toList().reduce().result { throw RuntimeException("") }
        module().registeredConfigs()
    }

    fun registeredTenants() = response { module().registeredConfigs() }

    /** Looks an event up by (id, tenant) without firing it. */
    fun find(id: String, tenant: String) = response { find(EventRequest().apply { this.id = id; this.tenant = tenant }, false) }

    /** Looks an event up by (id, tenant) and re-fires it through the processor registry. */
    fun dryrun(id: String, tenant: String) = response { find(EventRequest().apply { this.id = id; this.tenant = tenant }, true) }

    // Resolves (xrefId, tenant) -> event lookup row -> full event row; maps the event onto the response.
    // When fire=true the loaded event is pushed through module() (used by dryrun).
    private fun find(eventRequest: EventRequest, fire: Boolean): EventResponse? {
        val eventResponse = eventRequest.toResponse()
        return if (eventRequest.id != null && eventRequest.id!!.trim().isNotEmpty()) {
            fetch { it.xrefId = eventRequest.id; it.tenant = eventRequest.tenant }.result { null }
                ?.let { el ->
                    fetch { it.id = el.eventId; it.eventTime = el.eventTime; it.shard = el.shard; it.bucketId = el.bucketId }.result { null }?.run {
                        eventResponse.also {
                            it.eventId = id; it.eventTime = eventTime?.toString(); it.payload = payload
                            // triggeredAt is only meaningful once the event has actually been processed
                            it.eventStatus = status; if (status != UN_PROCESSED && status != null) it.triggeredAt = processedAt?.toString(); it.deliveryOption = deliveryOption(this)
                            if (error != null) it.error = com.walmartlabs.bigben.entities.Error(500, error)
                        }.also { if (fire) module()(this) }
                    }
                }
        } else {
            throw IllegalArgumentException("null id")
        }
    }

    /** Serializable member-task: registers the tenant config on the receiving node. */
    class ProcessRegisterTask(private val config: ProcessorConfig) : Serializable, Callable {
        override fun call() = module().register(config)
    }
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/core/BucketManager.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
* #L%
 */
package com.walmartlabs.bigben.core

import com.google.common.collect.HashMultimap
import com.google.common.collect.Multimap
import com.google.common.util.concurrent.Futures.immediateFuture
import com.google.common.util.concurrent.ListenableFuture
import com.google.common.util.concurrent.ListenableScheduledFuture
import com.google.common.util.concurrent.MoreExecutors.listeningDecorator
import com.walmartlabs.bigben.BigBen.entityProvider
import com.walmartlabs.bigben.core.ScheduleScanner.Companion.BUCKET_CACHE
import com.walmartlabs.bigben.entities.Bucket
import com.walmartlabs.bigben.entities.EventStatus
import com.walmartlabs.bigben.entities.EventStatus.*
import com.walmartlabs.bigben.extns.toSet
import com.walmartlabs.bigben.utils.*
import com.walmartlabs.bigben.utils.commons.Props.int
import com.walmartlabs.bigben.utils.hz.Hz
import java.time.ZonedDateTime
import java.util.*
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ScheduledThreadPoolExecutor
import java.util.concurrent.ThreadFactory
import java.util.concurrent.TimeUnit.SECONDS
import java.util.concurrent.atomic.AtomicInteger

/**
 * Created by smalik3 on 2/21/18
 *
 * Tracks in-memory snapshots of event buckets (shards awaiting / in processing), kicks off the
 * background load of older buckets, times out stuck shards, and syncs final statuses to storage.
 * NOTE(review): generic parameters appear stripped by extraction in this file
 * (e.g. 'ConcurrentHashMap' / 'ListenableFuture>' were presumably parameterized) -- confirm against the original source.
 */
class BucketManager(private val maxBuckets: Int, private val maxProcessingTime: Int, private val bucketWidth: Int, hz: Hz) {

    companion object {
        private val l = logger()
        private val index = AtomicInteger()
        // shared scheduler, also used by BucketsLoader
        val scheduler = listeningDecorator(ScheduledThreadPoolExecutor(4, ThreadFactory { Thread(it, "BucketManager-${index.getAndIncrement()}") }))!!
        /** A zero-count Bucket placeholder with status EMPTY for ids that have no stored row. */
        internal fun emptyBucket(bucketId: ZonedDateTime) = entityProvider().let {
            it.raw(it.selector(Bucket::class.java)).apply { this.bucketId = bucketId; count = 0L; status = EMPTY }
        }
    }

    private val shardSize = int("events.receiver.shard.size")
    private val statusSyncer = StatusSyncer()
    // bucketId -> BucketSnapshot for every bucket currently under management
    private val buckets = ConcurrentHashMap()
    private val cache = hz.hz.getMap(BUCKET_CACHE)
    @Volatile
    private var bucketsLoader: BucketsLoader? = null
    // statuses that map to a plain "with(...)" snapshot (null treated as UN_PROCESSED)
    private val eventStatuses = setOf(null, EMPTY, UN_PROCESSED, PROCESSED)

    /**
     * Returns a multimap of bucketId -> shard indices that still need processing, for this
     * bucket and any previously-loaded ones. Lazily starts the background BucketsLoader on first call.
     * On cache-load failure the partial result is returned and the bucket is retried on the next scan.
     */
    fun getProcessableShardsForOrBefore(bucketId: ZonedDateTime): ListenableFuture> {
        if (bucketsLoader == null) {
            if (l.isInfoEnabled) l.info("starting the background load of previous buckets")
            val fetchSize = int("buckets.background.load.fetch.size")
            bucketsLoader = BucketsLoader(maxBuckets - 1, fetchSize, bucketWidth, bucketId) {
                buckets[it.bucketId!!] = when (it.status) {
                    in eventStatuses -> BucketSnapshot.with(it.bucketId!!, it.count!!, shardSize, it.status ?: UN_PROCESSED)
                    ERROR -> {
                        // a bucket marked ERROR must carry the failed shard indices; rebuild the snapshot from them
                        require(it.failedShards != null && it.failedShards!!.isNotEmpty()) { "${it.bucketId} is marked $ERROR but has no failed shards information" }
                        if (l.isInfoEnabled) l.info("bucket ${it.bucketId} has shard failures: ${it.failedShards}, scheduling them for reprocessing")
                        BucketSnapshot(it.bucketId!!, it.count!!, BitSet(), it.failedShards!!.fold(BitSet()) { b, i -> b.apply { set(i) } })
                    }
                    else -> throw IllegalArgumentException("invalid bucket status: $it")
                }
            }.apply { run() }
        }
        return HashMultimap.create().let { shards ->
            cache.getAsync(bucketId).listenable().transform {
                val bucket = it ?: emptyBucket(bucketId)
                if (buckets.putIfAbsent(bucketId, BucketSnapshot.with(bucketId, bucket.count!!, shardSize, bucket.status!!)) != null) {
                    l.warn("bucket with bucketId {} already existed in the cache, this is highly unusual", bucketId)
                }
                // collect every shard still awaiting processing, across all managed buckets
                buckets.entries.filter { e -> e.value.awaiting.cardinality() > 0 }.forEach { e -> e.value.awaiting.stream().forEach { s -> shards.put(e.key, s) } }
                if (l.isInfoEnabled) l.info("processable shards at bucket: {}, are => {}", bucketId, shards)
                if (!shards.containsKey(bucketId)) {
                    if (l.isInfoEnabled) l.info("no events in the bucket: {}", bucketId)
                }; shards
            }.catching { e -> shards.also { l.warn("error in loading bucket: {}, will be retried again during next scan", bucketId, e.rootCause()) } }
        }
    }

    /** Marks the given (bucket, shard) pairs as in-flight and arms the processing timeout. */
    internal fun registerForProcessing(pairs: Collection>) {
        pairs.forEach { p -> buckets[p.first]!!.processing(p.second) }
        purgeIfNeeded()
        startShardsTimer(pairs)
    }

    // schedules a one-shot check that flags shards still in-flight after maxProcessingTime seconds
    private fun startShardsTimer(pairs: Collection>): ListenableScheduledFuture<*> {
        return pairs.sortedWith(Comparator { p1, p2 -> p1.first.compareTo(p2.first).let { if (it != 0) it else p1.second.compareTo(p2.second) } })
            .map { "${it.first}[${it.second}]" }.toList().let {
                if (l.isDebugEnabled) l.debug("starting processing timer for shards: {}", it)
                scheduler.schedule({ checkShardsStatus(pairs, it) }, maxProcessingTime.toLong(), SECONDS)
            }
    }

    // timeout handler: any shard still marked processing is treated as failed
    @Synchronized
    private fun checkShardsStatus(pairs: Collection>, shards: List) {
        try {
            pairs.forEach {
                val bd = buckets[it.first]
                if (bd != null && bd.processing.get(it.second)) {
                    l.warn("bulk timer for shard: {}[{}] expired, marking the shard as failure", it.first, it.second)
                    bd.done(it.second, ERROR)
                }
            }
        } catch (e: Exception) {
            l.error("error in timing out the shards for processing, shards: {}", shards, e)
        }
    }

    /** Records the terminal status of a single shard; a purged bucket is silently ignored. */
    @Synchronized
    internal fun shardDone(bucketId: ZonedDateTime, shard: Int?, status: EventStatus) {
        val bd = buckets[bucketId]
        if (bd == null) {
            l.warn("bucket {} not found in cache, might have been purged, ignoring this call", bucketId)
            return
        }
        bd.done(shard!!, status)
    }

    private val noOp = immediateFuture(null)

    /** Finalizes a bucket's status (PROCESSED clears all shards; ERROR keeps failed ones) and persists it. */
    @Synchronized
    internal fun bucketProcessed(bucketId: ZonedDateTime, status: EventStatus): ListenableFuture {
        val bd = buckets[bucketId]
        if (bd == null) {
            l.warn("bucket {} not found in cache, this is extremely unusual", bucketId)
            return noOp
        }
        bd.processing.clear()
        if (status == PROCESSED) {
            if (l.isInfoEnabled) l.info("bucket {} done, marking it as {}, all shards done", bucketId, status)
            bd.awaiting.clear()
        } else if (status == ERROR)
            l.warn("bucket {} done, marking it as {}, failed shards are: {}", bucketId, status, bd.awaiting)
        return statusSyncer.syncBucket(bucketId, status, true, bd.awaiting.toSet() + bd.processing.toSet())
    }

    /**
     * Evicts the oldest snapshots once more than maxBuckets are held, persisting each evicted
     * bucket's final status (PROCESSED or ERROR). Buckets with in-flight shards are skipped.
     */
    fun purgeIfNeeded() {
        when {
            buckets.size <= maxBuckets -> if (l.isDebugEnabled) l.debug("nothing to purge")
            else -> {
                if (l.isDebugEnabled) l.debug("initiating purge check for buckets: {}", this.buckets)
                val task = {
                    buckets.keys.sorted().take(buckets.size - maxBuckets).map { b ->
                        buckets[b]!!.let {
                            if (it.processing.cardinality() > 0) {
                                if (l.isDebugEnabled) l.debug("skipping purge of bucket {}, shards are still being processed", b)
                                immediateFuture(it)
                            } else {
                                if (l.isDebugEnabled) l.debug("purging bucket snapshot: {}", it)
                                val bs = buckets.remove(it.id)
                                when {
                                    it.count == 0L -> immediateFuture(it)
                                    it.awaiting.cardinality() == 0 -> {
                                        if (l.isDebugEnabled) l.debug("bucket {} is processed", b)
                                        statusSyncer.syncBucket(b, PROCESSED, false, bs!!.awaiting.toSet() + bs.processing.toSet()).transform { bs }
                                    }
                                    else -> {
                                        l.warn("bucket {} is marked error as final status", b)
                                        statusSyncer.syncBucket(b, ERROR, false, bs!!.awaiting.toSet() + bs.processing.toSet()).transform { bs }
                                    }
                                }
                            }
                        }
                    }.reduce()
                }
                task.retriable().done({ l.error("error in purging snapshots", it.rootCause()) }) { if (l.isInfoEnabled) l.info("purged buckets: {}", it?.map { it!!.id }) }
            }
        }
    }
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/core/BucketSnapshot.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
* #L%
 */
package com.walmartlabs.bigben.core

import com.walmartlabs.bigben.entities.EventStatus
import com.walmartlabs.bigben.entities.EventStatus.ERROR
import com.walmartlabs.bigben.entities.EventStatus.PROCESSED
import com.walmartlabs.bigben.utils.logger
import java.time.ZonedDateTime
import java.util.*

/**
 * In-memory state of one bucket: which shards are in-flight ('processing') and which
 * still need work or failed ('awaiting'), as BitSets indexed by shard number.
 */
internal data class BucketSnapshot(val id: ZonedDateTime, val count: Long, val processing: BitSet, val awaiting: BitSet) {

    companion object {
        private val l = logger()
        private val EMPTY = BitSet()

        /**
         * Builds a snapshot for a bucket of 'count' events split into ceil(count / shardSize) shards.
         * An empty or already-PROCESSED bucket gets the shared EMPTY awaiting set (nothing to do);
         * otherwise every shard starts out awaiting.
         */
        fun with(id: ZonedDateTime, count: Long, shardSize: Int, status: EventStatus): BucketSnapshot {
            val shards = (if (count % shardSize == 0L) count / shardSize else count / shardSize + 1).toInt()
            val awaiting = if (count == 0L || PROCESSED == status) EMPTY else {
                BitSet(shards).apply { set(0, shards) }
            }
            when {
                count == 0L -> if (l.isDebugEnabled) l.debug("bucket: {} => empty, no events", id)
                awaiting === EMPTY -> if (l.isDebugEnabled) l.debug("bucket: {} => already done", id)
                else -> {
                    if (l.isDebugEnabled) l.debug("bucket: {} => has {} events, resulting in {} shards", id, count, shards)
                }
            }
            return BucketSnapshot(id, count, BitSet(), awaiting)
        }
    }

    /** Moves a shard from awaiting to processing. */
    fun processing(shard: Int) = apply { awaiting.clear(shard); processing.set(shard) }

    /** Terminal transition for a shard: PROCESSED clears it, ERROR moves it back to awaiting for retry. */
    fun done(shard: Int, status: EventStatus) {
        processing.clear(shard)
        when (status) {
            PROCESSED -> {
                if (l.isInfoEnabled) l.info("shard: {}[{}] finished successfully", id, shard)
                awaiting.clear(shard)
            }
            ERROR -> {
                if (l.isInfoEnabled) l.info("shard: {}[{}] finished with error", id, shard)
                awaiting.set(shard)
            }
            else -> throw IllegalArgumentException("invalid status value: $status")
        }
    }
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/core/BucketsLoader.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
* =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.core

import com.google.common.util.concurrent.ListenableScheduledFuture
import com.walmartlabs.bigben.core.BucketManager.Companion.scheduler
import com.walmartlabs.bigben.entities.Bucket
import com.walmartlabs.bigben.extns.fetch
import com.walmartlabs.bigben.utils.commons.Props
import com.walmartlabs.bigben.utils.commons.TaskExecutor
import com.walmartlabs.bigben.utils.done
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.rootCause
import java.time.ZonedDateTime
import java.util.concurrent.TimeUnit.SECONDS
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicReference

/**
 * Created by smalik3 on 2/22/18
 *
 * Background loader of the previous 'lookbackRange' buckets (ending at 'bucketId'),
 * fetched in chunks of 'fetchSize' every 'waitInterval' seconds; each loaded (or missing,
 * substituted-empty) bucket is handed to 'consumer'.
 * NOTE(review): generic parameters appear stripped by extraction
 * (e.g. 'AtomicReference>' was presumably parameterized) -- confirm against the original source.
 */
class BucketsLoader(private val lookbackRange: Int, private val fetchSize: Int, private val bucketWidth: Int,
                    private val bucketId: ZonedDateTime, private val consumer: (Bucket) -> Unit) : Runnable {

    companion object {
        private val l = logger()
    }

    private val waitInterval = Props.int("buckets.background.load.wait.interval.seconds")
    private val runningJob = AtomicReference>()
    private val taskExecutor = TaskExecutor(setOf(Exception::class.java))

    /** Kicks off the chunked load immediately on the shared scheduler. */
    override fun run() {
        l.info("starting the background load of buckets at a rate of {} buckets per {} seconds until {} buckets are loaded", fetchSize, waitInterval, lookbackRange)
        runningJob.set(scheduler.schedule({ load(0) }, 0, SECONDS))
    }

    // loads one chunk of buckets starting after 'fromIndex', then reschedules itself for the next chunk
    private fun load(fromIndex: Int) {
        if (fromIndex >= lookbackRange) {
            if (l.isInfoEnabled) l.info("lookback range reached, bucket loading is finished")
        } else {
            if (l.isInfoEnabled) l.info("initiating background load of buckets from index: {}", fromIndex)
            val currentBucketIndex = AtomicReference()
            val atLeastOne = AtomicBoolean()
            (1..fetchSize).forEach {
                val bucketIndex = fromIndex + it
                if (bucketIndex <= lookbackRange) {
                    currentBucketIndex.set(bucketIndex)
                    // each step back is one bucketWidth (seconds) earlier than the anchor bucketId
                    val bId = bucketId.minusSeconds((bucketIndex * bucketWidth).toLong())
                    if (l.isDebugEnabled) l.debug("loading bucket: {}, failures will be retried {} times, every {} seconds", bId, lookbackRange - bucketIndex + 1, bucketWidth)
                    taskExecutor.async("bucket-load:$bId", lookbackRange - bucketIndex + 1, bucketWidth, 1) { fetch { it.bucketId = bId } }
                        .done({ l.error("error in loading bucket {}, system is giving up", bId, it.rootCause()) }) {
                            if (l.isDebugEnabled) l.debug("bucket {} loaded successfully", bId)
                            // a missing row is surfaced to the consumer as an empty bucket
                            consumer(it.apply { atLeastOne.set(true) } ?: BucketManager.emptyBucket(bId))
                        }
                }
            }
            // if nothing was found in this chunk, move on immediately; otherwise throttle by waitInterval
            runningJob.set(scheduler.schedule({ load(currentBucketIndex.get()) }, (if (!atLeastOne.get()) 0 else waitInterval).toLong(), SECONDS))
        }
    }
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/core/ScheduleScanner.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.core

import com.google.common.collect.Iterators
import com.google.common.collect.LinkedHashMultimap
import com.google.common.collect.Multimap
import com.google.common.util.concurrent.ListenableFuture
import com.google.common.util.concurrent.MoreExecutors.listeningDecorator
import com.hazelcast.core.IExecutorService
import com.hazelcast.core.Member
import com.walmartlabs.bigben.entities.EventStatus.ERROR
import com.walmartlabs.bigben.entities.EventStatus.PROCESSED
import com.walmartlabs.bigben.entities.ShardStatus
import com.walmartlabs.bigben.entities.ShardStatusList
import com.walmartlabs.bigben.extns.bucketize
import com.walmartlabs.bigben.extns.nextScan
import com.walmartlabs.bigben.extns.nowUTC
import com.walmartlabs.bigben.tasks.BulkShardTask
import com.walmartlabs.bigben.utils.*
import com.walmartlabs.bigben.utils.commons.Props.int
import com.walmartlabs.bigben.utils.hz.Hz
import com.walmartlabs.bigben.utils.hz.Service
import java.lang.Runtime.getRuntime
import java.time.Instant
import java.time.ZoneOffset.UTC
import java.time.ZonedDateTime
import java.time.temporal.ChronoUnit
import java.util.concurrent.Executors.newFixedThreadPool
import java.util.concurrent.ScheduledThreadPoolExecutor
import java.util.concurrent.TimeUnit.MILLISECONDS
import java.util.concurrent.TimeUnit.MINUTES
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.atomic.AtomicReference
import kotlin.system.exitProcess

/**
 * Created by smalik3 on 2/23/18
 *
 * Periodic scanner: every scan-interval it asks the BucketManager for processable shards,
 * distributes them across cluster members, submits BulkShardTasks, and persists final bucket statuses.
 * NOTE(review): generic parameters appear stripped by extraction in this file
 * (e.g. 'Multimap' / 'LinkedHashMultimap.create>()' were presumably parameterized) -- confirm against the original source.
 */
class ScheduleScanner(private val hz: Hz) : Service {

    companion object {
        internal const val BUCKET_CACHE = "bucketCache"
        const val EVENT_SCHEDULER = "event_scheduler"

        private val l = logger()
        private val index = AtomicInteger()
        private val shardIndexer = AtomicInteger()
        // scan ticks run here; shard fan-out runs on a dedicated submitter pool
        private val scheduler = listeningDecorator(ScheduledThreadPoolExecutor(getRuntime().availableProcessors()) { r -> Thread(r, "InternalScheduler#" + index.getAndIncrement()) })
        private val shardSubmitters = listeningDecorator(newFixedThreadPool(int("events.num.shard.submitters")) { r -> Thread(r, "ShardSubmitter#" + shardIndexer.getAndIncrement()) })
    }

    private val isShutdown = AtomicReference(false)
    private lateinit var bucketManager: BucketManager
    private val bucketWidth = int("events.schedule.scan.interval.minutes")
    // the bucket id handled by the most recent scan; the next scan processes lastScan + bucketWidth
    @Volatile
    private lateinit var lastScan: ZonedDateTime

    override val name: String = "ScheduleScanner"

    /** Creates the BucketManager sized to the backlog-check limit; widths converted to seconds. */
    override fun init() {
        if (l.isInfoEnabled) l.info("initing the event scheduler")
        val lookbackRange = int("buckets.backlog.check.limit")
        bucketManager = BucketManager(lookbackRange + 1, 2 * bucketWidth * 60, bucketWidth * 60, hz)
    }

    /** Aligns the first scan to the next bucket boundary, then scans at a fixed interval (plus one immediate scan). */
    override fun execute() {
        if (l.isInfoEnabled) l.info("executing the EventScheduleScanner")
        val scanInterval = int("events.schedule.scan.interval.minutes")
        if (l.isInfoEnabled) l.info("calculating the next scan bucketId")
        val now = nowUTC()
        val nextScan = nextScan(now, scanInterval)
        val delay = ChronoUnit.MILLIS.between(now, nextScan)
        val bucket = ZonedDateTime.ofInstant(Instant.ofEpochMilli(bucketize(now.toInstant().toEpochMilli(), scanInterval)), UTC)
        lastScan = bucket.minusMinutes(bucketWidth.toLong())
        if (l.isInfoEnabled) l.info("first-scan at: {}, for bucket: {}, next-scan at: {}, " +
                "initial-delay: {} ms, subsequent-scans: after every {} minutes", now, bucket, nextScan, delay, scanInterval)
        scheduler.scheduleAtFixedRate({ this.scan() }, delay, MILLISECONDS.convert(scanInterval.toLong(), MINUTES), MILLISECONDS)
        if (l.isInfoEnabled) l.info("executing first time scan")
        scan()
    }

    // one scan tick: advance lastScan by one bucket and process it; an Error (not Exception) kills the process
    private fun scan() {
        try {
            if (isShutdown.get()) {
                if (l.isInfoEnabled) l.info("system is shutdown, no more schedules will be processed")
                return
            }
            val currentBucketId = lastScan.plusMinutes(bucketWidth.toLong())
            lastScan = currentBucketId
            scan(currentBucketId, bucketManager)
        } catch (e: Throwable) {
            l.error("error in running the scheduler", e.rootCause())
            if (e is Error) {
                l.error("system will exit now")
                exitProcess(1)
            }
        }
    }

    /**
     * Processes one bucket: fetches processable shards, spreads them over the members
     * (round-robin via calculateDistro), submits them with retries, and records per-bucket outcomes.
     */
    fun scan(currentBucketId: ZonedDateTime, bucketManager: BucketManager) {
        if (l.isInfoEnabled) l.info("scanning the schedule(s) for bucket: {}", currentBucketId)
        bucketManager.getProcessableShardsForOrBefore(currentBucketId).done({ l.error("error in processing bucket: {}", currentBucketId, it!!.rootCause()) }) {
            try {
                if (it!!.isEmpty) {
                    if (l.isInfoEnabled) l.info("nothing to schedule for bucket: {}", currentBucketId)
                    return@done
                }
                shardSubmitters.submit {
                    try {
                        if (l.isDebugEnabled) l.debug("{}, shards to be processed: => {}", currentBucketId, it)
                        calculateDistro(it).asMap().run {
                            if (l.isInfoEnabled) l.info("{}, schedule distribution: => {}", currentBucketId,
                                mapKeys { it.key.address.toString() }.mapValues { it.value.joinToString(",") { "${it.first}[${it.second}]" } }.toSortedMap())
                            val iterator = Iterators.cycle(keys)
                            val executorService = hz.hz.getExecutorService(EVENT_SCHEDULER)
                            entries.map {
                                // each member's shard batch is submitted with exponential-backoff retries
                                { submitShards(executorService, iterator.next(), it.value, currentBucketId, bucketManager) }
                                    .retriable("shards-submit", int("events.submit.max.retries"), int("events.submit.initial.delay"), int("events.submit.backoff.multiplier")).transform { it!!.list }
                            }.reduce().done({ l.error("schedule for bucket {} finished abnormally", currentBucketId, it.rootCause()) }) {
                                if (l.isDebugEnabled) l.debug("schedule for bucket {} finished normally => {}", currentBucketId, it)
                                // bucket -> true if any of its shards ended in ERROR
                                val buckets = it!!.map { it!! }.flatten().filterNotNull().groupBy { it.bucketId!! }
                                    .mapValues { it.value.fold(false) { hasError, ss -> hasError || (ss.status == ERROR) } }
                                if (l.isDebugEnabled) l.debug("bucket-scan: {}, final buckets with statuses to be persisted: {}", currentBucketId, buckets)
                                buckets.map { bucketManager.bucketProcessed(it.key, if (it.value) ERROR else PROCESSED) }
                                    .done({ l.error("bucket-scan: {}, failed to update the scan-status: {}", currentBucketId, buckets.keys, it.rootCause()) }) {
                                        if (l.isDebugEnabled) l.debug("bucket-scan: {}, successfully updated the scan-status: {}", currentBucketId, buckets.keys)
                                    }
                            }
                        }
                    } catch (e: Exception) {
                        l.error("error in processing bucket: {}", currentBucketId, e.rootCause())
                    }
                }
            } catch (e: Exception) {
                l.error("error in processing bucket: {}", currentBucketId, e.rootCause())
            }
        }
    }

    // shuffles remote members (local member last) and assigns shard pairs round-robin
    private fun calculateDistro(shards: Multimap): LinkedHashMultimap> {
        val members = hz.hz.cluster.members.toMutableSet().apply { remove(hz.hz.cluster.localMember) }.toList().shuffled().toMutableList().apply { add(hz.hz.cluster.localMember) }
        val entries = shards.entries().toList()
        return LinkedHashMultimap.create>().apply {
            val size = members.size
            for (i in entries.indices) {
                val e = entries[i]
                put(members[i % size], e.key to e.value)
            }
        }
    }

    // submits one member's shard batch; a transport failure maps every shard to ERROR, and results feed shardDone
    private fun submitShards(executorService: IExecutorService, member: Member, shardsData: Collection>,
                             bucket: ZonedDateTime, bucketManager: BucketManager): ListenableFuture {
        if (l.isDebugEnabled) l.debug("{}, submitting for execution to member {}, shards: {}", bucket, member.socketAddress, shardsData)
        bucketManager.registerForProcessing(shardsData)
        return executorService.submitToMember(BulkShardTask(shardsData), member).listenable()
            .catching { ShardStatusList(shardsData.map { ShardStatus(it.first, it.second, ERROR) }) }
            .done({
                l.error("{}, member {} finished abnormally for shards: {}", bucket, member.socketAddress, shardsData, it)
                shardsData.forEach { bucketManager.shardDone(it.first, it.second, ERROR) }
            }) {
                if (l.isDebugEnabled) l.debug("{}, member {} finished normally for shards: {}", bucket, member.socketAddress, it)
                it?.list?.forEach { bucketManager.shardDone(it!!.bucketId!!, it.shard!!, it.status!!) }
            }
    }

    override fun destroy() {
        if (l.isInfoEnabled) l.info("destroying the event scheduler")
    }
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/core/StatusSyncer.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
* #L%
 */
package com.walmartlabs.bigben.core

import com.google.common.util.concurrent.ListenableFuture
import com.walmartlabs.bigben.entities.Bucket
import com.walmartlabs.bigben.entities.Event
import com.walmartlabs.bigben.entities.EventStatus
import com.walmartlabs.bigben.extns.nowUTC
import com.walmartlabs.bigben.extns.save
import com.walmartlabs.bigben.utils.done
import com.walmartlabs.bigben.utils.logger
import com.walmartlabs.bigben.utils.retriable
import java.time.ZonedDateTime

/** Persists terminal bucket/shard statuses to the entity store, with retries for bucket syncs. */
internal class StatusSyncer {

    companion object {
        private val l = logger()
    }

    /**
     * Saves a bucket's final status; empty failedShards is normalized to null.
     * The save is retried (retriable) before giving up; outcome is logged either way.
     */
    fun syncBucket(bucketId: ZonedDateTime, status: EventStatus, setProcessedAt: Boolean, failedShards: Set?): ListenableFuture {
        if (l.isDebugEnabled) l.debug("bucket {} is done, syncing status as {}", bucketId, status)
        return {
            save {
                it.bucketId = bucketId; it.status = status
                if (setProcessedAt) it.processedAt = nowUTC()
                if (failedShards != null && failedShards.isNotEmpty()) it.failedShards = failedShards else it.failedShards = null
            }
        }.retriable().done({ l.error("bucket {} could not be synced with status {}, after multiple retries", bucketId, status, it) }) {
            if (l.isInfoEnabled) l.info("bucket {} is successfully synced as {}", bucketId, status)
        }
    }

    /** Saves one event's status (and optional payload); no retry wrapper here, just logging. */
    fun syncShard(bucketId: ZonedDateTime, shard: Int, eventTime: ZonedDateTime, eventId: String, status: EventStatus, payload: String?): ListenableFuture {
        if (l.isDebugEnabled) l.debug("shard {}[{}] is done, syncing status as {}, payload: {}", bucketId, shard, status, payload)
        return save { it.id = eventId; it.eventTime = eventTime; it.status = status; if (payload != null) it.payload = payload }
            .done({ l.error("shard {}[{}] could not be synced with status {}, after multiple retries", bucketId, shard, status, it) }) {
                if (l.isInfoEnabled) l.info("shard {}[{}] is successfully synced with status {}", bucketId, shard, status)
            }
    }
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/entities/EntityProvider.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.entities

import com.google.common.util.concurrent.ListenableFuture
import com.walmartlabs.bigben.extns.epoch
import java.time.ZonedDateTime

/**
 * Created by smalik3 on 2/25/18
 *
 * SPI for the pluggable storage backend (e.g. Cassandra): selector-based CRUD over entities.
 * NOTE(review): generic type parameters appear stripped by extraction
 * (e.g. 'Class' was presumably 'Class<T>' and 'ListenableFuture' was parameterized) -- confirm against the original source.
 */
interface EntityProvider {
    fun selector(type: Class): T
    fun raw(selector: T): T
    fun fetch(selector: T): ListenableFuture
    fun kvs(selector: KV): ListenableFuture>
    fun save(selector: T): ListenableFuture
    fun remove(selector: T): ListenableFuture
    // escape hatch to the underlying driver/session object
    fun unwrap(): Any?
}

/** Pages events of one (bucket, shard), resuming from (eventTime, eventId) with an opaque paging context. */
interface EventLoader {
    fun load(bucketId: ZonedDateTime, shard: Int, fetchSize: Int, eventTime: ZonedDateTime = epoch(), eventId: String = "", context: Any? = null): ListenableFuture>>
}


================================================
FILE: lib/src/main/kotlin/com/walmartlabs/bigben/entities/entities.kt
================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.entities

import com.hazelcast.nio.ObjectDataInput
import com.hazelcast.nio.ObjectDataOutput
import com.hazelcast.nio.serialization.IdentifiedDataSerializable
import com.walmartlabs.bigben.entities.EventDeliveryOption.FULL_EVENT
import com.walmartlabs.bigben.entities.Mode.UPSERT
import com.walmartlabs.bigben.hz.HzObjectFactory
import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.SHARD_STATUS
import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.SHARD_STATUS_LIST
import com.walmartlabs.bigben.utils.json
import java.time.Instant.ofEpochMilli
import java.time.ZoneOffset.UTC
import java.time.ZonedDateTime
import java.time.ZonedDateTime.ofInstant

/**
 * Created by smalik3 on 2/21/18
 *
 * Core entity contracts and Hazelcast-serializable value types.
 * NOTE(review): generic/collection type parameters appear stripped by extraction
 * (e.g. 'Set', 'List' fields were presumably parameterized) -- confirm against the original source.
 */
enum class EventStatus {
    PROCESSED, ERROR, UN_PROCESSED, PROCESSING, TRIGGERED, EMPTY, REJECTED, ACCEPTED, UPDATED, DELETED
}

// a time bucket of events; status/count drive the scanner, failedShards drives error reprocessing
interface Bucket : IdentifiedDataSerializable {
    var bucketId: ZonedDateTime?
    var status: EventStatus?
    var count: Long?
    var processedAt: ZonedDateTime?
    var updatedAt: ZonedDateTime?
    var failedShards: Set?
}

// a scheduled event row, keyed into a (bucketId, shard)
interface Event : EventResponseMixin {
    var eventTime: ZonedDateTime?
    var bucketId: ZonedDateTime?
    var shard: Int?
    var id: String?
    var status: EventStatus?
    var error: String?
    var tenant: String?
    var processedAt: ZonedDateTime?
    var xrefId: String?
    var payload: String?
    var deliveryOption: EventDeliveryOption?
}

// reverse index: caller-supplied (tenant, xrefId) -> internal event coordinates
interface EventLookup {
    var tenant: String?
    var xrefId: String?
    var bucketId: ZonedDateTime?
    var shard: Int?
    var eventTime: ZonedDateTime?
    var eventId: String?
    var payload: String?
}

// generic key/column/value row (used e.g. for tenant configs under key "tenants")
interface KV {
    var key: String?
    var column: String?
    var value: String?
}

/** Base for Hazelcast IdentifiedDataSerializable types; id comes from the HzObjectFactory ObjectId ordinal. */
abstract class Idso(private val objectId: HzObjectFactory.ObjectId) : IdentifiedDataSerializable {
    override fun getFactoryId() = HzObjectFactory.BIGBEN_FACTORY_ID
    override fun getId() = objectId.ordinal
}

/** Idso variant for stateless callables: no-op (de)serialization. */
abstract class IdsoCallable(private val objectId: HzObjectFactory.ObjectId) : IdentifiedDataSerializable {
    override fun getFactoryId() = HzObjectFactory.BIGBEN_FACTORY_ID
    override fun getId() = objectId.ordinal
    override fun writeData(out: ObjectDataOutput?) {
    }

    override fun readData(`in`: ObjectDataInput?) {
    }
}

// outcome of processing one shard; wire format: epoch-millis bucketId, shard int, status ordinal byte
data class ShardStatus(var bucketId: ZonedDateTime? = null, var shard: Int? = null, var status: EventStatus? = null) : Idso(SHARD_STATUS) {
    override fun writeData(out: ObjectDataOutput) {
        out.writeLong(bucketId!!.toInstant().toEpochMilli())
        out.writeInt(shard!!)
        out.writeByte(status!!.ordinal)
    }

    override fun readData(`in`: ObjectDataInput) {
        bucketId = ofInstant(ofEpochMilli(`in`.readLong()), UTC)
        shard = `in`.readInt()
        status = EventStatus.values()[`in`.readByte().toInt()]
    }
}

// length-prefixed list of ShardStatus objects
data class ShardStatusList(var list: List? = null) : Idso(SHARD_STATUS_LIST) {
    override fun writeData(out: ObjectDataOutput) {
        out.writeInt(list!!.size)
        list!!.forEach { out.writeObject(it) }
    }

    override fun readData(`in`: ObjectDataInput) {
        list = (1..`in`.readInt()).map { `in`.readObject() }
    }
}

enum class Mode { UPSERT, REMOVE }
enum class EventDeliveryOption { FULL_EVENT, PAYLOAD_ONLY }

// inbound API payload; eventTime is an ISO-8601 string, parsed/validated by the receiver
open class EventRequest(
    var id: String? = null, var eventTime: String? = null, var tenant: String? = null,
    var payload: String? = null, var mode: Mode = UPSERT, var deliveryOption: EventDeliveryOption? = FULL_EVENT
) {
    override fun toString() = "EventRequest(${json()})"
}

class EventResponse(
    id: String? = null, eventTime: String? = null, tenant: String? = null,
    mode: Mode = UPSERT, payload: String? = null,
    var eventId: String? = null, var triggeredAt: String? = null,
    var eventStatus: EventStatus? = null, var error: Error?
= null, deliveryOption: EventDeliveryOption? = FULL_EVENT ) : EventRequest(id = id, eventTime = eventTime, tenant = tenant, mode = mode, payload = payload, deliveryOption = deliveryOption) { override fun toString() = "EventResponse(${json()})" } data class Error(val code: Int, val message: String?) interface EventResponseMixin { var eventResponse: EventResponse? } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/extns/_api_response_extns.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben.extns import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.utils.hz.Hz import com.walmartlabs.bigben.utils.json import com.walmartlabs.bigben.utils.logger import com.walmartlabs.bigben.utils.rootCause import com.walmartlabs.bigben.utils.stackTraceAsString import java.time.LocalDateTime import java.time.ZoneOffset import java.time.format.DateTimeParseException /** * Created by smalik3 on 6/29/18 */ private val l = logger("API") data class APIResponse( val entity: Any, val status: Int = 200, val headers: Map> = mutableMapOf() ) { fun header(name: String, value: String) = apply { (headers[name] ?: mutableListOf()).add(value) } } fun response(f: () -> Any?): APIResponse { val begin = LocalDateTime.now() val r = try { f()?.run { this as? APIResponse ?: APIResponse(this, 200) } ?: APIResponse(mapOf("status" to "not found"), 404) } catch (e: Exception) { val t = e.rootCause()!! l.error("error in processing request", t) val status = if (t is IllegalArgumentException || t is DateTimeParseException) 400 else 500 val message = "please contact engineering team with the below error signature" APIResponse( mutableMapOf("message" to (t.message?.let { """${t.message}${if (status == 500) " ($message)" else ""}""" } ?: "Unexpected error, $message")).apply { if (status == 500) { this["error"] = mapOf( "stack" to t.stackTraceAsString()!!, "node" to module().hz.cluster.localMember.address.host, "start_time" to begin, "duration" to (LocalDateTime.now().toInstant(ZoneOffset.UTC).toEpochMilli() - begin.toInstant(ZoneOffset.UTC).toEpochMilli()) ).json() } }, status ) } val end = LocalDateTime.now() r.header("Start-Time", begin.toString()).header("End-Time", end.toString()) .header("Duration", "${end.toInstant(ZoneOffset.UTC).toEpochMilli() - begin.toInstant(ZoneOffset.UTC).toEpochMilli()} ms") .header("Node", module().hz.cluster.localMember.address.host) return r } ================================================ FILE: 
lib/src/main/kotlin/com/walmartlabs/bigben/extns/_bigben_extns.kt ================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.walmartlabs.bigben.extns

import com.walmartlabs.bigben.BigBen.entityProvider
import com.walmartlabs.bigben.entities.Event
import com.walmartlabs.bigben.entities.EventDeliveryOption
import com.walmartlabs.bigben.entities.EventDeliveryOption.FULL_EVENT
import com.walmartlabs.bigben.entities.EventDeliveryOption.PAYLOAD_ONLY
import com.walmartlabs.bigben.entities.EventRequest
import com.walmartlabs.bigben.entities.EventResponse
import java.time.ZonedDateTime
import java.util.*
import java.util.UUID.randomUUID
import java.util.stream.Collectors

/** Echoes an incoming [EventRequest] as an [EventResponse] (same fields, no outcome yet). */
fun EventRequest.toResponse() = EventResponse(
    tenant = tenant, eventTime = eventTime, id = id, mode = mode, payload = payload, deliveryOption = deliveryOption
)

/** Converts a stored [Event] to an [EventResponse], preferring an already attached response. */
fun Event.toResponse() = eventResponse?.let { it } ?: EventResponse(
    id = xrefId, eventId = id, triggeredAt = processedAt?.toString(), tenant = tenant,
    eventTime = eventTime?.toString(), payload = payload, eventStatus = status, deliveryOption = deliveryOption(this)
)

/**
 * Reconstructs an [Event] entity from this response.
 * NOTE(review): `ZonedDateTime.parse(triggeredAt)` throws when [EventResponse.triggeredAt] is
 * null — callers appear to guarantee it is set; confirm before reusing elsewhere.
 * (Type argument of entityProvider() reconstructed from the `Event::class.java` selector.)
 */
fun EventResponse.event() = entityProvider<Event>().let { it.raw(it.selector(Event::class.java)) }.also {
    val t = ZonedDateTime.parse(triggeredAt)
    it.tenant = tenant; it.xrefId = id; it.eventTime = ZonedDateTime.parse(eventTime)!!; it.payload = payload
    it.id = eventId; it.bucketId = t.bucket(); it.processedAt = t
    if (eventId == null) it.deliveryOption = deliveryOption
}

/** Expands the set bits of this [BitSet] into a mutable set of their indices. */
fun BitSet.toSet(): MutableSet<Int> = stream().boxed().collect(Collectors.toSet())!!

/** Generates an event id; payload-only deliveries are marked with an "a-" prefix. */
fun eventId(req: EventRequest) = if (req.deliveryOption == FULL_EVENT) randomUUID().toString() else "a-${randomUUID()}"

/**
 * Infers the delivery option of an [event]: the id prefix ("a-" => PAYLOAD_ONLY, matching
 * [eventId]) wins over the stored [Event.deliveryOption]; null when neither is available.
 */
fun deliveryOption(event: Event): EventDeliveryOption? {
    return when {
        event.id != null -> if (event.id!!.startsWith("a-")) PAYLOAD_ONLY else FULL_EVENT
        event.deliveryOption != null -> event.deliveryOption
        else -> null
    }
}

================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/extns/_do_extns.kt ================================================
/*-
 * #%L
 * BigBen:lib
 * =======================================
 * Copyright (C) 2016 - 2018 Walmart Inc.
 * =======================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
* #L% */ package com.walmartlabs.bigben.extns import com.google.common.util.concurrent.ListenableFuture import com.walmartlabs.bigben.BigBen.entityProvider import com.walmartlabs.bigben.entities.KV inline fun fetch(selector: (T) -> Unit): ListenableFuture { return entityProvider().let { it.fetch(it.selector(T::class.java).apply { selector(this) }) } } inline fun kvs(selector: (KV) -> Unit): ListenableFuture> { return entityProvider().let { it.kvs(it.selector(KV::class.java).apply { selector(this) }) } } inline fun save(selector: (T) -> Unit): ListenableFuture { return entityProvider().let { it.save(it.selector(T::class.java).apply { selector(this) }) } } inline fun remove(selector: (T) -> Unit): ListenableFuture { return entityProvider().let { it.remove(it.selector(T::class.java).apply { selector(this) }) } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/extns/_time_extns.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben.extns import com.walmartlabs.bigben.utils.commons.Props import java.time.Instant.EPOCH import java.time.Instant.ofEpochMilli import java.time.ZoneOffset.UTC import java.time.ZonedDateTime import java.time.ZonedDateTime.now import java.time.ZonedDateTime.ofInstant import java.time.temporal.ChronoField.MILLI_OF_SECOND import java.time.temporal.ChronoField.MINUTE_OF_HOUR import java.time.temporal.ChronoUnit.HOURS import java.time.temporal.ChronoUnit.MINUTES import java.util.* /** * Created by smalik3 on 2/21/18 */ fun bucketize(instant: Long, bucketWidth: Int): Long { val epoch = ofEpochMilli(instant) val hours = epoch.truncatedTo(HOURS).toEpochMilli() val mins = epoch.truncatedTo(MINUTES).toEpochMilli() val delta = (mins - hours) / (60 * 1000) val boundary = delta / bucketWidth * bucketWidth return hours + boundary * 60 * 1000 } fun nextScan(zdt: ZonedDateTime, scanInterval: Int): ZonedDateTime { val minZdt = zdt.toInstant().truncatedTo(MINUTES).atZone(UTC) val currentMinutes = minZdt.get(MINUTE_OF_HOUR) val offset = scanInterval - currentMinutes % scanInterval return zdt.plusMinutes(offset.toLong()).withSecond(0).with(MILLI_OF_SECOND, 0).withNano(0) } fun utc(millis: Long): ZonedDateTime { return ofInstant(ofEpochMilli(millis), UTC) } private val epochZdt = ofInstant(EPOCH, UTC) fun epoch(): ZonedDateTime { return epochZdt } fun ZonedDateTime.toDate(): Date { return Date(toInstant().toEpochMilli()) } fun Date.toZdt(): ZonedDateTime { return utc(time) } fun nowUTC(): ZonedDateTime { return now(UTC) } fun ZonedDateTime?.bucket() = utc(bucketize(this?.toInstant()?.toEpochMilli() ?: throw NullPointerException("null time"), Props.int("events.schedule.scan.interval.minutes"))) ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/hz/BucketStore.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 
Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.hz import com.hazelcast.core.MapStore import com.walmartlabs.bigben.BigBen.entityProvider import com.walmartlabs.bigben.entities.Bucket import com.walmartlabs.bigben.extns.fetch import com.walmartlabs.bigben.utils.logger import com.walmartlabs.bigben.utils.reduce import com.walmartlabs.bigben.utils.retriable import java.time.ZonedDateTime import java.util.concurrent.TimeUnit.MINUTES /** * Created by smalik3 on 3/3/18 */ class BucketStore : MapStore { private val l = logger() private val provider = entityProvider() override fun deleteAll(keys: MutableCollection?) { throw UnsupportedOperationException("not supported") } override fun load(key: ZonedDateTime): Bucket? { return { fetch { it.bucketId = key } }.retriable("load-bucket: $key").get(1, MINUTES) } override fun loadAll(keys: Collection): Map { return keys.map { k -> { fetch { it.bucketId = k } }.retriable("load-bucket: $k") }.reduce().get(1, MINUTES).associate { it!!.bucketId!! 
to it } } override fun store(key: ZonedDateTime, value: Bucket) { if (l.isDebugEnabled) l.debug("saving bucket: {}", key); { provider.save(value.apply { bucketId = key }) }.retriable("save-bucket: $key").get(1, MINUTES) } override fun storeAll(map: Map) { if (l.isDebugEnabled) l.debug("saving buckets: {}", map.keys) map.entries.map { e -> { provider.save(e.run { value.bucketId = key; value }) }.retriable("save-bucket: ${e.key}") } } override fun loadAllKeys(): Iterable? = null override fun delete(key: ZonedDateTime) { throw UnsupportedOperationException("not supported") } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/hz/HzObjectFactory.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* #L% */ package com.walmartlabs.bigben.hz import com.hazelcast.nio.serialization.DataSerializableFactory import com.hazelcast.nio.serialization.IdentifiedDataSerializable import com.walmartlabs.bigben.BigBen.entityProvider import com.walmartlabs.bigben.api.EventReceiver.Companion.CACHED_PROCESSOR import com.walmartlabs.bigben.entities.Bucket import com.walmartlabs.bigben.entities.ShardStatus import com.walmartlabs.bigben.entities.ShardStatusList import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.* import com.walmartlabs.bigben.tasks.BulkShardTask import com.walmartlabs.bigben.tasks.ShutdownTask import com.walmartlabs.bigben.tasks.StatusTask /** * Created by smalik3 on 3/10/18 */ class HzObjectFactory : DataSerializableFactory { companion object { const val BIGBEN_FACTORY_ID = 1 } enum class ObjectId { EVENT_RECEIVER_ADD_EVENT, BULK_EVENT_TASK, SHUTDOWN_TASK, CLUSTER_STATUS_TASK, SHARD_STATUS, SHARD_STATUS_LIST, BUCKET } override fun create(typeId: Int): IdentifiedDataSerializable { return when (values()[typeId]) { EVENT_RECEIVER_ADD_EVENT -> CACHED_PROCESSOR BULK_EVENT_TASK -> BulkShardTask() SHUTDOWN_TASK -> ShutdownTask() SHARD_STATUS -> ShardStatus() SHARD_STATUS_LIST -> ShardStatusList() CLUSTER_STATUS_TASK -> StatusTask() BUCKET -> entityProvider().let { it.raw(it.selector(Bucket::class.java)) } } } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/modules.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben import com.walmartlabs.bigben.api.EventReceiver import com.walmartlabs.bigben.api.EventService import com.walmartlabs.bigben.core.ScheduleScanner import com.walmartlabs.bigben.utils.commons.Module import com.walmartlabs.bigben.utils.commons.ModuleRegistry import com.walmartlabs.bigben.utils.commons.Props import com.walmartlabs.bigben.utils.hz.ClusterSingleton import com.walmartlabs.bigben.utils.hz.Hz import com.walmartlabs.bigben.utils.logger /** * Created by smalik3 on 9/18/18 */ object EventModule : Module { private val l = logger() override fun init(registry: ModuleRegistry) { val hz = registry.module() l.info("initializing event receiver") registry.register(EventReceiver(hz)) l.info("initializing event service") registry.register(EventService(hz, registry.module(), registry.module())) } } object SchedulerModule : Module { private val l = logger() override fun init(registry: ModuleRegistry) { val scheduler = ScheduleScanner(registry.module()) registry.register(scheduler) if (Props.boolean("events.scheduler.enabled")) { l.info("initializing cluster singleton") ClusterSingleton(scheduler, registry.module()) } else { l.info("skipping initializing cluster scheduler") } l.info("Scheduler module initialized successfully") } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/processors/no_ops.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. 
* ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.processors import com.google.common.util.concurrent.Futures.immediateFuture import com.google.common.util.concurrent.ListenableFuture import com.walmartlabs.bigben.entities.Event import com.walmartlabs.bigben.entities.EventResponse import com.walmartlabs.bigben.utils.Json /** * Created by smalik3 on 6/25/18 */ class NoOpCustomClassProcessor(tenant: String, props: Json) : EventProcessor { override fun invoke(t: Event): ListenableFuture { return immediateFuture(t) } } class NoOpMessageProducerFactory : MessageProducerFactory { override fun create(tenant: String, props: Json): MessageProducer { return object : MessageProducer { override fun produce(e: EventResponse): ListenableFuture<*> { return immediateFuture(e) } } } } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/processors/processors.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. * ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.processors import com.google.common.base.Throwables.getStackTraceAsString import com.google.common.cache.CacheBuilder import com.google.common.net.HttpHeaders.ACCEPT import com.google.common.net.HttpHeaders.CONTENT_TYPE import com.google.common.net.MediaType.ANY_TYPE import com.google.common.net.MediaType.JSON_UTF_8 import com.google.common.util.concurrent.Futures.immediateFailedFuture import com.google.common.util.concurrent.ListenableFuture import com.google.common.util.concurrent.SettableFuture import com.ning.http.client.AsyncCompletionHandler import com.ning.http.client.AsyncHttpClient import com.ning.http.client.Response import com.walmartlabs.bigben.entities.Event import com.walmartlabs.bigben.entities.EventDeliveryOption.FULL_EVENT import com.walmartlabs.bigben.entities.EventDeliveryOption.PAYLOAD_ONLY import com.walmartlabs.bigben.entities.EventResponse import com.walmartlabs.bigben.entities.EventStatus.* import com.walmartlabs.bigben.extns.deliveryOption import com.walmartlabs.bigben.extns.kvs import com.walmartlabs.bigben.extns.nowUTC import com.walmartlabs.bigben.extns.toResponse import com.walmartlabs.bigben.processors.ProcessorConfig.Type.* import com.walmartlabs.bigben.utils.* import com.walmartlabs.bigben.utils.commons.Module import com.walmartlabs.bigben.utils.commons.ModuleRegistry import com.walmartlabs.bigben.utils.commons.Props import com.walmartlabs.bigben.utils.commons.Props.boolean import com.walmartlabs.bigben.utils.commons.Props.int import com.walmartlabs.bigben.utils.commons.Props.map import 
java.io.Serializable import java.lang.String.format import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.ExecutionException /** * Created by smalik3 on 2/24/18 */ typealias EventProcessor = (t: T) -> ListenableFuture data class ProcessorConfig(var tenant: String? = null, var type: Type? = null, var props: Json? = null) : Serializable { enum class Type { MESSAGING, HTTP, CUSTOM_CLASS } } object ProcessorRegistry : EventProcessor, Module { private val l = logger() private val ASYNC_HTTP_CLIENT = AsyncHttpClient() private val configs: MutableMap private val processorCache = CacheBuilder.newBuilder().build>() private val messageProducerFactory: MessageProducerFactory = Class.forName(Props.string("messaging.producer.factory.class")).newInstance() as MessageProducerFactory init { if (l.isInfoEnabled) l.info("loading configs") configs = ConcurrentHashMap(kvs { it.key = "tenants" }.result { l.error("error in loading tenant configs", it); throw it.rootCause()!! } .map { ProcessorConfig::class.java.fromJson(it.value!!) }.associate { it.tenant!! 
to it }) if (l.isInfoEnabled) l.info("configs parsed: {}", configs) } override fun init(registry: ModuleRegistry) { if (boolean("events.processor.eager.loading", false)) { if (l.isInfoEnabled) l.info("creating the processors right away") configs.forEach { getOrCreate(it.value) } if (l.isInfoEnabled) l.info("all processors created") } else if (l.isInfoEnabled) l.info("processors will be created when required") l.info("ProcessorRegistry module loaded successfully") } override fun invoke(e: Event): ListenableFuture { try { e.status = TRIGGERED e.error = null e.processedAt = nowUTC() return { getOrCreate(configs[e.tenant]).invoke(e) }.retriable( "processor-e-id: ${e.id}", int("events.processor.max.retries"), int("events.processor.initial.delay"), int("events.processor.backoff.multiplier") ).apply { transform { if (TRIGGERED == e.status) { e.status = e.error?.let { ERROR } ?: PROCESSED } }.catching { l.error( "error in processing event by processor after multiple retries, will be retried later if within " + "'buckets.backlog.check.limit', e-id: ${e.xrefId}", it.rootCause() ) e.status = ERROR e.error = it?.let { getStackTraceAsString(it) } ?: "null error" } } } catch (ex: Exception) { e.status = ERROR e.error = getStackTraceAsString(ex.rootCause()!!) return immediateFailedFuture(ex.rootCause()!!) } } private fun getOrCreate(processorConfig: ProcessorConfig?): EventProcessor { val eventContent = fun(e: Event): String { return when (deliveryOption(e)) { FULL_EVENT -> e.toResponse().json() PAYLOAD_ONLY -> e.payload!! else -> e.toResponse().json() } } return try { when (processorConfig?.type) { MESSAGING -> processorCache.get(processorConfig.tenant!!) { if (l.isInfoEnabled) l.info("creating message processor for tenant: ${processorConfig.tenant}") val mp = messageProducerFactory.create(processorConfig.tenant!!, map("kafka.producer.config").mapKeys { it.key.removePrefix("kafka.producer.config.") } + processorConfig.props!!) 
object : EventProcessor { override fun invoke(e: Event): ListenableFuture { if (l.isDebugEnabled) if (l.isDebugEnabled) l.debug("tenant: ${processorConfig.tenant}, processing event: ${e.xrefId}") return mp.produce(e.toResponse()).transform { if (l.isDebugEnabled) l.debug("tenant: ${processorConfig.tenant}, event produced successfully: ${e.xrefId}"); e } } } } HTTP -> { processorCache.get(processorConfig.tenant!!) { { SettableFuture.create().apply { try { val builder = ASYNC_HTTP_CLIENT.preparePost(processorConfig.props!!["url"].toString()).setBody(eventContent(it)) @Suppress("UNCHECKED_CAST") (processorConfig.props!!["headers"] as? Map)?.let { if (l.isDebugEnabled) l.debug("adding custom headers: {}", it) it.forEach { builder.setHeader(it.key, it.value) } } builder.setHeader(ACCEPT, ANY_TYPE.toString()).setHeader(CONTENT_TYPE, JSON_UTF_8.toString()) if (l.isDebugEnabled) l.debug("tenant: ${processorConfig.tenant}, processing event: ${it.xrefId}") builder.execute(object : AsyncCompletionHandler() { override fun onCompleted(response: Response): Response { val code = response.statusCode if (code in 200..299 || code in 400..499) { if (code < 400) { if (l.isDebugEnabled) l.debug(format("event processed successfully, response code: {}, response body: {}, event: {}", code, response.responseBody, it.xrefId)) } else { l.warn(format("got a 'bad request' response with status code: {}, event will not be retried anymore, event: {}", code, it.xrefId)) it.error = response.responseBody } set(it) } else { setException(RuntimeException(response.responseBody)) } return response } override fun onThrowable(t: Throwable) { setException(t.rootCause()!!) } }) } catch (ex: Exception) { setException(ex.rootCause()!!) } } } } } CUSTOM_CLASS -> processorCache.get(processorConfig.tenant!!) 
custom@{ try { @Suppress("UNCHECKED_CAST") (Class.forName(processorConfig.props!!["eventProcessorClass"].toString()) as Class>).run { constructors.forEach { when { it.parameterCount == 0 -> return@custom it.newInstance() as EventProcessor it.parameterCount == 2 && it.parameterTypes[0] == String::class.java && Map::class.java.isAssignableFrom(it.parameterTypes[1]) -> return@custom it.newInstance(processorConfig.tenant, processorConfig.props) as EventProcessor } } throw IllegalArgumentException( "no suitable constructor found for custom processor: $this, " + "either a no-args constructor or a constructor with parameters (String, Map) is required" ) } } catch (ex: Exception) { throw RuntimeException(ex.rootCause()) } } null -> throw IllegalArgumentException("null processor type: $processorConfig") } } catch (e: ExecutionException) { throw RuntimeException(e) } } fun register(config: ProcessorConfig?): ProcessorConfig? { require(config != null) { "null processor config" } require(config.tenant != null && config.tenant!!.trim().isNotEmpty()) { "null or empty tenantId" } require(config.type != null) { "null processor type" } require(config.props != null && !config.props!!.isEmpty()) { "null or empty properties" } if (l.isInfoEnabled) l.info("registering new processor") val previous = configs.put(config.tenant!!, config) processorCache.invalidate(config.tenant!!) return previous } fun registeredTenants(): Set { return configs.keys.toSet() } fun registeredConfigs(): Map { return configs } } interface MessageProducerFactory { fun create(tenant: String, props: Json): MessageProducer } interface MessageProducer { fun produce(e: EventResponse): ListenableFuture<*> } interface MessageProcessor { fun init() } ================================================ FILE: lib/src/main/kotlin/com/walmartlabs/bigben/tasks/tasks.kt ================================================ /*- * #%L * BigBen:lib * ======================================= * Copyright (C) 2016 - 2018 Walmart Inc. 
* ======================================= * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.walmartlabs.bigben.tasks import com.google.common.base.Throwables.getStackTraceAsString import com.google.common.util.concurrent.AsyncCallable import com.google.common.util.concurrent.Futures.immediateFuture import com.google.common.util.concurrent.ListenableFuture import com.google.common.util.concurrent.MoreExecutors.listeningDecorator import com.hazelcast.core.HazelcastInstance import com.hazelcast.core.HazelcastInstanceAware import com.hazelcast.nio.ObjectDataInput import com.hazelcast.nio.ObjectDataOutput import com.hazelcast.nio.serialization.IdentifiedDataSerializable import com.walmartlabs.bigben.BigBen.entityProvider import com.walmartlabs.bigben.BigBen.module import com.walmartlabs.bigben.entities.* import com.walmartlabs.bigben.entities.EventStatus.ERROR import com.walmartlabs.bigben.entities.EventStatus.PROCESSED import com.walmartlabs.bigben.extns.epoch import com.walmartlabs.bigben.hz.HzObjectFactory import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.BULK_EVENT_TASK import com.walmartlabs.bigben.hz.HzObjectFactory.ObjectId.SHUTDOWN_TASK import com.walmartlabs.bigben.processors.EventProcessor import com.walmartlabs.bigben.utils.* import com.walmartlabs.bigben.utils.commons.Props.int import com.walmartlabs.bigben.utils.hz.ClusterSingleton import java.lang.System.currentTimeMillis import java.time.Instant import java.time.ZoneOffset.UTC import 
java.time.ZonedDateTime import java.util.* import java.util.concurrent.Callable import java.util.concurrent.ScheduledThreadPoolExecutor import java.util.concurrent.ThreadFactory import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy import java.util.concurrent.TimeUnit.MILLISECONDS import java.util.concurrent.atomic.AtomicInteger import kotlin.math.max import kotlin.math.min /** * Created by smalik3 on 2/23/18 */ class BulkShardTask(private var shards: Collection>? = null) : Callable, IdentifiedDataSerializable, HazelcastInstanceAware, Idso(BULK_EVENT_TASK) { companion object { private val l = logger() private val NO_OP = immediateFuture>(ArrayList()) } private lateinit var hz: HazelcastInstance override fun call(): ShardStatusList { try { return ShardStatusList(execute().get()) } catch (e: Exception) { l.error("error in processing events", e.rootCause()) throw RuntimeException(e) } } private fun execute(): ListenableFuture> { val shards = shards!! if (shards.isEmpty()) return NO_OP if (l.isDebugEnabled) l.debug("{}, executing bulk event task for buckets/shards on node: {}", shards.map { "${it.first}/${it.second}" }, hz.cluster.localMember.socketAddress) val fetchSizeHint = int("events.tasks.max.events.in.memory") / shards.size if (l.isInfoEnabled) l.info("starting processing of ${shards.sortedBy { it.first }}") return shards.map { s -> try { ShardTask(s, fetchSizeHint, module(), module()).call().done( { l.error("error in executing shard: bucket: {}, shard: {}", s.first, s.second, it.rootCause()) }) { if (l.isInfoEnabled) l.info("shard processed, bucket: {}, shard: {}", s.first, s.second) }.catching { l.error("error in executing shard, returning an ERROR status bucket: {}, shard: {}", s.first, s.second, it.rootCause()) ShardStatus(s.first, s.second, ERROR) } } catch (e: Exception) { l.error("error in submitting shard for execution: bucket: {}, shard: {}", s.first, s.second, e.rootCause()) immediateFuture(ShardStatus(s.first, s.second, ERROR)) } }.reduce() 
}

    /**
     * Serializes the shard list as a count followed by (epochMillis, shardIndex) pairs.
     */
    override fun writeData(out: ObjectDataOutput) {
        out.writeInt(shards!!.size)
        shards!!.forEach {
            out.writeLong(it.first.toInstant().toEpochMilli())
            out.writeInt(it.second)
        }
    }

    /**
     * Inverse of [writeData]: rebuilds the shard list, interpreting the stored
     * epoch millis as UTC timestamps.
     */
    override fun readData(`in`: ObjectDataInput) {
        shards = (1..`in`.readInt()).map {
            ZonedDateTime.ofInstant(Instant.ofEpochMilli(`in`.readLong()), UTC) to `in`.readInt()
        }.toList()
    }

    override fun setHazelcastInstance(hazelcastInstance: HazelcastInstance) {
        this.hz = hazelcastInstance
    }
}

/**
 * Processes a single (bucket, shard) pair: pages events out of the [loader] in chunks of
 * [fetchSize] and hands each unprocessed event to the [processor], either immediately
 * (past-due events) or via a shared scheduler (future events).
 *
 * @param p the (bucket timestamp, shard index) pair this task owns
 * @param fetchSizeHint requested page size; clamped to the range [10, 400]
 * @param processor callback that actually executes an event
 * @param loader paginated event source for the shard
 */
class ShardTask(private val p: Pair<ZonedDateTime, Int>, fetchSizeHint: Int,
                private val processor: EventProcessor, private val loader: EventLoader) : Callable<ListenableFuture<ShardStatus>> {

    companion object {
        private val l = logger()
        private val index = AtomicInteger()

        // Shared scheduler for delayed events; CallerRunsPolicy applies back pressure
        // by running the task on the submitting thread when the pool is saturated.
        private val scheduler = listeningDecorator(ScheduledThreadPoolExecutor(
            int("events.tasks.scheduler.worker.threads"),
            ThreadFactory { Thread(it, "event-processor#" + index.getAndIncrement()) }, CallerRunsPolicy()))
    }

    // e.g. "2018-02-23T09:00Z[3]" — used purely as a log prefix.
    private val executionKey = "${p.first}[${p.second}]"
    private val fetchSize = max(10, min(fetchSizeHint, 400))

    /**
     * Drives the whole shard: the result is ERROR if any event ended up in ERROR state,
     * PROCESSED otherwise.
     */
    override fun call(): ListenableFuture<ShardStatus> {
        if (l.isDebugEnabled) l.debug("{}, processing shard with fetch size: {}", executionKey, fetchSize)
        return loadAndProcess(epoch(), "", null).transform {
            // Fold the page's event statuses into a single "any error?" flag.
            it!!.second.fold(false) { b, e -> b || e.status == ERROR }.run {
                if (l.isDebugEnabled) {
                    if (this) l.debug("{}, errors in processing shard", executionKey)
                    else l.debug("{}, shard processed successfully", executionKey)
                }
                ShardStatus(p.first, p.second, if (this) ERROR else PROCESSED)
            }
        }
    }

    /**
     * Loads one page of events starting after (eventTime, eventId), schedules every
     * not-yet-PROCESSED event, and recurses for the next page while full pages keep coming.
     * The opaque [context] is the loader's paging token (first element of the returned pair).
     */
    private fun loadAndProcess(eventTime: ZonedDateTime, eventId: String, context: Any?): ListenableFuture<Pair<Any?, List<Event>>> {
        return loader.load(p.first, p.second, fetchSize, eventTime, eventId, context).transformAsync { rp ->
            val events = rp!!.second
            if (events.isEmpty()) immediateFuture(rp.first to events)
            else events.filter { it.status != PROCESSED }.map { e ->
                schedule(e).done(
                    { l.error("{}/{}/{} event has error in processing", executionKey, e.eventTime, e.id, it.rootCause()) }
                ) {
                    if (l.isDebugEnabled)
l.debug("{}/{}/{} event is processed successfully", executionKey, e.eventTime, e.id)
                }
            }.reduce().transformAsync {
                // A full page suggests more events may follow; recurse from the last seen event.
                if (events.size >= fetchSize) loadAndProcess(events.last().eventTime!!, events.last().id!!, rp.first)
                else immediateFuture(rp.first to events)
            }
        }
    }

    /**
     * Routes one event: past-due events are processed immediately; future events are
     * handed to the shared scheduler with the remaining delay. In both paths the event
     * is saved afterwards.
     */
    private fun schedule(e: Event): ListenableFuture<Event> {
        val delay = e.eventTime!!.toInstant().toEpochMilli() - currentTimeMillis()
        return if (delay <= 0) {
            if (l.isDebugEnabled) l.debug("{}, event {} time has expired, processing immediately", executionKey, e.id)
            process(e).transformAsync { save(e) }
        } else {
            if (l.isDebugEnabled) l.debug("{}, scheduling event '{}' after delay {}, at {}", executionKey, e.id, delay, e.eventTime!!)
            AsyncCallable { processor(e) }.scheduleAsync(delay, MILLISECONDS, scheduler).transformAsync { save(it!!) }
        }
    }

    /**
     * Invokes the processor on [e]. Synchronous failures mark the event ERROR (with the
     * root-cause stack trace) and return it as an immediate future; asynchronous failures
     * are logged and the event is mutated via the `catching` handler.
     *
     * NOTE(review): the `transform { … }.catching { … }` chain inside `apply` is discarded —
     * `apply` returns the original future, so error handling relies solely on mutating `e`
     * as a side effect. Also, `e.status?.let { … }` only marks ERROR when status is already
     * non-null. Both look intentional but are worth confirming against callers.
     */
    private fun process(e: Event): ListenableFuture<Event> {
        return try {
            if (l.isDebugEnabled) l.debug("{}, processing event: {}", executionKey, e.id)
            processor.invoke(e).apply {
                transform {
                    if (l.isDebugEnabled) l.debug("{}, processed event: {}", executionKey, e.id)
                }.catching { ex ->
                    l.error("{}, error in processing event, marking it {}", executionKey, ERROR, ex.rootCause())
                    e.status?.let { e.status = ERROR; e.error = getStackTraceAsString(ex!!) }
                }
            }
        } catch (t: Throwable) {
            l.error("{}, error in processing event: {}", executionKey, e.id, t.rootCause())
            e.status = ERROR
            e.error = getStackTraceAsString(t.rootCause()!!)
            immediateFuture(e)
        }
    }

    /** Persists the (possibly status-mutated) event through the configured entity provider. */
    private fun save(e: Event): ListenableFuture<Event> {
        if (l.isDebugEnabled) l.debug("{}, saving event: {} to the DB, the status is '{}'", executionKey, e.id, e.status)
        return entityProvider<Event>().save(e)
    }
}

/** Cluster-wide shutdown hook; not implemented yet. */
internal class ShutdownTask : IdsoCallable(SHUTDOWN_TASK), Callable<Boolean> {
    override fun call(): Boolean {
        TODO()
    }
}

/**
 * Reports whether [serviceName] is active ("Master") on the member this task runs on.
 */
internal class StatusTask(private var serviceName: String?
= null) : Idso(HzObjectFactory.ObjectId.CLUSTER_STATUS_TASK), Callable { override fun call() = if (ClusterSingleton.ACTIVE_SERVICES.contains(serviceName)) "Master" else "Slave" override fun writeData(out: ObjectDataOutput) = out.writeUTF(serviceName) override fun readData(`in`: ObjectDataInput) = `in`.let { serviceName = it.readUTF() } } ================================================ FILE: lib/src/main/resources/hz.template.xml ================================================ slf4j ${group.name bigben} ${group.password bigben-pass} ${management.url http://localhost:8080/mancenter} ${network.port 5701} 0 ${network.members 127.0.0.1} OBJECT com.walmartlabs.bigben.hz.BucketStore ${map.store.writeDelay 60} ${map.store.batchSize 1000} ${map.store.writeCoalescing true} ${map.backupCount 1} ${map.asyncBackupCount 0} ${map.ttl 0} ${map.maxIdleSeconds 0} ${map.evictionPolicy LRU} ${map.MaxSize 1000000} ${map.evictionPercentage 25} ${map.evictionCheckMillis 2000} ${map.mergePolicy com.hazelcast.map.merge.LatestUpdateMapMergePolicy} OBJECT com.walmartlabs.bigben.cron.CronMapStore ${cron.backupCount 1} ${cron.async.backupCount 0} 0 NONE com.walmartlabs.bigben.hz.HzObjectFactory ================================================ FILE: pom.xml ================================================ 4.0.0 com.walmartlabs walmartlabs-pom 1 com.walmartlabs.bigben bigben pom 1.0.7-SNAPSHOT commons lib cron cassandra kafka app BigBen:parent http://walmartlabs.com UTF-8 1.3.11 1.7.25 24.1.1-jre 2.10.0.pr1 3.8.6 1.9.31 3.5.1 2.9.4.1 3.7 1.2 1.3.0 true jcenter http://jcenter.bintray.com Apache License, Version 2.0 https://www.apache.org/licenses/LICENSE-2.0.txt repo wmt WalmartLabs Open Source Developers opensource@walmartlabs.com Contributor -8 smalik3 Sandeep Malik smalik@walmartlabs.com Project Lead -8 sandeep.malik Sandeep Malik sandeep.malik@gmail.com Project Lead -8 com.walmartlabs.bigben bigben-commons 1.0.7-SNAPSHOT com.walmartlabs.bigben bigben-lib 1.0.7-SNAPSHOT 
com.walmartlabs.bigben bigben-cassandra 1.0.7-SNAPSHOT com.walmartlabs.bigben bigben-kafka 1.0.7-SNAPSHOT com.walmartlabs.bigben bigben-cron 1.0.7-SNAPSHOT org.slf4j slf4j-api ${slf4j-api.version} com.google.guava guava ${guava.version} com.fasterxml.jackson.core jackson-databind ${jackson.version} com.fasterxml.jackson.module jackson-module-kotlin ${jackson-module-kotlin.version} com.hazelcast hazelcast ${hazelcast.version} com.ning async-http-client ${async-http-client.version} org.apache.commons commons-lang3 ${commons-lang3.version} org.apache.commons commons-text ${commons-text.version} org.jetbrains.kotlin kotlin-reflect ${kotlin.version} org.testng testng 6.14.3 test io.ktor ktor-server-core ${ktor.version} io.ktor ktor-server-netty ${ktor.version} io.netty netty-codec-http2 4.1.24.Final io.ktor ktor-jackson ${ktor.version} io.github.microutils kotlin-logging 1.6.22 io.ktor ktor-client-core ${ktor.version} test io.ktor ktor-client-apache ${ktor.version} test org.jetbrains.kotlin kotlin-stdlib-jdk8 ${kotlin.version} org.jetbrains.kotlin kotlin-test ${kotlin.version} test kotlin-maven-plugin org.jetbrains.kotlin ${kotlin.version} compile compile 1.8 ${project.basedir}/src/main/kotlin ${project.basedir}/src/main/java test-compile test-compile 1.8 ${project.basedir}/src/test/kotlin ${project.basedir}/src/test/java org.apache.maven.plugins maven-compiler-plugin ${maven-compiler-plugin.version} default-compile none default-testCompile none java-compile compile compile java-test-compile test-compile testCompile 1.8 1.8 org.apache.maven.plugins maven-javadoc-plugin 2.10.1 ${java.home}/../bin/javadoc attach-javadocs jar -Xdoclint:none org.apache.maven.plugins maven-surefire-plugin 2.22.0 ${skipTests} ${project.build.directory} org.apache.maven.plugins maven-dependency-plugin 3.0.2 install install analyze-only org.apache.maven.plugins maven-release-plugin 2.5.3 true org.codehaus.mojo exec-maven-plugin 1.4.0 exec maven org.codehaus.mojo license-maven-plugin 1.16 2016 
Walmart Inc. false Walmart Inc. ${project.basedir}/LICENSE.txt true true false apache_v2 ======================================= src/ first update-file-header update-project-license process-sources scm:git:https://github.com/walmartlabs/bigben.git scm:git:https://github.com/walmartlabs/bigben HEAD scm:git:https://github.com/walmartlabs/bigben.git
================================================
FILE: run_bigben_standalone.sh
================================================
#!/usr/bin/env bash
# Build BigBen and launch the standalone fat jar from app/target.
#
# Fail fast: without this, a failed `mvn clean install` (or a failed `cd`)
# would still fall through and try to run a stale or missing bigben.jar.
set -euo pipefail

echo Building BigBen
mvn clean install

cd app/target

echo Starting BigBen
java -jar bigben.jar