[
  {
    "path": ".gitignore",
    "content": "cover.out\n.goxc.json\n/.idea\n*.iml\nguble\nguble-cli/guble-cli\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: go\ngo:\n  - tip\nsudo: required\nservices:\n  - docker\n  - postgresql\nbefore_script:\n  - psql -c 'create database guble;' -U postgres\nbefore_install:\n  - go get github.com/wadey/gocovmerge\n  - go get github.com/mattn/goveralls\n  - go get golang.org/x/tools/cmd/cover\nscript:\n  - GO_TEST_DISABLED=true go test -v ./...\nafter_success:\n  - scripts/generate_coverage.sh\n  - goveralls -coverprofile=full_cov.out -service=travis-ci\n  - if [ \"$TRAVIS_BRANCH\" == \"master\" ]; then\n      GOOS=linux go build -a --ldflags '-linkmode external -extldflags \"-static\"' . ;\n      GOOS=linux go build -a --ldflags '-linkmode external -extldflags \"-static\"' -o ./guble-cli/guble-cli ./guble-cli ;\n      docker build -t smancke/guble . ;\n      docker login -e=\"$DOCKER_EMAIL\" -u=\"$DOCKER_USERNAME\" -p=\"$DOCKER_PASSWORD\" ;\n      docker push smancke/guble ;\n    fi\n\nenv:\n  global:\n  - secure: V9+UswYO6l0EuekA5YBviUdz0OcWfT3QsY1Bgoml8lmWP3/Rdq0fpxGh1hHUWt1pyAl3Aymw5Sc9DU/STmb5k6YjimS649Hu3jZ2AJfjLxh8ZA+vTgFiQc4mN4FqDAFhnPVB/aOSQhGyRlWalxikNy3nhcJrN+uWOpzRqzg0icNOdfTKpSH1cRJO0Ja34f4AEmLuNvGUAyZVpLuZJFL5mE9sJ1G1baqgFf/kTQ67jF+Ezg+1AY+NYaYwd5PUGFgIKf/qVT5Wqtrff26Yxzr/hECEBypAvNmCdLSoV/qyzZvzUTgYZTPmUnDks0uUEup9YzEQZ9XxwIQyHSXZ9D6h2vZxyr0TlZvBtdzWNiLHjBSISF8ZzOthI7NIi/e4YRYlqCF3apZuRo6o2fneHqzonza0OpJQdCKACXgycFe0ZTXk1o7SdT1d1JgeFckmL0kS8H2N4E/DaIAPq8zaC4bOlaYaUYt6vXNwEKK99q0X97gLJFdrBBY7lzKs9bbVa7b2Dhkh67PUt6WhoHUjLSN+9jTn+oda8VEKtXxyaWM6AsCRHgBiy0VaxuHbU2k1mpSCLdBfJGbrDITA4+nyPopv/oky4xHX1FGSMGFw73Ejafu9Xo0cpvIpVcNjeagUugQ5ThPQMSua9hxSZJx6alIUhptDUesiYHJAWUVPQi4N/3A=\n  - secure: 
SHIH8wBBTWslUnXeIPa3XpPugTX2IgKu3CB0OAbEE9e1nkAop1bbbas5chJgSA276xseBriH7aBSPe25XB4q9JM0YDelC7pK7dmSiLQMiAYvBb8SiGpfTAArBen4hiJiYaJo9hAE51Q4tjZ03vlIvTCYFjJ0rsBoTnbk9W6iVNEQfKzo9KfVBshYcS4BswwBgPSGtck/V3I3oASTmWpPdCGhhDipuOA9UG4hbnyWyeDqi1Mf0Dukggya4Qg+Z2o3WFI5qKGN/L6Qulgse9Rszrlikas5g2iDP11e9eO/tn/2nipIGZd/0xgCcG4tfcoqVn0PzOIOLE33vgqDrUvaaIsmVL/h0nQvC+EhVjgtrNcV/c3gDFH/3GaFX/J2wtT7396CpNCJbje/5fo9pFKS/QXjyqeIRrjq5Rux59RkZNoZIYyXbgM2UW3F8ebHFgaLd4+3Ec67zelxvixJWP1s2iDkZ2C4M7eHSBSvwpM0leebXPDOXeInCPspD5AWkhmo29m7X6J6fT7lwkfbSTyvCAQCKMzuRIsxMaAdxMCco5eVMam8CaAqZoAL/8RbnC+G9BiknyNDxx/W3qLfbnTpXlljKIBapNRYiut0RglrcPjpGAhHwbefXNwjb8AHxzx/GnU3GIHzjkQXCGDGMLJ6cPm/Iik2tVZD/eqgRxGqWNA=\n  - secure: BNjkm6Hb8go2xem0JLsSkFhACUgwxhBhqgsEfcpzJG1+gIC0ZZtvK5ARBusOgEytmT1tsyDbT99FT1MJ3LsucNB4EixLU/8UEoY80r/QD67eK4dKzUIiQdsPmJTUMJnzfTqgyQF2byilu6tHSHWL+MwFVmaQh04R1T1Zo0LyZMFhWjWIGx2lNhHbsLQWjb7KFLLlYx9lg4POf4eTTnrhhdJHTpUOmoty57+jf+Sen+hPOanGGsSajo6GTqN2SMmsLOCykwytsSUA0ZZ/QuEsL+1htm0vpQXqsfUxQ3KAIbyDUVTrSQsAPvULM1ymlEyIeFEeABTCVNUQb8sMpc/5VbKTNd/jEhM6oidZfakLnx3RV6kZCtrHMbkHh6ta8KcxTpt7TcnyGjTnMD5jCVjgAM99j8x7QMfAd+boRr05intzHB8GFv0IDYq9tZ93/umQHyqX8ctN+kNpmy0kSshusd3QPZ+FeZrMgWhfKvYkrjEZ1Pd/wWaqb4Pv0DUfqvlwYKshvCtH7u7TCP63Nbnt2rY+CNgfXWBDfPkxIDoF8UrXIHEZXY2C5JOGfEtS27AjUin46vHFQKr/oaYYMiUXVu25mbTkNNR67Q/6yxD0f7VqVFmWAmT1pWdDd6Gc0uT4fsP8H8Le1PAciPjMGvUFIegQK7W9TnnvDA1w0IsADwE= # DOCKER_PASSWORD\n\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM alpine\nCOPY ./guble ./guble-cli/guble-cli /usr/local/bin/\nRUN mkdir -p /var/lib/guble\nVOLUME [\"/var/lib/guble\"]\nENTRYPOINT [\"/usr/local/bin/guble\"]\nEXPOSE 8080\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\nCopyright (c) 2015 Sebastian Mancke\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Guble Messaging Server\n\nGuble is a simple user-facing messaging and data replication server written in Go.\n\n[![Codacy Badge](https://api.codacy.com/project/badge/Grade/f3b9a351201b416db4fe6df8faea363b)](https://www.codacy.com/app/cosminrentea/guble?utm_source=github.com&utm_medium=referral&utm_content=smancke/guble&utm_campaign=badger)\n[![Release](https://img.shields.io/github/release/smancke/guble.svg)](https://github.com/smancke/guble/releases/latest)\n[![Docker](https://img.shields.io/docker/pulls/smancke/guble.svg)](https://hub.docker.com/r/smancke/guble/)\n[![Build Status](https://api.travis-ci.org/smancke/guble.svg?branch=master)](https://travis-ci.org/smancke/guble)\n[![Go Report Card](https://goreportcard.com/badge/github.com/smancke/guble)](https://goreportcard.com/report/github.com/smancke/guble)\n[![codebeat badge](https://codebeat.co/badges/7f317892-0a7b-4e31-97f4-a530cf779889)](https://codebeat.co/projects/github-com-smancke-guble)\n[![Coverage Status](https://coveralls.io/repos/smancke/guble/badge.svg?branch=master&service=github)](https://coveralls.io/github/smancke/guble?branch=master)\n[![GoDoc](https://godoc.org/github.com/smancke/guble?status.svg)](https://godoc.org/github.com/smancke/guble)\n[![Awesome-Go](https://camo.githubusercontent.com/13c4e50d88df7178ae1882a203ed57b641674f94/68747470733a2f2f63646e2e7261776769742e636f6d2f73696e647265736f726875732f617765736f6d652f643733303566333864323966656437386661383536353265336136336531353464643865383832392f6d656469612f62616467652e737667)](https://awesome-go.com)\n\n# Overview\nGuble is in an early state (release 0.4). \nIt is already working well and is very useful, but the protocol, API and storage formats \nmay still change (until reaching 0.7). 
\nIf you intend to use guble, please get in contact with us.\n\nThe goal of guble is to be a simple and fast message bus for user interaction and replication of data between multiple devices:\n* Very easy consumption of messages with web and mobile clients\n* Fast realtime messaging, as well as playback of messages from a persistent commit log\n* Reliable and scalable over multiple nodes\n* User-aware semantics to easily support messaging scenarios between people using multiple devices\n* Batteries included: usable as front-facing server, without the need of a proxy layer\n* Self-contained: no mandatory dependencies to other services\n\n## Working Features (0.4)\n\n* Publishing and subscription of messages to topics and subtopics\n* Persistent message store with transparent live and offline fetching\n* WebSocket and REST APIs for message publishing\n* Commandline client and Go client library\n* Firebase Cloud Messaging (FCM) adapter: delivery of messages as FCM push notifications\n* Docker images for server and client\n* Simple Authentication and Access-Management\n* Clean shutdown\n* Improved logging using [logrus](https://github.com/Sirupsen/logrus) and logstash formatter\n* Health-Check with Endpoint\n* Collection of Basic Metrics, with Endpoint\n* Added Postgresql as KV Backend\n* Load testing with 5000 messages per instance\n* Support for Apple Push Notification services (a new connector alongside Firebase)\n* Upgrade, cleanup, abstraction, documentation, and test coverage of the Firebase connector\n* GET list of subscribers / list of topics per subscriber (userID , deviceID) \n* Support for SMS-sending using Nexmo (a new connector alongside Firebase)\n\n## Throughput\nMeasured on an old notebook with i5-2520M, dual core and SSD. 
Message payload was 'Hello Word'.\nLoad driver and server were set up on the same machine, so 50% of the cpu was allocated to the load driver.\n\n* End-2-End: Delivery of ~35.000 persistent messages per second\n* Fetching: Receive of ~70.000 persistent messages per second\n\nDuring the tests, the memory consumption of the server was around ~25 MB.\n\n## Table of Contents\n\n- [Roadmap](#roadmap)\n  - [Roadmap Release 0.5](#roadmap-release-05)\n  - [Roadmap Release 0.6](#roadmap-release-06)\n  - [Roadmap Release 0.7](#roadmap-release-07)\n- [Guble Docker Image](#guble-docker-image)\n  - [Start the Guble Server](#start-the-guble-server)\n  - [Connecting with the Guble Client](#connecting-with-the-guble-client)\n- [Build and Run](#build-and-run)\n  - [Build and Start the Server](#build-and-start-the-server)\n    - [Configuration](#configuration)\n  - [Run All Tests](#run-all-tests)\n- [Clients](#clients)\n- [Protocol Reference](#protocol-reference)\n  - [REST API](#rest-api)\n    - [Headers](#headers)\n  - [WebSocket Protocol](#websocket-protocol)\n    - [Message Format](#message-format)\n    - [Client Commands](#client-commands)\n    - [Server Status Messages](#server-status-messages)\n  - [Topics](#topics)\n    - [Subtopics](#subtopics)\n\n# Roadmap\nThis is the current (and fast changing) roadmap and todo list:\n\n## Roadmap Release 0.5\n* Replication across multiple servers (in a Guble cluster)\n* Acknowledgement of message delivery for connectors\n* Storing the sequence-Id of topics in KV store, if we turn off persistence\n* Filtering of messages in guble server (e.g. 
sent by the REST client) according to URL parameters: UserID, DeviceID, Connector name\n* Updating README to show subscribe/unsubscribe/get/posting, health/metrics \n\n## Roadmap Release 0.6\n* Make notification messages optional by client configuration\n* Correct behaviour of receive command with `maxCount` on subtopics\n* Cancel of fetch in the message store and multiple concurrent fetch commands for the same topic\n* Configuration of different persistence strategies for topics\n* Delivery semantics: user must read on one device / deliver only to one device / notify if not connected, etc.\n* User-specific persistent subscriptions across all clients of the user\n* Client: (re-)setup of subscriptions after client reconnect\n* Message size limit configurable by the client with fetching by URL\n\n## Roadmap Release 0.7\n* HTTPS support in the service\n* Minimal example: chat application\n* Stable JavaScript client: https://github.com/smancke/guble-js\n* (TBD) Improved authentication and access-management\n* (TBD) Add Consul as KV Backend\n* (TBD) Index-based search of messages using [GoLucene](https://github.com/balzaczyy/golucene)\n\n# Guble Docker Image\nWe are providing Docker images of the server and client for your convenience.\n\n## Start the Guble Server\nThere is an automated Docker build for the master at the Docker Hub.\nTo start the server with Docker simply type:\n```\ndocker run -p 8080:8080 smancke/guble\n```\n\nTo see available configuration options:\n```\ndocker run smancke/guble --help\n```\n\nAll options can be supplied on the commandline or by a corresponding environment variable with the prefix `GUBLE_`.\nSo to let guble be more verbose, you can either use:\n```\ndocker run smancke/guble --log=info\n```\nor\n```\ndocker run -e GUBLE_LOG=info smancke/guble\n```\n\nThe Docker image has a volume mount point at `/var/lib/guble`, so if you want to bind-mount the persistent storage from your host you should use:\n```\ndocker run -p 8080:8080 -v 
/host/storage/path:/var/lib/guble smancke/guble\n```\n\n## Connecting with the Guble Client\nThe Docker image includes the guble commandline client `guble-cli`.\nYou can execute it within a running guble container and connect to the server:\n```\ndocker run -d --name guble smancke/guble\ndocker exec -it guble /usr/local/bin/guble-cli\n```\nVisit the [`guble-cli` documentation](https://github.com/smancke/guble/tree/master/guble-cli) for more details.\n\n# Build and Run\nSince Go makes it very easy to build from source, you can compile guble using a single command.\nA prerequisite is having an installed Go environment and an empty directory:\n```\nsudo apt-get install golang\nmkdir guble && cd guble\nexport GOPATH=`pwd`\n```\n\n## Build and Start the Server\nBuild and start guble with the following commands (assuming that directory `/var/lib/guble` is already created with read-write rights for the current user):\n```\ngo get github.com/smancke/guble\nbin/guble --log=info\n```\n\n### Configuration\n\n|CLI Option|Env Variable|Values|Default|Description|\n|--- |--- |--- |--- |--- |\n|`--env`|GUBLE_ENV|development &#124; integration &#124; preproduction &#124; production|development|Name of the environment on which the application is running. 
Used mainly for logging|\n|`--health-endpoint`|GUBLE_HEALTH_ENDPOINT|resource/path/to/healthendpoint|/admin/healthcheck|The health endpoint to be used by the HTTP server.Can be disabled by setting the value to \"\"|\n|`--http`|GUBLE_HTTP_LISTEN|format: [host]:port||The address to for the HTTP server to listen on|\n|`--kvs`|GUBLE_KVS|memory &#124; file &#124; postgres|file|The storage backend for the key-value store to use|\n|`--log`|GUBLE_LOG|panic &#124; fatal &#124; error &#124; warn &#124; info &#124; debug|error|The log level in which the process logs|\n|`--metrics-endpoint`|GUBLE_METRICS_ENDPOINT|resource/path/to/metricsendpoint|/admin/metrics|The metrics endpoint to be used by the HTTP server.Can be disabled by setting the value to \"\"|\n|`--ms`|GUBLE_MS|memory &#124; file|file|The message storage backend|\n|`--profile`|GUBLE_PROFILE|cpu &#124; mem &#124; block||The profiler to be used|\n|`--storage-path`|GUBLE_STORAGE_PATH|path/to/storage|/var/lib/guble|The path for storing messages and key-value data like subscriptions if defined.The path must exists!|\n\n\n#### APNS\n\n|CLI Option|Env Variable|Values|Default|Description|\n|--- |--- |--- |--- |--- |\n|`--apns`|GUBLE_APNS|true &#124; false|false|Enable the APNS module in general as well as the connector to the development endpoint|\n|`--apns-production`|GUBLE_APNS_PRODUCTION|true &#124; false|false|Enables the connector to the apns production endpoint, requires the apns option to be set|\n|`--apns-cert-file`|GUBLE_APNS_CERT_FILE|path/to/cert/file||The APNS certificate file name, use this as an alternative to the certificate bytes option|\n|`--apns-cert-bytes`|GUBLE_APNS_CERT_BYTES|cert-bytes-as-hex-string||The APNS certificate bytes, use this as an alternative to the certificate file option|\n|`--apns-cert-password`|GUBLE_APNS_CERT_PASSWORD|password||The APNS certificate password|\n|`--apns-app-topic`|GUBLE_APNS_APP_TOPIC|topic||The APNS topic (as used by the mobile 
application)|\n|`--apns-prefix`|GUBLE_APNS_PREFIX|prefix|/apns/|The APNS prefix / endpoint|\n|`--apns-workers`|GUBLE_APNS_WORKERS|number of workers|Number of CPUs|The number of workers handling traffic with APNS (default: number of CPUs)|\n\n\n#### SMS\n\n|CLI Option|Env Variable|Values|Default |Description|\n|--- |--- |--- |--- |--- |\n|`sms`|GUBLE_SMS|true &#124; false|false |Enable the SMS gateway|\n|`sms_api_key`|GUBLE_SMS_API_KEY|api key||The Nexmo API Key for Sending sms|\n|`sms_api_secret`|GUBLE_SMS_API_SECRET|api secret||The Nexmo API Secret for Sending sms|\n|`sms_topic`|GUBLE_SMS_TOPIC|topic|/sms|The topic for sms route|\n|`sms_workers`|GUBLE_SMS_WORKERS|number of workers|Number of CPUs|The number of workers handling traffic with Nexmo sms endpoint|\n\n#### FCM\n\n|CLI Option|Env Variable|Values|Default|Description|\n|--- |--- |--- |--- |--- |\n|`--fcm|GUBLE_FCM`|true &#124; false|false|Enable the Google Firebase Cloud Messaging connector|\n|`--fcm-api-key`|GUBLE_FCM_API_KEY|api key||The Google API Key for Google Firebase Cloud Messaging|\n|`--fcm-workers`|GUBLE_FCM_WORKERS|number of workers|Number of CPUs|The number of workers handling traffic with Firebase Cloud Messaging|\n|`--fcm-endpoint`|GUBLE_FCM_ENDPOINT|format: url-schema|https://fcm.googleapis.com/fcm/send|The Google Firebase Cloud Messaging endpoint|\n|`--fcm-prefix`|GUBLE_FCM_PREFIX|prefix|/fcm/|The FCM prefix / endpoint|\n\n#### Postgres\n\n|CLI Option|Env Variable|Values|Default|Description|\n|--- |--- |--- |--- |--- |\n|`--pg-host`|GUBLE_PG_HOST|hostname|localhost|The PostgreSQL hostname|\n|`--pg-port`|GUBLE_PG_PORT|port|5432|The PostgreSQL port|\n|`--pg-user`|GUBLE_PG_USER|user|guble|The PostgreSQL user|\n|`--pg-password`|GUBLE_PG_PASSWORD|password|guble|The PostgreSQL password|\n|`--pg-dbname`|GUBLE_PG_DBNAME|database|guble|The PostgreSQL database name|\n\n\n## Run All Tests\n```\ngo get -t github.com/smancke/guble/...\ngo test github.com/smancke/guble/...\n```\n\n# Clients\nThe following 
clients are available:\n* __Commandline Client__: https://github.com/smancke/guble/tree/master/guble-cli\n* __Go client library__: https://github.com/smancke/guble/tree/master/client\n* __JavaScript library__: (in early stage) https://github.com/smancke/guble-js\n\n# Protocol Reference\n\n## REST API\nCurrently there is a minimalistic REST API, just for publishing messages.\n\n```\nPOST /api/message/<topic>\n```\nURL parameters:\n* __userId__: The PublisherUserId\n* __messageId__: The PublisherMessageId\n\n### Headers\nYou can set fields in the header JSON of the message by providing the corresponding HTTP headers with the prefix `X-Guble-`.\n\nCurl example with the resulting message:\n```\ncurl -X POST -H \"x-Guble-Key: Value\" --data Hello 'http://127.0.0.1:8080/api/message/foo?userId=marvin&messageId=42'\n```\nResults in:\n```\n16,/foo,marvin,VoAdxGO3DBEn8vv8,42,1451236804\n{\"Key\":\"Value\"}\nHello\n```\n\n## WebSocket Protocol\nThe communication with the guble server is done by ordinary WebSockets, using a binary encoding.\n\n### Message Format\nAll payload messages sent from the server to the client are using the following format:\n```\n<path:string>,<sequenceId:int64>,<publisherUserId:string>,<publisherApplicationId:string>,<publisherMessageId:string>,<messagePublishingTime:unix-timestamp>\\n\n[<application headers json>]\\n\n<body>\n\nexample 1:\n/foo/bar,42,user01,phone1,id123,1420110000\n{\"Content-Type\": \"text/plain\", \"Correlation-Id\": \"7sdks723ksgqn\"}\nHello World\n\nexample 2:\n/foo/bar,42,user01,54sdcj8sd7,id123,1420110000\n\nanyByteData\n```\n\n* All text formats are assumed to be UTF-8 encoded.\n* Message `sequenceId`s are `int64`, and distinct within a topic.\n  The message `sequenceId`s are strictly monotonically increasing depending on the message age, but there is no guarantee for the right order while transmitting.\n\n### Client Commands\nThe client can send the following commands.\n\n#### Send\nPublish a message to a topic:\n```\n> 
<path> [<publisherMessageId>]\\n\n[<header>\\n]..\n\\n\n<body>\n\nexample:\n> /foo\n\nHello World\n```\n\n#### Subscribe/Receive\nReceive messages from a path (e.g. a topic or subtopic).\nThis command can be used to subscribe for incoming messages on a topic,\nas well as for replaying the message history.\n```\n+ <path> [<startId>[,<maxCount>]]\n```\n* `path`: the topic to receive the messages from\n* `startId`: the message id to start the replay\n** If no `startId` is given, only future messages will be received (simple subscribe).\n** If the `startId` is negative, it is interpreted as relative count of last messages in the history.\n* `maxCount`: the maximum number of messages to replay\n\n__Note__: Currently, the fetching of stored messages does not recognize subtopics.\n\nExamples:\n```\n+ /foo         # Subscribe to all future messages matching /foo\n+ /foo/bar     # Subscribe to all future messages matching /foo/bar\n\n+ /foo 0       # Receive all message from the topic and subscribe for further incoming messages.\n\n+ /foo 42      # Receive all message with message ids >= 42\n               # from the topic and subscribe for further incoming messages.\n\n+ /foo 0 20    # Receive the first (oldest) 20 messages within the topic and stop.\n               # (If the topic has less messages, it will stop after receiving all existing ones.)\n\n+ /foo -20     # Receive the last (newest) 20 messages from the topic and then\n               # subscribe for further incoming messages.\n\n+ /foo -20 20  # Receive the last (newest) 20 messages within the topic and stop.\n               # (If the topic has less messages, it will stop after receiving all existing ones.)\n```\n\n#### Unsubscribe/Cancel\nCancel further receiving of messages from a path (e.g. a topic or subtopic).\n\n```\n- <path>\n\nexample:\n- /foo\n- /foo/bar\n```\n\n### Server Status Messages\nThe server sends status messages to the client. 
All positive status messages start with `>`.\nStatus messages reporting an error start with `!`. Status messages are in the following format.\n\n```\n'#'<msgType> <Explanation text>\\n\n<json data>\n```\n\n#### Connection Message\n```\n#ok-connected You are connected to the server.\\n\n{\"ApplicationId\": \"the app id\", \"UserId\": \"the user id\", \"Time\": \"the server time as unix timestamp \"}\n```\n\nExample:\n```\n#connected You are connected to the server.\n{\"ApplicationId\": \"phone1\", \"UserId\": \"user01\", \"Time\": \"1420110000\"}\n```\n\n#### Send Success Notification\nThis notification confirms, that the messaging system has successfully received the message and now starts transmitting it to the subscribers:\n\n```\n#send <publisherMessageId>\n{\"sequenceId\": \"sequence id\", \"path\": \"/foo\", \"publisherMessageId\": \"publishers message id\", \"messagePublishingTime\": \"unix-timestamp\"}\n```\n\n#### Receive Success Notification\nDepending on the type of `+` (receive) command, up to three different notification messages will be sent back.\nBe aware, that a server may send more receive notifications that you would have expected in first place, e.g. when:\n* Additional messages are stored, while the first fetching is in progress\n* The server decides to meanwhile stop the online subscription and change to fetching,\n  because your client is too slow to read all incoming messages.\n\n1. When the fetch operation starts:\n\n    ```\n    #fetch-start <path> <count>\n    ```\n    * `path`: the topic path\n    * `count`: the number of messages that will be returned\n\n2. When the fetch operation is done:\n\n    ```\n    #fetch-done <path>\n    ```\n    * `path`: the topic path\n3. 
When the subscription to new messages was taken:\n\n    ```\n    #subscribed-to <path>\n    ```\n    * `path`: the topic path\n\n#### Unsubscribe Success Notification\nAn unsubscribe/cancel operation is confirmed by the following notification:\n```\n#canceled <path>\n```\n\n#### Send Error Notification\nThis message indicates, that the message could not be delivered.\n```\n!error-send <publisherMessageId> <error text>\n{\"sequenceId\": \"sequence id\", \"path\": \"/foo\", \"publisherMessageId\": \"publishers message id\", \"messagePublishingTime\": \"unix-timestamp\"}\n```\n\n#### Bad Request\nThis notification has the same meaning as the http 400 Bad Request.\n```\n!error-bad-request unknown command 'sdcsd'\n```\n\n#### Internal Server Error\nThis notification has the same meaning as the http 500 Internal Server Error.\n```\n!error-server-internal this computing node has problems\n```\n\n## Topics\n\nMessages can be hierarchically routed by topics, so they are represented by a path, separated by `/`.\nThe server takes care, that a message only gets delivered once, even if it is matched by multiple\nsubscription paths.\n\n### Subtopics\nThe path delimiter gives the semantic of subtopics. \nWith this, a subscription to a parent topic (e.g. `/foo`)\nalso results in receiving all messages of the subtopics (e.g. `/foo/bar`).\n"
  },
  {
    "path": "api/swagger.yaml",
    "content": "swagger: '2.0'\n\ninfo:\n  version: \"0.0.1\"\n  title: Guble API\n  \nschemes:\n  - http\n  \npaths:\n  \n  /api/subscribers/{topic}:\n    get:\n      produces:\n        - application/json\n      tags:\n        - REST\n        - APNS\n        - FCM\n\n      description: |\n        Get subscribers registered for a topic\n\n      parameters:\n       - name: topic\n         in: path\n         type: string\n         required: true\n         description: name of the subscribtion topic\n\n      responses:\n        200:\n          description: successful response\n          schema:\n            type: array\n            items:\n              $ref: '#/definitions/Subscriber'\n        500:\n          description: unknown error\n\n  /api/message/{topic}:\n    post:\n      consumes:\n        - application/json\n      tags:\n        - REST\n        - SMS\n        - APNS\n        - FCM\n      description: |\n        Send message to a connector\n\n      parameters:\n        - name: topic\n          in: path\n          type: string\n          required: true\n          description: |\n            Name of the subscribtion topic.\n            'sms' is a special topic to send a sms message and\n            must not be used neither as APNS nor as FCM topic.\n\n        - name: message\n          in: body\n          required: true\n          description: a json message in the format expected by the connector\n          schema:\n            type: object\n\n        - name: userId\n          in: header\n          required: false\n          type: string\n\n        - name: x-guble\n          in: header\n          required: false\n          type: string\n          description: x-guble- is a generic header prefix\n\n        - name: filterConnector\n          in: query\n          description: |\n            Specifies a connector which should handle message.\n            As the message is in the connector specific format,\n            the parameter should be treated as 
mandatory.\n          required: false\n          type: string\n          enum:\n           - apns\n           - fcm\n\n        - name: filterUserID\n          in: query\n          description: Specifies a subscribed user which should received the notification.\n          required: false\n          type: string\n\n        - in: query\n          name: filterDeviceToken\n          description: Specifies a device token which should received the notification.\n          required: false\n          type: string\n\n      responses:\n        200:\n          description: successful response\n\n        400:\n          description: malformed request\n\n        500:\n          description: unknown error\n\n\n  /apns/{device_token}/{user_id}/{topic}:\n    post:\n      tags:\n        - APNS\n\n      description: |\n        Create APN subscription\n        \n      parameters:\n      - name: device_token\n        in: path\n        type: string\n        required: true\n        description: device token which mobile device received from APNS\n\n      - name: user_id\n        in: path\n        type: string\n        required: true\n        description: customer uuid or 'anonymous' \n\n      - name: topic\n        in: path\n        type: string\n        required: true\n        description: name of the subscribtion topic \n        \n      responses:\n        200:\n          description: successful response\n        500:\n          description: unknown error          \n          \n    delete:\n      tags:\n        - APNS\n\n      description: |\n        Delete APN subscription\n        \n      parameters:\n      - name: device_token\n        in: path\n        type: string\n        required: true\n        description: device token which mobile device received from APNS\n\n      - name: user_id\n        in: path\n        type: string\n        required: true\n        description: customer uuid or 'anonymous' \n\n      - name: topic\n        in: path\n        type: string\n        required: 
true\n        description: name of the subscribtion topic \n        \n      responses:\n        200:\n          description: successful response\n        404:\n          description: subscription not found\n        500:\n          description: unknown error          \n  \n  /apns/:\n    get:\n      tags:\n        - APNS\n      description: |\n         Return the list of APNS subscriptions\n      parameters:\n        - in: query\n          name: device_token\n          description: device token which mobile device received from APNS \n          required: false\n          type: string\n        - in: query\n          name: user_id\n          description: device token \n          required: false\n          type: string\n        \n      responses:\n        200:\n          description: list of topics\n          schema:\n            type: array\n            items: {\n              type: string\n            }\n        400:\n          description: missing filters\n        500:\n          description: unknown error\n          \n          \n          \n  /apns/substitute/:\n    post:\n      tags:\n        - APNS\n      description: |\n         Substitutes field value of the APNS subscriber object.\n         Provided old value must match the current value stored in the object for operation\n         to be succcessful\n      parameters:\n        - name: body\n          in: body\n          required: true\n          schema:\n            $ref: '#/definitions/SubstitutionRequest'\n            \n      responses:\n        200:\n          description: successful response\n          schema:\n            $ref: '#/definitions/SubstitutionResponse'\n        400:\n          description: invalid substitution request\n          schema:\n            $ref: '#/definitions/ErrorResponse'\n        500:\n          description: unknown error          \n          schema:\n            $ref: '#/definitions/ErrorResponse'\n\n  /fcm/{device_token}/{user_id}/{topic}:\n    post:\n      tags:\n        - 
FCM\n\n      description: |\n        Create FCM subscription\n\n      parameters:\n      - name: device_token\n        in: path\n        type: string\n        required: true\n        description: device token which mobile device received from FCM\n\n      - name: user_id\n        in: path\n        type: string\n        required: true\n        description: customer uuid or 'anonymous'\n\n      - name: topic\n        in: path\n        type: string\n        required: true\n        description: name of the subscribtion topic\n\n      responses:\n        200:\n          description: successful response\n        500:\n          description: unknown error\n\n    delete:\n      tags:\n        - FCM\n\n      description: |\n        Delete FCM subscription\n\n      parameters:\n      - name: device_token\n        in: path\n        type: string\n        required: true\n        description: device token which mobile device received from FCM\n\n      - name: user_id\n        in: path\n        type: string\n        required: true\n        description: customer uuid or 'anonymous'\n\n      - name: topic\n        in: path\n        type: string\n        required: true\n        description: name of the subscribtion topic\n\n      responses:\n        200:\n          description: successful response\n        404:\n          description: subscription not found\n        500:\n          description: unknown error\n\n  /fcm/:\n    get:\n      tags:\n        - FCM\n      description: |\n         Return the list of subscriptions\n      parameters:\n        - in: query\n          name: device_token\n          description: device token which mobile device received from FCM\n          required: false\n          type: string\n        - in: query\n          name: user_id\n          description: device token\n          required: false\n          type: string\n\n      responses:\n        200:\n          description: list of topics\n          schema:\n            type: array\n            items: {\n 
             type: string\n            }\n        400:\n          description: missing filters\n        500:\n          description: unknown error\n\n\n\n  /fcm/substitute/:\n    post:\n      tags:\n        - FCM\n      description: |\n         Substitutes field value of the FCMC subscriber object.\n         Provided old value must match the current value stored in the object for operation\n         to be succcessful\n      parameters:\n        - name: body\n          in: body\n          required: true\n          schema:\n            $ref: '#/definitions/SubstitutionRequest'\n\n      responses:\n        200:\n          description: successful response\n          schema:\n            $ref: '#/definitions/SubstitutionResponse'\n        400:\n          description: invalid substitution request\n          schema:\n            $ref: '#/definitions/ErrorResponse'\n        500:\n          description: unknown error\n          schema:\n            $ref: '#/definitions/ErrorResponse'\n\n\n  /admin/healtcheck:\n    get:\n      produces:\n        - application/json\n      tags:\n        - ADMIN\n\n      description: Application health check\n\n      responses:\n        200:\n          description: successful response\n        500:\n          description: unknown error\n\n  /admin/metrics:\n    get:\n      produces:\n        - application/json\n      tags:\n        - ADMIN\n\n      description: Application metrics\n\n      responses:\n        200:\n          description: successful response\n          schema:\n            type: object\n        500:\n          description: unknown error\n\n  /stream/:\n    get:\n      tags:\n        - WEBSOCKET\n\n      description: Web socket interface\n\n      responses:\n        201:\n          description: Response code is 101 after protocol was switched\ndefinitions:\n  ErrorResponse:\n     type: object\n     properties:\n        error:\n          description: error message\n          type: string\n\n  Subscriber:\n     type: object\n     
required:\n      - connector\n      - device_token\n      - user_id\n\n     properties:\n        connector:\n          description: name of the connector\n          type: string\n          enum:\n            - apns\n            - fcm\n\n        device_token:\n          description: device token\n          type: string\n\n        user_id:\n          description: customer uuid or 'anonymous'\n          type: string\n\n  SubstitutionRequest:\n     type: object\n     required:\n      - field\n      - old_value\n      - new_value\n     properties:\n        field:\n          description: field name\n          type: string\n          enum:\n            - device_token\n            - user_id\n        old_value:\n          description: old value\n          type: string\n        new_value:\n          description: new value\n          type: string\n\n  SubstitutionResponse:\n     type: object\n     properties:\n        modified:\n          description: number of modified entries\n          type: integer\n"
  },
  {
    "path": "client/client.go",
    "content": "package client\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/gorilla/websocket\"\n\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"client\",\n})\n\ntype WSConnection interface {\n\tWriteMessage(messageType int, data []byte) error\n\tReadMessage() (messageType int, p []byte, err error)\n\tClose() error\n}\n\nfunc DefaultConnectionFactory(url string, origin string) (WSConnection, error) {\n\tlogger.WithField(\"url\", url).Info(\"Connecting to\")\n\n\theader := http.Header{\"Origin\": []string{origin}}\n\tconn, _, err := websocket.DefaultDialer.Dial(url, header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.WithField(\"url\", url).Info(\"Connected to\")\n\n\treturn conn, nil\n}\n\ntype WSConnectionFactory func(url string, origin string) (WSConnection, error)\n\ntype Client interface {\n\tStart() error\n\tClose()\n\n\tSubscribe(path string) error\n\tUnsubscribe(path string) error\n\n\tSend(path string, body string, header string) error\n\tSendBytes(path string, body []byte, header string) error\n\n\tWriteRawMessage(message []byte) error\n\tMessages() chan *protocol.Message\n\tStatusMessages() chan *protocol.NotificationMessage\n\tErrors() chan *protocol.NotificationMessage\n\n\tSetWSConnectionFactory(WSConnectionFactory)\n\tIsConnected() bool\n}\n\ntype client struct {\n\tmu                  sync.RWMutex\n\tws                  WSConnection\n\tmessages            chan *protocol.Message\n\tstatusMessages      chan *protocol.NotificationMessage\n\terrors              chan *protocol.NotificationMessage\n\turl                 string\n\torigin              string\n\tshouldStopChan      chan bool\n\tshouldStopFlag      bool\n\tautoReconnect       bool\n\twSConnectionFactory func(url string, origin string) (WSConnection, error)\n\t// flag, to indicate if the client is connected\n\tconnected bool\n}\n\n// Open is a shortcut for New() and 
Start()\nfunc Open(url, origin string, channelSize int, autoReconnect bool) (Client, error) {\n\tc := New(url, origin, channelSize, autoReconnect)\n\tc.SetWSConnectionFactory(DefaultConnectionFactory)\n\treturn c, c.Start()\n}\n\n// New creates a new client, without starting the connection\nfunc New(url, origin string, channelSize int, autoReconnect bool) Client {\n\treturn &client{\n\t\tmessages:       make(chan *protocol.Message, channelSize),\n\t\tstatusMessages: make(chan *protocol.NotificationMessage, channelSize),\n\t\terrors:         make(chan *protocol.NotificationMessage, channelSize),\n\t\turl:            url,\n\t\torigin:         origin,\n\t\tshouldStopChan: make(chan bool, 1),\n\t\tautoReconnect:  autoReconnect,\n\t}\n}\n\nfunc (c *client) SetWSConnectionFactory(connection WSConnectionFactory) {\n\tc.wSConnectionFactory = connection\n}\n\nfunc (c *client) IsConnected() bool {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.connected\n}\n\nfunc (c *client) setIsConnected(connected bool) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.connected = connected\n}\n\n// Connect and start the read go routine.\n// If an error occurs on first connect, it will be returned.\n// Further connection errors will only be logged.\nfunc (c *client) Start() error {\n\tvar err error\n\tc.ws, err = c.wSConnectionFactory(c.url, c.origin)\n\tc.setIsConnected(err == nil)\n\n\tif c.IsConnected() {\n\t\tgo c.readLoop()\n\t} else if c.autoReconnect {\n\t\tgo c.startWithReconnect()\n\t}\n\treturn err\n}\n\nfunc (c *client) startWithReconnect() {\n\tfor {\n\t\tif c.IsConnected() {\n\t\t\terr := c.readLoop()\n\t\t\tif err == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif c.shouldStop() {\n\n\t\t\treturn\n\t\t}\n\n\t\tvar err error\n\t\tc.ws, err = c.wSConnectionFactory(c.url, c.origin)\n\t\tif err != nil {\n\t\t\tc.setIsConnected(false)\n\n\t\t\tlogger.WithError(err).Error(\"Error on connect, retry in 50 ms\")\n\n\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t} else 
{\n\t\t\tc.setIsConnected(true)\n\t\t\tlogger.Warn(\"Reconnected again\")\n\t\t}\n\t}\n}\n\nfunc (c *client) readLoop() error {\n\tfor {\n\t\t_, msg, err := c.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\tc.setIsConnected(false)\n\t\t\tif c.shouldStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tlogger.WithError(err).Error(\"Error when reading from websocket\")\n\n\t\t\tc.errors <- clientErrorMessage(err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tlogger.WithField(\"msg\", string(msg)).Debug(\"Raw >\")\n\t\tc.handleIncomingMessage(msg)\n\t}\n}\n\nfunc (c *client) shouldStop() bool {\n\tif c.shouldStopFlag {\n\t\treturn true\n\t}\n\tselect {\n\tcase <-c.shouldStopChan:\n\t\tc.shouldStopFlag = true\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (c *client) handleIncomingMessage(msg []byte) {\n\tparsed, err := protocol.Decode(msg)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error on parsing of incoming message\")\n\t\tc.errors <- clientErrorMessage(err.Error())\n\t\treturn\n\t}\n\n\tswitch message := parsed.(type) {\n\tcase *protocol.Message:\n\t\tc.messages <- message\n\tcase *protocol.NotificationMessage:\n\t\tif message.IsError {\n\t\t\tselect {\n\t\t\tcase c.errors <- message:\n\t\t\tdefault:\n\t\t\t}\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase c.statusMessages <- message:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *client) Subscribe(path string) error {\n\tcmd := &protocol.Cmd{\n\t\tName: protocol.CmdReceive,\n\t\tArg:  path,\n\t}\n\terr := c.ws.WriteMessage(websocket.BinaryMessage, cmd.Bytes())\n\treturn err\n}\n\nfunc (c *client) Unsubscribe(path string) error {\n\tcmd := &protocol.Cmd{\n\t\tName: protocol.CmdCancel,\n\t\tArg:  path,\n\t}\n\terr := c.ws.WriteMessage(websocket.BinaryMessage, cmd.Bytes())\n\treturn err\n}\n\nfunc (c *client) Send(path string, body string, header string) error {\n\treturn c.SendBytes(path, []byte(body), header)\n}\n\nfunc (c *client) SendBytes(path string, body []byte, header string) error {\n\tcmd := 
&protocol.Cmd{\n\t\tName:       protocol.CmdSend,\n\t\tArg:        path,\n\t\tBody:       body,\n\t\tHeaderJSON: header,\n\t}\n\n\treturn c.WriteRawMessage(cmd.Bytes())\n}\n\nfunc (c *client) WriteRawMessage(message []byte) error {\n\treturn c.ws.WriteMessage(websocket.BinaryMessage, message)\n}\n\nfunc (c *client) Messages() chan *protocol.Message {\n\treturn c.messages\n}\n\nfunc (c *client) StatusMessages() chan *protocol.NotificationMessage {\n\treturn c.statusMessages\n}\n\nfunc (c *client) Errors() chan *protocol.NotificationMessage {\n\treturn c.errors\n}\n\nfunc (c *client) Close() {\n\tc.shouldStopChan <- true\n\tc.ws.Close()\n}\n\nfunc clientErrorMessage(message string) *protocol.NotificationMessage {\n\treturn &protocol.NotificationMessage{\n\t\tIsError: true,\n\t\tName:    \"clientError\",\n\t\tArg:     message,\n\t}\n}\n"
  },
  {
    "path": "client/client_test.go",
    "content": "package client\n\nimport (\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar aNormalMessage = `/foo/bar,42,user01,phone01,{},1420110000,0\n\nHello World`\n\nvar aSendNotification = \"#send\"\n\nvar anErrorNotification = \"!error-send\"\n\nfunc MockConnectionFactory(connectionMock *MockWSConnection) func(string, string) (WSConnection, error) {\n\treturn func(url string, origin string) (WSConnection, error) {\n\t\treturn connectionMock, nil\n\t}\n}\n\nfunc TestConnectErrorWithoutReconnection(t *testing.T) {\n\ta := assert.New(t)\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 1, false)\n\n\t// which raises an error on connect\n\tcallCounter := 0\n\tc.SetWSConnectionFactory(func(url string, origin string) (WSConnection, error) {\n\t\ta.Equal(\"url\", url)\n\t\ta.Equal(\"origin\", origin)\n\t\tcallCounter++\n\t\treturn nil, fmt.Errorf(\"emulate connection error\")\n\t})\n\n\t// when we start\n\terr := c.Start()\n\n\t// then\n\ta.Error(err)\n\ta.Equal(1, callCounter)\n}\n\nfunc TestConnectErrorWithoutReconnectionUsingOpen(t *testing.T) {\n\ta := assert.New(t)\n\n\tc, err := Open(\"url\", \"origin\", 1, false)\n\n\t// which raises an error on connect\n\tcallCounter := 0\n\tc.SetWSConnectionFactory(func(url string, origin string) (WSConnection, error) {\n\t\ta.Equal(\"url\", url)\n\t\ta.Equal(\"origin\", origin)\n\t\tcallCounter++\n\t\treturn nil, fmt.Errorf(\"emulate connection error\")\n\t})\n\n\ta.Error(err)\n}\n\nfunc TestConnectErrorWithReconnection(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 1, true)\n\n\t// which raises an error twice and then allows to connect\n\tcallCounter := 0\n\tconnMock := NewMockWSConnection(ctrl)\n\tconnMock.EXPECT().ReadMessage().Do(func() { time.Sleep(time.Second) 
})\n\tc.SetWSConnectionFactory(func(url string, origin string) (WSConnection, error) {\n\t\ta.Equal(\"url\", url)\n\t\ta.Equal(\"origin\", origin)\n\t\tif callCounter <= 2 {\n\t\t\tcallCounter++\n\t\t\treturn nil, fmt.Errorf(\"emulate connection error\")\n\t\t}\n\t\treturn connMock, nil\n\t})\n\n\t// when we start\n\terr := c.Start()\n\n\t// then we get an error, first\n\ta.Error(err)\n\ta.False(c.IsConnected())\n\n\t// when we wait for two iterations and 10ms buffer time to connect\n\ttime.Sleep(time.Millisecond * 110)\n\n\t// then we got connected\n\ta.True(c.IsConnected())\n\ta.Equal(3, callCounter)\n}\n\nfunc TestStopableClient(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 1, true)\n\n\t// with a closeable connection\n\tconnMock := NewMockWSConnection(ctrl)\n\tclose := make(chan bool, 1)\n\tconnMock.EXPECT().ReadMessage().\n\t\tDo(func() { <-close }).\n\t\tReturn(0, []byte{}, fmt.Errorf(\"expected close error\"))\n\n\tconnMock.EXPECT().Close().Do(func() {\n\t\tclose <- true\n\t})\n\n\tc.SetWSConnectionFactory(MockConnectionFactory(connMock))\n\n\t// when we start\n\terr := c.Start()\n\n\t// then we are connected\n\ta.NoError(err)\n\ta.True(c.IsConnected())\n\n\t// when we close\n\tc.Close()\n\ttime.Sleep(time.Millisecond * 1)\n\n\t// then the client returns\n\ta.False(c.IsConnected())\n}\n\nfunc TestReceiveAMessage(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 10, false)\n\n\t// with a closeable connection\n\tconnMock := NewMockWSConnection(ctrl)\n\tclose := make(chan bool, 1)\n\n\t// normal message\n\tcall1 := connMock.EXPECT().ReadMessage().\n\t\tReturn(4, []byte(aNormalMessage), nil)\n\tcall2 := connMock.EXPECT().ReadMessage().\n\t\tReturn(4, []byte(aSendNotification), nil)\n\tcall3 := connMock.EXPECT().ReadMessage().\n\t\tReturn(4, 
[]byte(\"---\"), nil)\n\tcall4 := connMock.EXPECT().ReadMessage().\n\t\tReturn(4, []byte(anErrorNotification), nil)\n\tcall5 := connMock.EXPECT().ReadMessage().\n\t\tDo(func() { <-close }).\n\t\tReturn(0, []byte{}, fmt.Errorf(\"expected close error\")).\n\t\tAnyTimes()\n\n\tcall5.After(call4)\n\tcall4.After(call3)\n\tcall3.After(call2)\n\tcall2.After(call1)\n\n\tc.SetWSConnectionFactory(MockConnectionFactory(connMock))\n\n\tconnMock.EXPECT().Close().Do(func() {\n\t\tclose <- true\n\t})\n\n\t// when we start\n\terr := c.Start()\n\ta.NoError(err)\n\ta.True(c.IsConnected())\n\n\t// then we receive the expected message\n\tselect {\n\tcase m := <-c.Messages():\n\t\ta.Equal(aNormalMessage, string(m.Bytes()))\n\tcase <-time.After(time.Millisecond * 10):\n\t\ta.Fail(\"timeout while waiting for message\")\n\t}\n\n\t// and we receive the notification\n\tselect {\n\tcase m := <-c.StatusMessages():\n\t\ta.Equal(aSendNotification, string(m.Bytes()))\n\tcase <-time.After(time.Millisecond * 10):\n\t\ta.Fail(\"timeout while waiting for message\")\n\t}\n\n\t// parse error\n\tselect {\n\tcase m := <-c.Errors():\n\t\ta.True(strings.HasPrefix(string(m.Bytes()), \"!clientError \"))\n\tcase <-time.After(time.Millisecond * 10):\n\t\ta.Fail(\"timeout while waiting for message\")\n\t}\n\n\t// and we receive the error notification\n\tselect {\n\tcase m := <-c.Errors():\n\t\ta.Equal(anErrorNotification, string(m.Bytes()))\n\tcase <-time.After(time.Millisecond * 10):\n\t\ta.Fail(\"timeout while waiting for message\")\n\t}\n\n\tc.Close()\n}\n\nfunc TestSendAMessage(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\t//\ta := assert.New(t)\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 1, true)\n\n\t// when expects a message\n\tconnMock := NewMockWSConnection(ctrl)\n\tconnMock.EXPECT().WriteMessage(websocket.BinaryMessage, []byte(\"> /foo\\n{}\\nTest\"))\n\tconnMock.EXPECT().\n\t\tReadMessage().\n\t\tReturn(websocket.BinaryMessage, []byte(aNormalMessage), 
nil).\n\t\tDo(func() {\n\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t}).\n\t\tAnyTimes()\n\tc.SetWSConnectionFactory(MockConnectionFactory(connMock))\n\n\tc.Start()\n\t// then the expectation is meet by sending it\n\tc.Send(\"/foo\", \"Test\", \"{}\")\n\t// stop client after 200ms\n\ttime.AfterFunc(time.Millisecond*200, func() { c.Close() })\n}\n\nfunc TestSendSubscribeMessage(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 1, true)\n\n\t// when expects a message\n\tconnMock := NewMockWSConnection(ctrl)\n\tconnMock.EXPECT().WriteMessage(websocket.BinaryMessage, []byte(\"+ /foo\"))\n\tconnMock.EXPECT().\n\t\tReadMessage().\n\t\tReturn(websocket.BinaryMessage, []byte(aNormalMessage), nil).\n\t\tDo(func() {\n\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t}).\n\t\tAnyTimes()\n\tc.SetWSConnectionFactory(MockConnectionFactory(connMock))\n\n\tc.Start()\n\tc.Subscribe(\"/foo\")\n\n\t// stop client after 200ms\n\ttime.AfterFunc(time.Millisecond*200, func() { c.Close() })\n}\n\nfunc TestSendUnSubscribeMessage(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\t// given a client\n\tc := New(\"url\", \"origin\", 1, true)\n\n\t// when expects a message\n\tconnMock := NewMockWSConnection(ctrl)\n\tconnMock.EXPECT().WriteMessage(websocket.BinaryMessage, []byte(\"- /foo\"))\n\tconnMock.EXPECT().\n\t\tReadMessage().\n\t\tReturn(websocket.BinaryMessage, []byte(aNormalMessage), nil).\n\t\tDo(func() {\n\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t}).\n\t\tAnyTimes()\n\tc.SetWSConnectionFactory(MockConnectionFactory(connMock))\n\n\tc.Start()\n\tc.Unsubscribe(\"/foo\")\n\n\t// stop client after 200ms\n\ttime.AfterFunc(time.Millisecond*200, func() { c.Close() })\n}\n"
  },
  {
    "path": "client/mocks_client_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/client (interfaces: WSConnection,Client)\n\npackage client\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\n\t\"github.com/smancke/guble/protocol\"\n)\n\n// Mock of WSConnection interface\ntype MockWSConnection struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockWSConnectionRecorder\n}\n\n// Recorder for MockWSConnection (not exported)\ntype _MockWSConnectionRecorder struct {\n\tmock *MockWSConnection\n}\n\nfunc NewMockWSConnection(ctrl *gomock.Controller) *MockWSConnection {\n\tmock := &MockWSConnection{ctrl: ctrl}\n\tmock.recorder = &_MockWSConnectionRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockWSConnection) EXPECT() *_MockWSConnectionRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockWSConnection) Close() error {\n\tret := _m.ctrl.Call(_m, \"Close\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockWSConnectionRecorder) Close() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Close\")\n}\n\nfunc (_m *MockWSConnection) ReadMessage() (int, []byte, error) {\n\tret := _m.ctrl.Call(_m, \"ReadMessage\")\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].([]byte)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockWSConnectionRecorder) ReadMessage() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"ReadMessage\")\n}\n\nfunc (_m *MockWSConnection) WriteMessage(_param0 int, _param1 []byte) error {\n\tret := _m.ctrl.Call(_m, \"WriteMessage\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockWSConnectionRecorder) WriteMessage(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"WriteMessage\", arg0, arg1)\n}\n\n// Mock of Client interface\ntype MockClient struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockClientRecorder\n}\n\n// Recorder for MockClient (not exported)\ntype _MockClientRecorder struct {\n\tmock 
*MockClient\n}\n\nfunc NewMockClient(ctrl *gomock.Controller) *MockClient {\n\tmock := &MockClient{ctrl: ctrl}\n\tmock.recorder = &_MockClientRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockClient) EXPECT() *_MockClientRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockClient) Close() {\n\t_m.ctrl.Call(_m, \"Close\")\n}\n\nfunc (_mr *_MockClientRecorder) Close() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Close\")\n}\n\nfunc (_m *MockClient) Errors() chan *protocol.NotificationMessage {\n\tret := _m.ctrl.Call(_m, \"Errors\")\n\tret0, _ := ret[0].(chan *protocol.NotificationMessage)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) Errors() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Errors\")\n}\n\nfunc (_m *MockClient) IsConnected() bool {\n\tret := _m.ctrl.Call(_m, \"IsConnected\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) IsConnected() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IsConnected\")\n}\n\nfunc (_m *MockClient) Messages() chan *protocol.Message {\n\tret := _m.ctrl.Call(_m, \"Messages\")\n\tret0, _ := ret[0].(chan *protocol.Message)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) Messages() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Messages\")\n}\n\nfunc (_m *MockClient) Send(_param0 string, _param1 string, _param2 string) error {\n\tret := _m.ctrl.Call(_m, \"Send\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) Send(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Send\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockClient) SendBytes(_param0 string, _param1 []byte, _param2 string) error {\n\tret := _m.ctrl.Call(_m, \"SendBytes\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) SendBytes(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn 
_mr.mock.ctrl.RecordCall(_mr.mock, \"SendBytes\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockClient) SetWSConnectionFactory(_param0 WSConnectionFactory) {\n\t_m.ctrl.Call(_m, \"SetWSConnectionFactory\", _param0)\n}\n\nfunc (_mr *_MockClientRecorder) SetWSConnectionFactory(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetWSConnectionFactory\", arg0)\n}\n\nfunc (_m *MockClient) Start() error {\n\tret := _m.ctrl.Call(_m, \"Start\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) Start() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Start\")\n}\n\nfunc (_m *MockClient) StatusMessages() chan *protocol.NotificationMessage {\n\tret := _m.ctrl.Call(_m, \"StatusMessages\")\n\tret0, _ := ret[0].(chan *protocol.NotificationMessage)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) StatusMessages() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"StatusMessages\")\n}\n\nfunc (_m *MockClient) Subscribe(_param0 string) error {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockClient) Unsubscribe(_param0 string) error {\n\tret := _m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n\nfunc (_m *MockClient) WriteRawMessage(_param0 []byte) error {\n\tret := _m.ctrl.Call(_m, \"WriteRawMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockClientRecorder) WriteRawMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"WriteRawMessage\", arg0)\n}\n"
  },
  {
    "path": "guble-cli/README.md",
    "content": "# The guble command line client\n\nThis is the command line client for the guble messaging server. It is intended\nfor demonstration and debugging use.\n\n[![Build Status](https://api.travis-ci.org/smancke/guble.svg)](https://travis-ci.org/smancke/guble)\n\n\n## Starting the client with docker \nThe guble docker image has the command line client included. You can execute it within a running golang container and\nconnect to the server.\n```\ndocker run -d --name guble smancke/guble\ndocker exec -it guble /go/bin/guble-cli\n```\n\n\n## Building from source\n```\n\tgo get github.com/smancke/guble/guble-cli\n\tbin/guble-cli\n```\n\n## Start options\n```\nusage: guble-cli [--exit] [--verbose] [--url URL] [--user USER] [--log-info] [--log-debug] [COMMANDS [COMMANDS ...]]\n\npositional arguments:\n  commands\n\noptions:\n  --exit, -x              Exit after sending the commands\n  --verbose, -v           Display verbose server communication\n  --url URL               The websocket url to connect (ws://localhost:8080/stream/)\n  --user USER             The user name to connect with (guble-cli)\n  --log-info              Log on INFO level (false)\n  --log-debug             Log on DEBUG level (false)\n```\n\n## Commands in the client\nIn the running client, you can use the commands from the websocket api, e.g:\n```\n?           # prints some usage info\n+ /foo/bar  # subscribe to the topic /foo/bar\n+ /foo 0    # read from message 0 and subscribe to the topic /foo\n+ /foo 0 5  # read messages 0-5 from /foo\n+ /foo -5   # read the last 5 messages and subscribe to the topic /foo\n- /foo      # cancel the subscription for /foo\n\n> /foo         # send a message to /foo\n> /foo/bar 42  # send a message to /foo/bar with publisherid 42\n```\n\n\n\n"
  },
  {
    "path": "guble-cli/main.go",
    "content": "package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"gopkg.in/alecthomas/kingpin.v2\"\n)\n\nvar (\n\texit     = kingpin.Flag(\"exit\", \"Exit after sending the commands\").Short('x').Bool()\n\tcommands = kingpin.Arg(\"commands\", \"The commands to send after startup\").Strings()\n\tverbose  = kingpin.Flag(\"verbose\", \"Display verbose server communication\").Short('v').Bool()\n\turl      = kingpin.Flag(\"url\", \"The websocket url to connect to\").Default(\"ws://localhost:8080/stream/\").String()\n\tuser     = kingpin.Flag(\"user\", \"The user name to connect with (guble-cli)\").Short('u').Default(\"guble-cli\").String()\n\tlogLevel = kingpin.Flag(\"log\", \"Log level\").\n\t\t\tShort('l').\n\t\t\tDefault(log.ErrorLevel.String()).\n\t\t\tEnvar(\"GUBLE_LOG\").\n\t\t\tEnum(logLevels()...)\n\n\tlogger = log.WithField(\"app\", \"guble-cli\")\n)\n\nfunc logLevels() (levels []string) {\n\tfor _, level := range log.AllLevels {\n\t\tlevels = append(levels, level.String())\n\t}\n\treturn\n}\n\n// This is a minimal commandline client to connect through a websocket\nfunc main() {\n\tkingpin.Parse()\n\n\t// set log level\n\tlevel, err := log.ParseLevel(*logLevel)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err).Fatal(\"Invalid log level\")\n\t}\n\tlog.SetLevel(level)\n\n\torigin := \"http://localhost/\"\n\turl := fmt.Sprintf(\"%v/user/%v\", removeTrailingSlash(*url), *user)\n\tclient, err := client.Open(url, origin, 100, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo writeLoop(client)\n\tgo readLoop(client)\n\n\tfor _, cmd := range *commands {\n\t\tclient.WriteRawMessage([]byte(cmd))\n\t}\n\tif *exit {\n\t\treturn\n\t}\n\twaitForTermination(func() {})\n}\n\nfunc readLoop(client client.Client) {\n\tfor {\n\t\tselect {\n\t\tcase incomingMessage := 
<-client.Messages():\n\t\t\tif *verbose {\n\t\t\t\tfmt.Println(string(incomingMessage.Bytes()))\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%v: %v\\n\", incomingMessage.UserID, incomingMessage.BodyAsString())\n\t\t\t}\n\t\tcase e := <-client.Errors():\n\t\t\tfmt.Println(\"ERROR: \" + string(e.Bytes()))\n\t\tcase status := <-client.StatusMessages():\n\t\t\tfmt.Println(string(status.Bytes()))\n\t\t\tfmt.Println()\n\t\t}\n\t}\n}\n\nfunc writeLoop(client client.Client) {\n\tshouldStop := false\n\tfor !shouldStop {\n\t\tfunc() {\n\t\t\tdefer protocol.PanicLogger()\n\t\t\treader := bufio.NewReader(os.Stdin)\n\t\t\ttext, err := reader.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif strings.TrimSpace(text) == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif strings.TrimSpace(text) == \"?\" || strings.TrimSpace(text) == \"help\" {\n\t\t\t\tprintHelp()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif strings.HasPrefix(text, \">\") {\n\t\t\t\tfmt.Print(\"header: \")\n\t\t\t\theader, err := reader.ReadString('\\n')\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttext += header\n\t\t\t\tfmt.Print(\"body: \")\n\t\t\t\tbody, err := reader.ReadString('\\n')\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttext += strings.TrimSpace(body)\n\t\t\t}\n\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\"Sending: %v\\n\", text)\n\t\t\t}\n\t\t\tif err := client.WriteRawMessage([]byte(text)); err != nil {\n\t\t\t\tshouldStop = true\n\t\t\t\tlogger.WithError(err).Error(\"Error on Writing  message\")\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc waitForTermination(callback func()) {\n\tsigc := make(chan os.Signal)\n\tsignal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)\n\tlog.Printf(\"%q\", <-sigc)\n\tcallback()\n\tos.Exit(0)\n}\n\nfunc printHelp() {\n\tfmt.Println(`\n## Commands\n?           
# print this info\n\n+ /foo/bar  # subscribe to the topic /foo/bar\n+ /foo 0    # read from message 0 and subscribe to the topic /foo\n+ /foo 0 5  # read messages 0-5 from /foo\n+ /foo -5   # read the last 5 messages and subscribe to the topic /foo\n\n- /foo      # cancel the subscription for /foo\n\n> /foo         # send a message to /foo\n> /foo/bar 42  # send a message to /foo/bar with publisherid 42\n`)\n}\n\nfunc removeTrailingSlash(path string) string {\n\tif len(path) > 1 && path[len(path)-1] == '/' {\n\t\treturn path[:len(path)-1]\n\t}\n\treturn path\n}\n"
  },
  {
    "path": "guble-cli/main_test.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc Test_PrintHelp(t *testing.T) {\n\texpectedHelpMessage := `\n## Commands\n?           # print this info\n\n+ /foo/bar  # subscribe to the topic /foo/bar\n+ /foo 0    # read from message 0 and subscribe to the topic /foo\n+ /foo 0 5  # read messages 0-5 from /foo\n+ /foo -5   # read the last 5 messages and subscribe to the topic /foo\n\n- /foo      # cancel the subscription for /foo\n\n> /foo         # send a message to /foo\n> /foo/bar 42  # send a message to /foo/bar with publisherid 42\n` + \"\\n\"\n\n\trescueStdout := os.Stdout\n\tr, w, _ := os.Pipe()\n\tos.Stdout = w\n\n\tprintHelp()\n\n\tw.Close()\n\tout, _ := ioutil.ReadAll(r)\n\tos.Stdout = rescueStdout\n\n\tresultMessage := fmt.Sprintf(\"%s\", out)\n\tassert.Equal(t, expectedHelpMessage, resultMessage)\n\n}\n\nfunc Test_removeTrailingSlash(t *testing.T) {\n\tcases := []struct {\n\t\texpected, path string\n\t}{\n\t\t{\"/foo/user/marvin\", \"/foo/user/marvin\"},\n\t\t{\"/foo/user/marvin\", \"/foo/user/marvin/\"},\n\t\t{\"/\", \"/\"},\n\t}\n\n\tfor i, c := range cases {\n\t\tassert.Equal(t, c.expected, removeTrailingSlash(c.path), fmt.Sprintf(\"Failed at  case no=%d\", i))\n\t}\n}\n"
  },
  {
    "path": "logformatter/logstash_formatter.go",
    "content": "package logformatter\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\n\t\"github.com/Sirupsen/logrus\"\n\t\"os\"\n\t\"time\"\n)\n\nconst (\n\tdefaultServiceName     = \"guble\"\n\tdefaultLogType         = \"application\"\n\tdefaultApplicationType = \"service\"\n)\n\n// LogstashFormatter generates json in logstash format.\n// Logstash site: http://logstash.net/\ntype LogstashFormatter struct {\n\n\t//Type of the fields\n\tType string // if not empty use for logstash type field.\n\n\t//Env is the environment on which the application is running\n\tEnv string\n\n\t//ServiceName will be by default guble\n\tServiceName string\n\n\t//ApplicationType will be  by default \"service\". Other values could be \"service\", \"system\", \"appserver\", \"webserver\"\n\tApplicationType string\n\n\t//LogType will be by default  application. Other possible values \"access\", \"error\", \"application\", \"system\"\n\tLogType string\n\n\t//TimestampFormat sets the format used for timestamps.\n\tTimestampFormat string\n}\n\n// Format the logrus entry to a byte slice, or return an error.\nfunc (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {\n\tfields := make(logrus.Fields)\n\n\tfor k, v := range entry.Data {\n\t\tswitch v := v.(type) {\n\t\tcase error:\n\t\t\t// Otherwise errors are ignored by `encoding/json`\n\t\t\t// https://github.com/Sirupsen/logrus/issues/137\n\t\t\t// https://github.com/sirupsen/logrus/issues/377\n\t\t\tfields[k] = v.Error()\n\t\tdefault:\n\t\t\tfields[k] = v\n\t\t}\n\t}\n\n\tif f.Env != \"\" {\n\t\tfields[\"environment\"] = f.Env\n\t}\n\n\ttimeStampFormat := f.TimestampFormat\n\tif timeStampFormat == \"\" {\n\t\ttimeStampFormat = time.RFC3339\n\t}\n\n\tfields[\"@timestamp\"] = entry.Time.Format(timeStampFormat)\n\n\tif f.ServiceName != \"\" {\n\t\tfields[\"service\"] = f.ServiceName\n\t} else {\n\t\tfields[\"service\"] = defaultServiceName\n\t}\n\n\tif f.ApplicationType != \"\" {\n\t\tfields[\"application_type\"] = 
f.ApplicationType\n\t} else {\n\t\tfields[\"application_type\"] = defaultApplicationType\n\t}\n\n\tif f.LogType != \"\" {\n\t\tfields[\"log_type\"] = f.LogType\n\t} else {\n\t\tfields[\"log_type\"] = defaultLogType\n\t}\n\n\t// set level field, prefixing fields clashes\n\tif v, ok := entry.Data[\"loglevel\"]; ok {\n\t\tfields[\"fields.loglevel\"] = v\n\t}\n\tfields[\"loglevel\"] = entry.Level.String()\n\n\t//set host field, prefixing fields clashes\n\tif v, ok := entry.Data[\"host\"]; ok {\n\t\tfields[\"fields.host\"] = v\n\t}\n\tif hostname, err := os.Hostname(); err == nil {\n\t\tfields[\"host\"] = hostname\n\t}\n\n\t// set type field\n\tif f.Type != \"\" {\n\t\tif v, ok := entry.Data[\"type\"]; ok {\n\t\t\tfields[\"fields.type\"] = v\n\t\t}\n\t\tfields[\"type\"] = f.Type\n\t}\n\n\t// set message field, prefixing fields clashes\n\tif v, ok := entry.Data[\"msg\"]; ok {\n\t\tfields[\"fields.msg\"] = v\n\t}\n\tfields[\"msg\"] = entry.Message\n\n\tserialized, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to marshal fields to JSON, %v\", err)\n\t}\n\treturn append(serialized, '\\n'), nil\n}\n"
  },
  {
    "path": "logformatter/logstash_formatter_test.go",
    "content": "package logformatter\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"testing\"\n\n\t\"github.com/Sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestLogstashFormatter_Format(t *testing.T) {\n\ta := assert.New(t)\n\n\tlf := LogstashFormatter{Type: \"abc\", ServiceName: \"guble\", Env: \"prod\"}\n\n\tfields := logrus.Fields{\n\t\t\"msg\":   \"def\",\n\t\t\"level\": \"ijk\",\n\t\t\"type\":  \"lmn\",\n\t\t\"one\":   1,\n\t\t\"pi\":    3.14,\n\t\t\"bool\":  true,\n\t}\n\n\tentry := logrus.WithFields(fields)\n\tentry.Message = \"msg\"\n\tentry.Level = logrus.InfoLevel\n\n\tb, _ := lf.Format(entry)\n\n\tvar data map[string]interface{}\n\tdec := json.NewDecoder(bytes.NewReader(b))\n\tdec.UseNumber()\n\tdec.Decode(&data)\n\n\t// base fields\n\ta.Equal(\"application\", data[\"log_type\"])\n\ta.Equal(\"service\", data[\"application_type\"])\n\ta.Equal(\"guble\", data[\"service\"])\n\ta.Equal(\"prod\", data[\"environment\"])\n\n\ta.NotEmpty(data[\"@timestamp\"])\n\ta.NotEmpty(data[\"host\"])\n\ta.Equal(\"abc\", data[\"type\"])\n\ta.Equal(\"msg\", data[\"msg\"])\n\ta.Equal(\"info\", data[\"loglevel\"])\n\n\t// substituted fields\n\ta.Equal(\"def\", data[\"fields.msg\"])\n\ta.Equal(\"lmn\", data[\"fields.type\"])\n\n\t// formats\n\ta.Equal(json.Number(\"1\"), data[\"one\"])\n\ta.Equal(json.Number(\"3.14\"), data[\"pi\"])\n\ta.Equal(true, data[\"bool\"])\n}\n"
  },
  {
    "path": "main.go",
    "content": "package main\n\nimport (\n\t\"github.com/smancke/guble/server\"\n)\n\nfunc main() {\n\tserver.Main()\n}\n"
  },
  {
    "path": "protocol/cmd.go",
    "content": "package protocol\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n)\n\n// Valid command names\nconst (\n\tCmdSend    = \">\"\n\tCmdReceive = \"+\"\n\tCmdCancel  = \"-\"\n)\n\n// Cmd is a representation of a command, which the client sends to the server\ntype Cmd struct {\n\n\t// The name of the command\n\tName string\n\n\t// The argument line, following the commandName\n\tArg string\n\n\t// The header line, if the command has one\n\tHeaderJSON string\n\n\t// The command payload, if the command has such\n\tBody []byte\n}\n\n// ParseCmd parses a slice of bytes and return a *Cmd\nfunc ParseCmd(message []byte) (*Cmd, error) {\n\tmsg := &Cmd{}\n\n\tif len(message) == 0 {\n\t\treturn nil, fmt.Errorf(\"empty command\")\n\t}\n\n\tparts := strings.SplitN(string(message), \"\\n\", 3)\n\tfirstLine := strings.SplitN(parts[0], \" \", 2)\n\n\tmsg.Name = firstLine[0]\n\n\tif len(firstLine) > 1 {\n\t\tmsg.Arg = firstLine[1]\n\t}\n\n\tif len(parts) > 1 {\n\t\tmsg.HeaderJSON = parts[1]\n\t}\n\n\tif len(parts) > 2 {\n\t\tmsg.Body = []byte(parts[2])\n\t}\n\n\treturn msg, nil\n}\n\n// Bytes serializes the the command into a byte slice\nfunc (cmd *Cmd) Bytes() []byte {\n\tbuff := &bytes.Buffer{}\n\tbuff.WriteString(cmd.Name)\n\tbuff.WriteString(\" \")\n\tbuff.WriteString(cmd.Arg)\n\n\tif len(cmd.HeaderJSON) > 0 || len(cmd.Body) > 0 {\n\t\tbuff.WriteString(\"\\n\")\n\t}\n\n\tif len(cmd.HeaderJSON) > 0 {\n\t\tbuff.WriteString(cmd.HeaderJSON)\n\t}\n\n\tif len(cmd.Body) > 0 {\n\t\tbuff.WriteString(\"\\n\")\n\t\tbuff.Write(cmd.Body)\n\t}\n\n\treturn buff.Bytes()\n}\n"
  },
  {
    "path": "protocol/cmd_test.go",
    "content": "package protocol\n\nimport (\n\tassert \"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nvar aSendCommand = `> /foo\n{\"meta\": \"data\"}\nHello World`\n\nvar aSubscribeCommand = \"+ /foo/bar\"\n\nfunc TestParsingASendCommand(t *testing.T) {\n\tassert := assert.New(t)\n\n\tcmd, err := ParseCmd([]byte(aSendCommand))\n\tassert.NoError(err)\n\n\tassert.Equal(CmdSend, cmd.Name)\n\tassert.Equal(\"/foo\", cmd.Arg)\n\tassert.Equal(`{\"meta\": \"data\"}`, cmd.HeaderJSON)\n\tassert.Equal(\"Hello World\", string(cmd.Body))\n}\n\nfunc TestSerializeASendCommand(t *testing.T) {\n\tcmd := &Cmd{\n\t\tName:       CmdSend,\n\t\tArg:        \"/foo\",\n\t\tHeaderJSON: `{\"meta\": \"data\"}`,\n\t\tBody:       []byte(\"Hello World\"),\n\t}\n\n\tassert.Equal(t, aSendCommand, string(cmd.Bytes()))\n}\n\nfunc Test_Cmd_EmptyCommand_Error(t *testing.T) {\n\tassert := assert.New(t)\n\t_, err := ParseCmd([]byte{})\n\tassert.Error(err)\n}\n\nfunc TestParsingASubscribeCommand(t *testing.T) {\n\tassert := assert.New(t)\n\n\tcmd, err := ParseCmd([]byte(aSubscribeCommand))\n\tassert.NoError(err)\n\n\tassert.Equal(CmdReceive, cmd.Name)\n\tassert.Equal(\"/foo/bar\", cmd.Arg)\n\tassert.Equal(\"\", cmd.HeaderJSON)\n\tassert.Nil(cmd.Body)\n}\n\nfunc TestSerializeASubscribeCommand(t *testing.T) {\n\tcmd := &Cmd{\n\t\tName: CmdReceive,\n\t\tArg:  \"/foo/bar\",\n\t}\n\n\tassert.Equal(t, aSubscribeCommand, string(cmd.Bytes()))\n}\n"
  },
  {
    "path": "protocol/log.go",
    "content": "package protocol\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nfunc PanicLogger() {\n\tif r := recover(); r != nil {\n\t\tlog.Printf(\"PANIC (%v): %v\", identifyLogOrigin(), r)\n\t\tlog.Printf(getStackTraceMessage(fmt.Sprintf(\"%v\", r)))\n\t}\n}\n\nfunc identifyLogOrigin() string {\n\tvar name, file string\n\tvar line int\n\tvar pc [16]uintptr\n\n\tn := runtime.Callers(3, pc[:])\n\tfor _, pc := range pc[:n] {\n\t\tfn := runtime.FuncForPC(pc)\n\t\tif fn == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfile, line = fn.FileLine(pc)\n\t\tname = fn.Name()\n\t\tif !strings.HasPrefix(name, \"runtime.\") {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tswitch {\n\tcase name != \"\":\n\t\treturn fmt.Sprintf(\"%v:%v\", name, line)\n\tcase file != \"\":\n\t\treturn fmt.Sprintf(\"%v:%v\", file, line)\n\t}\n\n\treturn fmt.Sprintf(\"pc:%x\", pc)\n}\n\nfunc getStackTraceMessage(msg string) string {\n\tvar name, file string\n\tvar line int\n\tvar pc [16]uintptr\n\n\tn := runtime.Callers(3, pc[:])\n\tbuff := &bytes.Buffer{}\n\tbuff.WriteString(msg)\n\tbuff.WriteString(\"\\n\")\n\n\tfor _, pc := range pc[:n] {\n\t\tfn := runtime.FuncForPC(pc)\n\t\tif fn == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfile, line = fn.FileLine(pc)\n\t\tname = fn.Name()\n\t\tswitch {\n\t\tcase name != \"\":\n\t\t\tbuff.WriteString(fmt.Sprintf(\"! %v:%v\\n\", name, line))\n\t\tcase file != \"\":\n\t\t\tbuff.WriteString(fmt.Sprintf(\"! %v:%v\\n\", file, line))\n\t\t}\n\t}\n\treturn string(buff.Bytes())\n}\n"
  },
  {
    "path": "protocol/log_test.go",
    "content": "package protocol\n\nimport (\n\t\"bytes\"\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc Test_log_functions_panic_logger(t *testing.T) {\n\ta := assert.New(t)\n\n\tw := bytes.NewBuffer([]byte{})\n\tlog.SetOutput(w)\n\tdefer log.SetOutput(os.Stderr)\n\n\traisePanic()\n\n\ta.Contains(w.String(), \"PANIC\")\n\ta.Contains(w.String(), \"raisePanic\")\n\ta.Contains(w.String(), \"Don't panic!\")\n}\n\nfunc raisePanic() {\n\tdefer PanicLogger()\n\tpanic(\"Don't panic!\")\n}\n"
  },
  {
    "path": "protocol/message.go",
    "content": "package protocol\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\n// Message is a struct that represents a message in the guble protocol, as the server sends it to the client.\ntype Message struct {\n\n\t// The sequenceId of the message, which is given by the\n\t// server an is strictly monotonically increasing at least within a root topic.\n\tID uint64\n\n\t// The topic path\n\tPath Path\n\n\t// The user id of the message sender\n\tUserID string\n\n\t// The id of the sending application\n\tApplicationID string\n\n\t// Filters applied to this message. The message will be sent only to the\n\t// routes that match the filters\n\tFilters map[string]string\n\n\t// The time of publishing, as Unix Timestamp date\n\tTime int64\n\n\t// The header line of the message (optional). If set, then it has to be a valid JSON object structure.\n\tHeaderJSON string\n\n\t// The message payload\n\tBody []byte\n\n\t// Used in cluster mode to identify a guble node\n\tNodeID uint8\n}\n\ntype MessageDeliveryCallback func(*Message)\n\n// Metadata returns the first line of a serialized message, without the newline\nfunc (msg *Message) Metadata() string {\n\tbuff := &bytes.Buffer{}\n\tmsg.writeMetadata(buff)\n\treturn string(buff.Bytes())\n}\n\nfunc (msg *Message) String() string {\n\treturn fmt.Sprintf(\"%d\", msg.ID)\n}\n\nfunc (msg *Message) BodyAsString() string {\n\treturn string(msg.Body)\n}\n\n// Bytes serializes the message into a byte slice\nfunc (msg *Message) Bytes() []byte {\n\tbuff := &bytes.Buffer{}\n\n\tmsg.writeMetadata(buff)\n\n\tif len(msg.HeaderJSON) > 0 || len(msg.Body) > 0 {\n\t\tbuff.WriteString(\"\\n\")\n\t}\n\n\tif len(msg.HeaderJSON) > 0 {\n\t\tbuff.WriteString(msg.HeaderJSON)\n\t}\n\n\tif len(msg.Body) > 0 {\n\t\tbuff.WriteString(\"\\n\")\n\t\tbuff.Write(msg.Body)\n\t}\n\n\treturn buff.Bytes()\n}\n\nfunc (msg *Message) writeMetadata(buff *bytes.Buffer) 
{\n\tbuff.WriteString(string(msg.Path))\n\tbuff.WriteString(\",\")\n\tbuff.WriteString(strconv.FormatUint(msg.ID, 10))\n\tbuff.WriteString(\",\")\n\tbuff.WriteString(msg.UserID)\n\tbuff.WriteString(\",\")\n\tbuff.WriteString(msg.ApplicationID)\n\tbuff.WriteString(\",\")\n\tbuff.Write(msg.encodeFilters())\n\tbuff.WriteString(\",\")\n\tbuff.WriteString(strconv.FormatInt(msg.Time, 10))\n\tbuff.WriteString(\",\")\n\tbuff.WriteString(strconv.FormatUint(uint64(msg.NodeID), 10))\n}\n\nfunc (msg *Message) encodeFilters() []byte {\n\tif msg.Filters == nil {\n\t\treturn []byte{}\n\t}\n\tdata, err := json.Marshal(msg.Filters)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"filters\", msg.Filters).Error(\"Error encoding filters\")\n\t\treturn []byte{}\n\t}\n\treturn data\n}\n\nfunc (msg *Message) decodeFilters(data []byte) {\n\tif len(data) == 0 {\n\t\treturn\n\t}\n\tmsg.Filters = make(map[string]string)\n\terr := json.Unmarshal(data, &msg.Filters)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"data\", string(data)).Error(\"Error decoding filters\")\n\t}\n}\n\nfunc (msg *Message) SetFilter(key, value string) {\n\tif msg.Filters == nil {\n\t\tmsg.Filters = make(map[string]string, 1)\n\t}\n\tmsg.Filters[key] = value\n}\n\n// Valid constants for the NotificationMessage.Name\nconst (\n\tSUCCESS_CONNECTED     = \"connected\"\n\tSUCCESS_SEND          = \"send\"\n\tSUCCESS_FETCH_START   = \"fetch-start\"\n\tSUCCESS_FETCH_END     = \"fetch-end\"\n\tSUCCESS_SUBSCRIBED_TO = \"subscribed-to\"\n\tSUCCESS_CANCELED      = \"canceled\"\n\tERROR_SUBSCRIBED_TO   = \"error-subscribed-to\"\n\tERROR_BAD_REQUEST     = \"error-bad-request\"\n\tERROR_INTERNAL_SERVER = \"error-server-internal\"\n)\n\n// NotificationMessage is a representation of a status messages or error message, sent from the server\ntype NotificationMessage struct {\n\n\t// The name of the message\n\tName string\n\n\t// The argument line, following the messageName\n\tArg string\n\n\t// The optional json data 
supplied with the message\n\tJson string\n\n\t// Flag which indicates, if the notification is an error\n\tIsError bool\n}\n\n// Bytes serializes the notification message into a byte slice\nfunc (msg *NotificationMessage) Bytes() []byte {\n\tbuff := &bytes.Buffer{}\n\n\tif msg.IsError {\n\t\tbuff.WriteString(\"!\")\n\t} else {\n\t\tbuff.WriteString(\"#\")\n\t}\n\tbuff.WriteString(msg.Name)\n\tif len(msg.Arg) > 0 {\n\t\tbuff.WriteString(\" \")\n\t\tbuff.WriteString(msg.Arg)\n\t}\n\n\tif len(msg.Json) > 0 {\n\t\tbuff.WriteString(\"\\n\")\n\t\tbuff.WriteString(msg.Json)\n\t}\n\n\treturn buff.Bytes()\n}\n\n// Decode decodes a message, sent from the server to the client.\n// The decoded messages can have one of the types: *Message or *NotificationMessage\nfunc Decode(message []byte) (interface{}, error) {\n\tif len(message) >= 1 && (message[0] == '#' || message[0] == '!') {\n\t\treturn parseNotificationMessage(message)\n\t}\n\treturn ParseMessage(message)\n}\n\nfunc ParseMessage(message []byte) (*Message, error) {\n\tparts := strings.SplitN(string(message), \"\\n\", 3)\n\tif len(message) == 0 {\n\t\treturn nil, fmt.Errorf(\"empty message\")\n\t}\n\n\tmeta := strings.Split(parts[0], \",\")\n\n\tif len(meta) != 7 {\n\t\treturn nil, fmt.Errorf(\"message metadata has to have 7 fields, but was %v\", parts[0])\n\t}\n\n\tif len(meta[0]) == 0 || meta[0][0] != '/' {\n\t\treturn nil, fmt.Errorf(\"message has invalid topic, got %v\", meta[0])\n\t}\n\n\tid, err := strconv.ParseUint(meta[1], 10, 0)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"message metadata to have an integer (message-id) as second field, but was %v\", meta[1])\n\t}\n\n\tpublishingTime, err := strconv.ParseInt(meta[5], 10, 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"message metadata to have an integer (publishing time) as sixth field, but was %v\", meta[5])\n\t}\n\n\tnodeID, err := strconv.ParseUint(meta[6], 10, 8)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"message metadata to have an integer 
(nodeID) as seventh field, but was %v\", meta[6])\n\t}\n\n\tmsg := &Message{\n\t\tID:            id,\n\t\tPath:          Path(meta[0]),\n\t\tUserID:        meta[2],\n\t\tApplicationID: meta[3],\n\t\tTime:          publishingTime,\n\t\tNodeID:        uint8(nodeID),\n\t}\n\tmsg.decodeFilters([]byte(meta[4]))\n\n\tif len(parts) >= 2 {\n\t\tmsg.HeaderJSON = parts[1]\n\t}\n\n\tif len(parts) == 3 {\n\t\tmsg.Body = []byte(parts[2])\n\t}\n\n\treturn msg, nil\n}\n\nfunc parseNotificationMessage(message []byte) (*NotificationMessage, error) {\n\tmsg := &NotificationMessage{}\n\n\tif len(message) < 2 || (message[0] != '#' && message[0] != '!') {\n\t\treturn nil, fmt.Errorf(\"message has to start with '#' or '!' and a name, but got '%v'\", message)\n\t}\n\tmsg.IsError = message[0] == '!'\n\n\tparts := strings.SplitN(string(message)[1:], \"\\n\", 2)\n\tfirstLine := strings.SplitN(parts[0], \" \", 2)\n\n\tmsg.Name = firstLine[0]\n\n\tif len(firstLine) > 1 {\n\t\tmsg.Arg = firstLine[1]\n\t}\n\n\tif len(parts) > 1 {\n\t\tmsg.Json = parts[1]\n\t}\n\n\treturn msg, nil\n}\n"
  },
  {
    "path": "protocol/message_test.go",
    "content": "package protocol\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar aNormalMessage = `/foo/bar,42,user01,phone01,{\"user\":\"user01\"},1420110000,1\n{\"Content-Type\": \"text/plain\", \"Correlation-Id\": \"7sdks723ksgqn\"}\nHello World`\n\nvar aMinimalMessage = \"/,42,,,,1420110000,0\"\n\nvar aConnectedNotification = `#connected You are connected to the server.\n{\"ApplicationId\": \"phone1\", \"UserId\": \"user01\", \"Time\": \"1420110000\"}`\n\n// 2015-01-01T12:00:00+01:00 is equal to  1420110000\nvar unixTime, _ = time.Parse(time.RFC3339, \"2015-01-01T12:00:00+01:00\")\n\nfunc TestParsingANormalMessage(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmsgI, err := Decode([]byte(aNormalMessage))\n\tassert.NoError(err)\n\tassert.IsType(&Message{}, msgI)\n\tmsg := msgI.(*Message)\n\n\tassert.Equal(uint64(42), msg.ID)\n\tassert.Equal(Path(\"/foo/bar\"), msg.Path)\n\tassert.Equal(\"user01\", msg.UserID)\n\tassert.Equal(\"phone01\", msg.ApplicationID)\n\tassert.Equal(map[string]string{\"user\": \"user01\"}, msg.Filters)\n\tassert.Equal(unixTime.Unix(), msg.Time)\n\tassert.Equal(uint8(1), msg.NodeID)\n\tassert.Equal(`{\"Content-Type\": \"text/plain\", \"Correlation-Id\": \"7sdks723ksgqn\"}`, msg.HeaderJSON)\n\tassert.Equal(\"Hello World\", string(msg.Body))\n}\n\nfunc TestSerializeANormalMessage(t *testing.T) {\n\t// given: a message\n\tmsg := &Message{\n\t\tID:            uint64(42),\n\t\tPath:          Path(\"/foo/bar\"),\n\t\tUserID:        \"user01\",\n\t\tApplicationID: \"phone01\",\n\t\tFilters:       map[string]string{\"user\": \"user01\"},\n\t\tTime:          unixTime.Unix(),\n\t\tNodeID:        1,\n\t\tHeaderJSON:    `{\"Content-Type\": \"text/plain\", \"Correlation-Id\": \"7sdks723ksgqn\"}`,\n\t\tBody:          []byte(\"Hello World\"),\n\t}\n\n\t// then: the serialisation is as expected\n\tassert.Equal(t, aNormalMessage, string(msg.Bytes()))\n\tassert.Equal(t, \"Hello World\", 
msg.BodyAsString())\n\n\t// and: the first line is as expected\n\tassert.Equal(t, strings.SplitN(aNormalMessage, \"\\n\", 2)[0], msg.Metadata())\n}\n\nfunc TestSerializeAMinimalMessage(t *testing.T) {\n\tmsg := &Message{\n\t\tID:   uint64(42),\n\t\tPath: Path(\"/\"),\n\t\tTime: unixTime.Unix(),\n\t}\n\n\tassert.Equal(t, aMinimalMessage, string(msg.Bytes()))\n}\n\nfunc TestSerializeAMinimalMessageWithBody(t *testing.T) {\n\tmsg := &Message{\n\t\tID:   uint64(42),\n\t\tPath: Path(\"/\"),\n\t\tTime: unixTime.Unix(),\n\t\tBody: []byte(\"Hello World\"),\n\t}\n\n\tassert.Equal(t, aMinimalMessage+\"\\n\\nHello World\", string(msg.Bytes()))\n}\n\nfunc TestParsingAMinimalMessage(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmsgI, err := Decode([]byte(aMinimalMessage))\n\tassert.NoError(err)\n\tassert.IsType(&Message{}, msgI)\n\tmsg := msgI.(*Message)\n\n\tassert.Equal(uint64(42), msg.ID)\n\tassert.Equal(Path(\"/\"), msg.Path)\n\tassert.Equal(\"\", msg.UserID)\n\tassert.Equal(\"\", msg.ApplicationID)\n\tassert.Nil(msg.Filters)\n\tassert.Equal(unixTime.Unix(), msg.Time)\n\tassert.Equal(\"\", msg.HeaderJSON)\n\n\tassert.Equal(\"\", string(msg.Body))\n}\n\nfunc TestErrorsOnParsingMessages(t *testing.T) {\n\tassert := assert.New(t)\n\n\tvar err error\n\t_, err = Decode([]byte(\"\"))\n\tassert.Error(err)\n\n\t// missing meta field\n\t_, err = Decode([]byte(\"42,/foo/bar,user01,phone1,id123\\n{}\\nBla\"))\n\tassert.Error(err)\n\n\t// id not an integer\n\t_, err = Decode([]byte(\"xy42,/foo/bar,user01,phone1,id123,1420110000\\n\"))\n\tassert.Error(err)\n\n\t// path is empty\n\t_, err = Decode([]byte(\"42,,user01,phone1,id123,1420110000\\n\"))\n\tassert.Error(err)\n\n\t// Error Message without Name\n\t_, err = Decode([]byte(\"!\"))\n\tassert.Error(err)\n}\n\nfunc TestParsingNotificationMessage(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmsgI, err := Decode([]byte(aConnectedNotification))\n\tassert.NoError(err)\n\tassert.IsType(&NotificationMessage{}, msgI)\n\tmsg := 
msgI.(*NotificationMessage)\n\n\tassert.Equal(SUCCESS_CONNECTED, msg.Name)\n\tassert.Equal(\"You are connected to the server.\", msg.Arg)\n\tassert.Equal(`{\"ApplicationId\": \"phone1\", \"UserId\": \"user01\", \"Time\": \"1420110000\"}`, msg.Json)\n\tassert.Equal(false, msg.IsError)\n}\n\nfunc TestSerializeANotificationMessage(t *testing.T) {\n\tmsg := &NotificationMessage{\n\t\tName:    SUCCESS_CONNECTED,\n\t\tArg:     \"You are connected to the server.\",\n\t\tJson:    `{\"ApplicationId\": \"phone1\", \"UserId\": \"user01\", \"Time\": \"1420110000\"}`,\n\t\tIsError: false,\n\t}\n\n\tassert.Equal(t, aConnectedNotification, string(msg.Bytes()))\n}\n\nfunc TestSerializeAnErrorMessage(t *testing.T) {\n\tmsg := &NotificationMessage{\n\t\tName:    ERROR_BAD_REQUEST,\n\t\tArg:     \"you are so bad.\",\n\t\tIsError: true,\n\t}\n\n\tassert.Equal(t, \"!\"+ERROR_BAD_REQUEST+\" \"+\"you are so bad.\", string(msg.Bytes()))\n}\n\nfunc TestSerializeANotificationMessageWithEmptyArg(t *testing.T) {\n\tmsg := &NotificationMessage{\n\t\tName:    SUCCESS_SEND,\n\t\tArg:     \"\",\n\t\tIsError: false,\n\t}\n\n\tassert.Equal(t, \"#\"+SUCCESS_SEND, string(msg.Bytes()))\n}\n\nfunc TestParsingErrorNotificationMessage(t *testing.T) {\n\tassert := assert.New(t)\n\n\traw := \"!bad-request unknown command 'sdcsd'\"\n\n\tmsgI, err := Decode([]byte(raw))\n\tassert.NoError(err)\n\tassert.IsType(&NotificationMessage{}, msgI)\n\tmsg := msgI.(*NotificationMessage)\n\n\tassert.Equal(\"bad-request\", msg.Name)\n\tassert.Equal(\"unknown command 'sdcsd'\", msg.Arg)\n\tassert.Equal(\"\", msg.Json)\n\tassert.Equal(true, msg.IsError)\n}\n\nfunc Test_Message_getPartitionFromTopic(t *testing.T) {\n\ta := assert.New(t)\n\ta.Equal(\"foo\", Path(\"/foo/bar/bazz\").Partition())\n\ta.Equal(\"foo\", Path(\"/foo\").Partition())\n\ta.Equal(\"\", Path(\"/\").Partition())\n\ta.Equal(\"\", Path(\"\").Partition())\n}\n\nfunc TestMessage_Filters(t *testing.T) {\n\ta := assert.New(t)\n\n\tmsg := 
&Message{}\n\tmsg.SetFilter(\"user\", \"user01\")\n\tmsg.SetFilter(\"device_id\", \"ID_DEVICE\")\n\n\ta.NotNil(msg.Filters)\n\ta.Equal(msg.Filters[\"user\"], \"user01\")\n\ta.Equal(msg.Filters[\"device_id\"], \"ID_DEVICE\")\n\n\ta.JSONEq(`{\"user\": \"user01\",\"device_id\":\"ID_DEVICE\"}`, string(msg.encodeFilters()))\n}\n\nfunc TestMessage_decodeFilters(t *testing.T) {\n\ta := assert.New(t)\n\n\tmsg := &Message{}\n\n\tfilters := []byte(`{\"user\": \"user01\",\"device_id\":\"ID_DEVICE\"}`)\n\tmsg.decodeFilters(filters)\n\n\ta.NotNil(msg.Filters)\n\ta.Contains(msg.Filters, \"user\")\n\ta.Contains(msg.Filters, \"device_id\")\n\n\ta.Equal(msg.Filters[\"user\"], \"user01\")\n\ta.Equal(msg.Filters[\"device_id\"], \"ID_DEVICE\")\n}\n"
  },
  {
    "path": "protocol/path.go",
    "content": "package protocol\n\nimport \"strings\"\n\n// Path is the path of a topic\ntype Path string\n\n// Partition returns the parsed partition from the path.\nfunc (path Path) Partition() string {\n\tif len(path) > 0 && path[0] == '/' {\n\t\tpath = path[1:]\n\t}\n\treturn strings.SplitN(string(path), \"/\", 2)[0]\n}\n\nfunc (path Path) RemovePrefixSlash() string {\n\treturn strings.TrimPrefix(string(path), \"/\")\n}\n"
  },
  {
    "path": "restclient/guble_sender.go",
    "content": "package restclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"io/ioutil\"\n\t\"net/url\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\ntype gubleSender struct {\n\tEndpoint   string\n\thttpClient *http.Client\n}\n\n// New returns a new Sender.\nfunc New(endpoint string) Sender {\n\treturn &gubleSender{\n\t\tEndpoint:   endpoint,\n\t\thttpClient: &http.Client{},\n\t}\n}\n\nfunc (gs gubleSender) GetSubscribers(topic string) ([]byte, error) {\n\tlogger.WithField(\"topic\", topic).Info(\"GetSubscribers called\")\n\tbody := make([]byte, 0)\n\trequest, err := http.NewRequest(\n\t\thttp.MethodGet,\n\t\tfmt.Sprintf(\"%s/subscribers/%s\", gs.Endpoint, trimPrefixSlash(topic)),\n\t\tbytes.NewReader(body),\n\t)\n\tlogger.WithField(\"url\", fmt.Sprintf(\"%s/subscribers/%s\", gs.Endpoint, topic))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"header\": response.Header,\n\t\t\t\"code\":   response.StatusCode,\n\t\t\t\"status\": response.Status,\n\t\t}).Error(\"Guble response error\")\n\t\treturn nil, fmt.Errorf(\"Error code returned from guble: %d\", response.StatusCode)\n\t}\n\n\tcontent, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.WithFields(log.Fields{\n\t\t\"header\": response.Header,\n\t\t\"code\":   response.StatusCode,\n\t\t\"body\":   string(content),\n\t}).Debug(\"Guble response\")\n\treturn content, nil\n}\n\nfunc (gs gubleSender) Check() bool {\n\trequest, err := http.NewRequest(http.MethodHead, gs.Endpoint, nil)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error creating request url\")\n\t\treturn false\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error reaching guble server 
endpoint\")\n\t\treturn false\n\t}\n\tdefer response.Body.Close()\n\treturn response.StatusCode == http.StatusOK\n}\n\nfunc (gs gubleSender) Send(topic string, body []byte, userID string, params map[string]string) error {\n\tlogger.WithFields(log.Fields{\n\t\t\"topic\":  topic,\n\t\t\"body\":   body,\n\t\t\"userID\": userID,\n\t\t\"params\": params,\n\t}).Debug(\"Sending guble message\")\n\trequest, err := http.NewRequest(http.MethodPost, getURL(gs.Endpoint, topic, userID, params), bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"header\": response.Header,\n\t\t\t\"code\":   response.StatusCode,\n\t\t\t\"status\": response.Status,\n\t\t}).Error(\"Guble response error\")\n\t\treturn fmt.Errorf(\"Error code returned from guble: %d\", response.StatusCode)\n\t}\n\treturn nil\n}\n\nfunc getURL(endpoint, topic, userID string, params map[string]string) string {\n\tuv := url.Values{}\n\tuv.Add(\"userId\", userID)\n\tif params != nil {\n\t\tfor k, v := range params {\n\t\t\tif k != \"\" {\n\t\t\t\tuv.Add(k, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s/%s?%s\", endpoint, topic, uv.Encode())\n}\n\nfunc trimPrefixSlash(topic string) string {\n\tif strings.HasPrefix(topic, \"/\") {\n\t\treturn strings.TrimPrefix(topic, \"/\")\n\t}\n\treturn topic\n}\n"
  },
  {
    "path": "restclient/guble_sender_test.go",
    "content": "package restclient\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"net/url\"\n\t\"testing\"\n)\n\nfunc TestGetURL(t *testing.T) {\n\ta := assert.New(t)\n\n\ttestcases := map[string]struct {\n\t\tendpoint string\n\t\ttopic    string\n\t\tuserID   string\n\t\tparams   map[string]string\n\n\t\t// expected result\n\t\texpected string\n\t}{\n\t\t\"endpoint only, no topic, no user, no params\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\texpected: \"http://localhost:8080/api/?userId=\",\n\t\t},\n\t\t\"endpoint, valid topic, no user, no params\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\texpected: \"http://localhost:8080/api/topic?userId=\",\n\t\t},\n\t\t\"endpoint, valid topic, valid user, no params\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\tuserID:   \"user\",\n\t\t\texpected: \"http://localhost:8080/api/topic?userId=user\",\n\t\t},\n\t\t\"endpoint, valid topic, valid user, empty params\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\tuserID:   \"user\",\n\t\t\tparams:   map[string]string{},\n\t\t\texpected: \"http://localhost:8080/api/topic?userId=user\",\n\t\t},\n\t\t\"endpoint, valid topic, valid user, one valid param\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\tuserID:   \"user\",\n\t\t\tparams:   map[string]string{\"filterCriteria1\": \"value1\"},\n\t\t\texpected: \"http://localhost:8080/api/topic?filterCriteria1=value1&userId=user\",\n\t\t},\n\t\t\"endpoint, valid topic, valid user, more valid params\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\tuserID:   \"user\",\n\t\t\tparams: map[string]string{\n\t\t\t\t\"filterCriteria1\": \"value1\",\n\t\t\t\t\"filterCriteria2\": \"value2\",\n\t\t\t},\n\t\t\texpected: 
\"http://localhost:8080/api/topic?filterCriteria1=value1&filterCriteria2=value2&userId=user\",\n\t\t},\n\t\t\"endpoint, valid topic, valid user, one param value invalid inside URL\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\tuserID:   \"user\",\n\t\t\tparams:   map[string]string{\"filterCriteria1\": \"?\"},\n\t\t\texpected: \"http://localhost:8080/api/topic?filterCriteria1=%3F&userId=user\",\n\t\t},\n\t\t\"endpoint, valid topic, valid user, one param key empty\": {\n\t\t\tendpoint: \"http://localhost:8080/api\",\n\t\t\ttopic:    \"topic\",\n\t\t\tuserID:   \"user\",\n\t\t\tparams:   map[string]string{\"\": \"value\"},\n\t\t\texpected: \"http://localhost:8080/api/topic?userId=user\",\n\t\t},\n\t}\n\n\tvar err error\n\tfor name, c := range testcases {\n\t\t_, err = url.Parse(c.expected)\n\t\ta.NoError(err)\n\t\ta.Equal(c.expected,\n\t\t\tgetURL(c.endpoint, c.topic, c.userID, c.params),\n\t\t\t\"Failed check for case: \"+name)\n\t}\n\n}\n"
  },
  {
    "path": "restclient/logger.go",
    "content": "package restclient\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"restclient\",\n})\n"
  },
  {
    "path": "restclient/sender.go",
    "content": "package restclient\n\n// Sender is an interface used to send a message to the guble server.\ntype Sender interface {\n\t// Send a a message(body) to the guble Server, to the given topic, with the given userID.\n\tSend(topic string, body []byte, userID string, params map[string]string) error\n\n\t// Check returns `true` if the guble server endpoint is reachable, or `false` otherwise.\n\tCheck() bool\n\n\t// GetSubscribers returns a binary encoded JSON of all subscribers of 'topic' or an error otherwise\n\tGetSubscribers(topic string) ([]byte, error)\n}\n"
  },
  {
    "path": "scripts/Dockerfile-cluster",
    "content": "# this Dockerfile requires u to build the app locally with the name `guble`\nFROM phusion/baseimage\nRUN mkdir -p /var/lib/guble\n\nCOPY guble /go/bin/app\n\n\nEXPOSE 10000 8080\nVOLUME /var/lib/guble\n\n\nENTRYPOINT ['/go/bin/app']\n"
  },
  {
    "path": "scripts/compose.cluster.test.yml",
    "content": "version: '2'\nservices:\n  cluster_1:\n    build:\n      context: ..\n      dockerfile: scripts/Dockerfile-cluster\n    entrypoint:\n      - /go/bin/app\n    environment:\n      - GUBLE_NODE_ID=1\n      - GUBLE_LOG=debug\n      - GUBLE_REMOTES=localhost:10000 localhost:10001\n    ports:\n      - \"8080:8080\"\n      - \"10000:10000\"\n\n  cluster_2:\n    build:\n      context: ..\n      dockerfile: scripts/Dockerfile-cluster\n    entrypoint:\n      - /go/bin/app\n    environment:\n      - GUBLE_NODE_ID=2\n      - GUBLE_LOG=debug\n      - GUBLE_REMOTES=localhost:10000 localhost:10001\n    ports:\n      - \"8080:8080\"\n      - \"10001:10000\"\n"
  },
  {
    "path": "scripts/compose.postgres.test.yml",
    "content": "# docker-compose file to run test(s) using dockerized Postgresql\n# Start Postgres from root of project with following command:\n#     sudo docker-compose -f scripts/compose.postgres.test.yml up -d\n# Stop Postgres from root of project with following command:\n#     sudo docker-compose -f scripts/compose.postgres.test.yml down\nversion: '2'\nservices:\n  postgres:\n    image: postgres:9\n    environment:\n      - POSTGRES_USER=postgres\n      - POSTGRES_PASSWORD=\n      - POSTGRES_DB=guble\n    volumes:\n      - /tmp/guble_test_postgres:/var/lib/postgresql/data\n    ports:\n      - \"5432:5432\"\n"
  },
  {
    "path": "scripts/cov.sh",
    "content": "#!/bin/bash -e\n# Run from parent directory via:\n#     ./scripts/cov.sh\n# Requires installation of:\n#     go get golang.org/x/tools/cmd/cover\n#     go get github.com/wadey/gocovmerge\n\nsource scripts/generate_coverage.sh\n\n# If we have an arg, assume travis run and push to coveralls. Otherwise launch browser results\ngo tool cover -html=full_cov.out\nrm -f full_cov.out\n"
  },
  {
    "path": "scripts/dependencies_graph.sh",
    "content": "#!/bin/bash -e\n# Requires installation of package: graphviz\n\n(echo \"digraph G {\"\ngo list -f '{{range .Imports}}{{printf \"\\t%q -> %q;\\n\" $.ImportPath .}}{{end}}' $(go list -f '{{join .Deps \" \"}}' github.com/smancke/guble ) github.com/smancke/guble\necho \"}\" ) | dot -Tsvg -o dependencies_graph.svg"
  },
  {
    "path": "scripts/file-hex.sh",
    "content": "#!/usr/bin/env bash\n\nxxd -p $1 | tr -d '\\n'\n"
  },
  {
    "path": "scripts/generate_coverage.sh",
    "content": "#!/bin/bash -e\n# Requires local installation of: `github.com/wadey/gocovmerge`\n\ncd $GOPATH/src/github.com/smancke/guble\n\nrm -rf ./cov\nmkdir cov\n\ni=0\nfor dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_test.go' -type d);\ndo\n    if ls ${dir}/*.go &> /dev/null; then\n        GO_TEST_DISABLED=true go test -v -covermode=atomic -coverprofile=./cov/$i.out ./${dir}\n        i=$((i+1))\n    fi\ndone\n\ngocovmerge ./cov/*.out > full_cov.out\nrm -rf ./cov\n"
  },
  {
    "path": "scripts/generate_mocks.sh",
    "content": "#!/bin/bash -xe\n\n# Prerequisites: mockgen should be installed\n#   go get github.com/golang/mock/mockgen\n\nif [ -z \"$GOPATH\" ]; then\n      echo \"Missing $GOPATH!\";\n      exit 1\nfi\n\n# replace in file if last operation was successful\nfunction replace {\n      FILE=$1; shift;\n      while [ -n \"$1\" ]; do\n            echo \"Replacing: $1\"\n            sed -i \"s/$1//g\" $FILE\n            shift\n      done\n}\n\nMOCKGEN=$GOPATH/bin/mockgen\n\n# server/service mocks\n$MOCKGEN  -self_package service -package service \\\n      -destination server/service/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n$MOCKGEN -self_package service -package service \\\n      -destination server/service/mocks_checker_gen_test.go \\\n      github.com/docker/distribution/health \\\n      Checker &\n\n# server/router mocks\n$MOCKGEN -self_package router -package router \\\n      -destination server/router/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router\nreplace \"server/router/mocks_router_gen_test.go\" \"router \\\"github.com\\/smancke\\/guble\\/server\\/router\\\"\" \"router\\.\"\n\n$MOCKGEN -self_package router -package router \\\n      -destination server/router/mocks_store_gen_test.go \\\n      github.com/smancke/guble/server/store \\\n      MessageStore &\n\n$MOCKGEN -self_package router -package router \\\n      -destination server/router/mocks_kvstore_gen_test.go \\\n      github.com/smancke/guble/server/kvstore \\\n      KVStore &\n\n$MOCKGEN -self_package router -package router \\\n      -destination server/router/mocks_auth_gen_test.go \\\n      github.com/smancke/guble/server/auth \\\n      AccessManager &\n\n$MOCKGEN -self_package router -package router \\\n      -destination server/router/mocks_checker_gen_test.go \\\n      github.com/docker/distribution/health \\\n      Checker &\n\n# client mocks\n$MOCKGEN  -self_package client -package client \\\n      
-destination client/mocks_client_gen_test.go \\\n      github.com/smancke/guble/client \\\n      WSConnection,Client\nreplace \"client/mocks_client_gen_test.go\" \"client \\\"github.com\\/smancke\\/guble\\/client\\\"\" \"client\\.\"\n\n# server/apns mocks\n$MOCKGEN -package apns \\\n      -destination server/apns/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n$MOCKGEN -package apns \\\n      -destination server/apns/mocks_kvstore_gen_test.go \\\n      github.com/smancke/guble/server/kvstore \\\n      KVStore &\n\n$MOCKGEN -package apns \\\n      -destination server/apns/mocks_connector_gen_test.go \\\n      github.com/smancke/guble/server/connector \\\n      Sender,Request,Subscriber &\n\n$MOCKGEN -package apns \\\n      -destination server/apns/mocks_pusher_gen_test.go \\\n      github.com/smancke/guble/server/apns \\\n      Pusher &\n\n# server/fcm mocks\n$MOCKGEN -package fcm \\\n      -destination server/fcm/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n$MOCKGEN -self_package fcm -package fcm \\\n      -destination server/fcm/mocks_kvstore_gen_test.go \\\n      github.com/smancke/guble/server/kvstore \\\n      KVStore &\n\n$MOCKGEN -self_package fcm -package fcm \\\n      -destination server/fcm/mocks_store_gen_test.go \\\n      github.com/smancke/guble/server/store \\\n      MessageStore &\n\n$MOCKGEN -self_package fcm -package fcm \\\n      -destination server/fcm/mocks_gcm_gen_test.go \\\n      github.com/Bogh/gcm \\\n      Sender &\n\n# server mocks\n$MOCKGEN -package server \\\n      -destination server/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n$MOCKGEN -self_package server -package server \\\n      -destination server/mocks_auth_gen_test.go \\\n      github.com/smancke/guble/server/auth \\\n      AccessManager &\n\n$MOCKGEN -self_package server -package server \\\n      -destination 
server/mocks_store_gen_test.go \\\n      github.com/smancke/guble/server/store \\\n      MessageStore &\n\n$MOCKGEN -package server \\\n      -destination server/mocks_apns_pusher_gen_test.go \\\n      github.com/smancke/guble/server/apns \\\n      Pusher &\n\n# server/auth mocks\n$MOCKGEN -self_package auth -package auth \\\n      -destination server/auth/mocks_auth_gen_test.go \\\n      github.com/smancke/guble/server/auth \\\n      AccessManager\nreplace \"server/auth/mocks_auth_gen_test.go\" \\\n      \"auth \\\"github.com\\/smancke\\/guble\\/server\\/auth\\\"\" \\\n      \"auth\\.\"\n\n# server/connector mocks\n$MOCKGEN -self_package connector -package connector \\\n      -destination server/connector/mocks_connector_gen_test.go \\\n      github.com/smancke/guble/server/connector \\\n      Connector,Sender,ResponseHandler,Manager,Queue,Request,Subscriber\nreplace \"server/connector/mocks_connector_gen_test.go\" \\\n      \"connector \\\"github.com\\/smancke\\/guble\\/server\\/connector\\\"\" \\\n      \"connector\\.\"\n\n$MOCKGEN -self_package connector -package connector \\\n      -destination server/connector/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n$MOCKGEN -self_package connector -package connector \\\n      -destination server/connector/mocks_kvstore_gen_test.go \\\n      github.com/smancke/guble/server/kvstore \\\n      KVStore &\n\n# server/websocket mocks\n$MOCKGEN  -self_package websocket -package websocket \\\n      -destination server/websocket/mocks_websocket_gen_test.go \\\n      github.com/smancke/guble/server/websocket \\\n      WSConnection\nreplace \"server/websocket/mocks_websocket_gen_test.go\" \\\n      \"websocket \\\"github.com\\/smancke\\/server\\/websocket\\\"\" \\\n      \"websocket\\.\"\n\n$MOCKGEN -self_package websocket -package websocket \\\n      -destination server/websocket/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router 
&\n\n$MOCKGEN -self_package websocket -package websocket \\\n      -destination server/websocket/mocks_store_gen_test.go \\\n      github.com/smancke/guble/server/store \\\n      MessageStore &\n\n$MOCKGEN -self_package websocket -package websocket \\\n      -destination server/websocket/mocks_auth_gen_test.go \\\n      github.com/smancke/guble/server/auth \\\n      AccessManager &\n\n# server/rest Mocks\n$MOCKGEN -package rest \\\n      -destination server/rest/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n# server/sms Mocks\n$MOCKGEN -package sms \\\n      -destination server/sms/mocks_sender_gen_test.go \\\n      github.com/smancke/guble/server/sms \\\n      Sender &\n\n$MOCKGEN -package sms \\\n      -destination server/sms/mocks_router_gen_test.go \\\n      github.com/smancke/guble/server/router \\\n      Router &\n\n$MOCKGEN -self_package router -package sms \\\n      -destination server/sms/mocks_store_gen_test.go \\\n      github.com/smancke/guble/server/store \\\n      MessageStore &\n\nwait\n"
  },
  {
    "path": "server/apns/apns.go",
    "content": "package apns\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com/sideshow/apns2\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"time\"\n)\n\nconst (\n\t// schema is the default database schema for APNS\n\tschema = \"apns_registration\"\n)\n\nvar (\n\terrSenderNotRecreated = errors.New(\"APNS Sender could not be recreated.\")\n)\n\n// Config is used for configuring the APNS module.\ntype Config struct {\n\tEnabled             *bool\n\tProduction          *bool\n\tCertificateFileName *string\n\tCertificateBytes    *[]byte\n\tCertificatePassword *string\n\tAppTopic            *string\n\tWorkers             *int\n\tPrefix              *string\n\tIntervalMetrics     *bool\n}\n\n// apns is the private struct for handling the communication with APNS\ntype apns struct {\n\tConfig\n\tconnector.Connector\n}\n\n// New creates a new connector.ResponsiveConnector without starting it\nfunc New(router router.Router, sender connector.Sender, config Config) (connector.ResponsiveConnector, error) {\n\tbaseConn, err := connector.NewConnector(\n\t\trouter,\n\t\tsender,\n\t\tconnector.Config{\n\t\t\tName:       \"apns\",\n\t\t\tSchema:     schema,\n\t\t\tPrefix:     *config.Prefix,\n\t\t\tURLPattern: fmt.Sprintf(\"/{%s}/{%s}/{%s:.*}\", deviceIDKey, userIDKey, connector.TopicParam),\n\t\t\tWorkers:    *config.Workers,\n\t\t},\n\t)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Base connector error\")\n\t\treturn nil, err\n\t}\n\ta := &apns{\n\t\tConfig:    config,\n\t\tConnector: baseConn,\n\t}\n\ta.SetResponseHandler(a)\n\treturn a, nil\n}\n\nfunc (a *apns) Start() error {\n\terr := a.Connector.Start()\n\tif err == nil {\n\t\ta.startMetrics()\n\t}\n\treturn err\n}\n\nfunc (a *apns) startMetrics() 
{\n\tmTotalSentMessages.Set(0)\n\tmTotalSendErrors.Set(0)\n\tmTotalResponseErrors.Set(0)\n\tmTotalResponseInternalErrors.Set(0)\n\tmTotalResponseRegistrationErrors.Set(0)\n\tmTotalResponseOtherErrors.Set(0)\n\tmTotalSendNetworkErrors.Set(0)\n\tmTotalSendRetryCloseTLS.Set(0)\n\tmTotalSendRetryUnrecoverable.Set(0)\n\n\tif *a.IntervalMetrics {\n\t\ta.startIntervalMetric(mMinute, time.Minute)\n\t\ta.startIntervalMetric(mHour, time.Hour)\n\t\ta.startIntervalMetric(mDay, time.Hour*24)\n\t}\n}\n\nfunc (a *apns) startIntervalMetric(m metrics.Map, td time.Duration) {\n\tmetrics.RegisterInterval(a.Context(), m, td, resetIntervalMetrics, processAndResetIntervalMetrics)\n}\n\nfunc (a *apns) HandleResponse(request connector.Request, responseIface interface{}, metadata *connector.Metadata, errSend error) error {\n\tlogger.Info(\"Handle APNS response\")\n\tif errSend != nil {\n\t\tlogger.WithField(\"error\", errSend.Error()).WithField(\"error_type\", errSend).Error(\"error when trying to send APNS notification\")\n\t\tmTotalSendErrors.Add(1)\n\t\tif *a.IntervalMetrics && metadata != nil {\n\t\t\taddToLatenciesAndCountsMaps(currentTotalErrorsLatenciesKey, currentTotalErrorsKey, metadata.Latency)\n\t\t}\n\t\treturn errSend\n\t}\n\tr, ok := responseIface.(*apns2.Response)\n\tif !ok {\n\t\tmTotalResponseErrors.Add(1)\n\t\treturn fmt.Errorf(\"Response could not be converted to an APNS Response\")\n\t}\n\tmessageID := request.Message().ID\n\tsubscriber := request.Subscriber()\n\tsubscriber.SetLastID(messageID)\n\tif err := a.Manager().Update(subscriber); err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Manager could not update subscription\")\n\t\tmTotalResponseInternalErrors.Add(1)\n\t\treturn err\n\t}\n\tif r.Sent() {\n\t\tlogger.WithField(\"id\", r.ApnsID).Info(\"APNS notification was successfully sent\")\n\t\tmTotalSentMessages.Add(1)\n\t\tif *a.IntervalMetrics && metadata != nil {\n\t\t\taddToLatenciesAndCountsMaps(currentTotalMessagesLatenciesKey, 
currentTotalMessagesKey, metadata.Latency)\n\t\t}\n\t\treturn nil\n\t}\n\tlogger.Error(\"APNS notification was not sent\")\n\tlogger.WithField(\"id\", r.ApnsID).WithField(\"reason\", r.Reason).Info(\"APNS notification was not sent - details\")\n\tswitch r.Reason {\n\tcase\n\t\tapns2.ReasonMissingDeviceToken,\n\t\tapns2.ReasonBadDeviceToken,\n\t\tapns2.ReasonDeviceTokenNotForTopic,\n\t\tapns2.ReasonUnregistered:\n\n\t\tlogger.WithField(\"id\", r.ApnsID).Info(\"trying to remove subscriber because a relevant error was received from APNS\")\n\t\tmTotalResponseRegistrationErrors.Add(1)\n\t\terr := a.Manager().Remove(subscriber)\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"id\", r.ApnsID).Error(\"could not remove subscriber\")\n\t\t}\n\tdefault:\n\t\tlogger.Error(\"handling other APNS errors\")\n\t\tmTotalResponseOtherErrors.Add(1)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "server/apns/apns_metrics.go",
    "content": "package apns\n\nimport (\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"time\"\n)\n\nvar (\n\tns                               = metrics.NS(\"apns\")\n\tmTotalSentMessages               = ns.NewInt(\"total_sent_messages\")\n\tmTotalSendErrors                 = ns.NewInt(\"total_sent_message_errors\")\n\tmTotalResponseErrors             = ns.NewInt(\"total_response_errors\")\n\tmTotalResponseInternalErrors     = ns.NewInt(\"total_response_internal_errors\")\n\tmTotalResponseRegistrationErrors = ns.NewInt(\"total_response_registration_errors\")\n\tmTotalResponseOtherErrors        = ns.NewInt(\"total_response_other_errors\")\n\tmTotalSendNetworkErrors          = ns.NewInt(\"total_send_network_errors\")\n\tmTotalSendRetryCloseTLS          = ns.NewInt(\"total_send_retry_close_tls\")\n\tmTotalSendRetryUnrecoverable     = ns.NewInt(\"total_send_retry_unrecoverable\")\n\tmMinute                          = ns.NewMap(\"minute\")\n\tmHour                            = ns.NewMap(\"hour\")\n\tmDay                             = ns.NewMap(\"day\")\n)\n\nconst (\n\tcurrentTotalMessagesLatenciesKey = \"current_messages_total_latencies_nanos\"\n\tcurrentTotalMessagesKey          = \"current_messages_count\"\n\tcurrentTotalErrorsLatenciesKey   = \"current_errors_total_latencies_nanos\"\n\tcurrentTotalErrorsKey            = \"current_errors_count\"\n)\n\nfunc processAndResetIntervalMetrics(m metrics.Map, td time.Duration, t time.Time) {\n\tmsgLatenciesValue := m.Get(currentTotalMessagesLatenciesKey)\n\tmsgNumberValue := m.Get(currentTotalMessagesKey)\n\terrLatenciesValue := m.Get(currentTotalErrorsLatenciesKey)\n\terrNumberValue := m.Get(currentTotalErrorsKey)\n\n\tm.Init()\n\tresetIntervalMetrics(m, t)\n\tmetrics.SetRate(m, \"last_messages_rate_sec\", msgNumberValue, td, time.Second)\n\tmetrics.SetRate(m, \"last_errors_rate_sec\", errNumberValue, td, time.Second)\n\tmetrics.SetAverage(m, \"last_messages_average_latency_msec\",\n\t\tmsgLatenciesValue, 
msgNumberValue, metrics.MilliPerNano, metrics.DefaultAverageLatencyJSONValue)\n\tmetrics.SetAverage(m, \"last_errors_average_latency_msec\",\n\t\terrLatenciesValue, errNumberValue, metrics.MilliPerNano, metrics.DefaultAverageLatencyJSONValue)\n}\n\nfunc resetIntervalMetrics(m metrics.Map, t time.Time) {\n\tm.Set(\"current_interval_start\", metrics.NewTime(t))\n\tmetrics.AddToMaps(currentTotalMessagesLatenciesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalMessagesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalErrorsLatenciesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalErrorsKey, 0, m)\n}\n\nfunc addToLatenciesAndCountsMaps(latenciesKey string, countKey string, latency time.Duration) {\n\tmetrics.AddToMaps(latenciesKey, int64(latency), mMinute, mHour, mDay)\n\tmetrics.AddToMaps(countKey, 1, mMinute, mHour, mDay)\n}\n"
  },
  {
    "path": "server/apns/apns_pusher.go",
    "content": "package apns\n\nimport (\n\t\"crypto/tls\"\n\t\"github.com/sideshow/apns2\"\n\t\"github.com/sideshow/apns2/certificate\"\n\t\"golang.org/x/net/http2\"\n\t\"net\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t//see https://github.com/sideshow/apns2/issues/24 and https://github.com/sideshow/apns2/issues/20\n\ttlsDialTimeout    = 20 * time.Second\n\thttpClientTimeout = 30 * time.Second\n)\n\ntype Pusher interface {\n\tPush(*apns2.Notification) (*apns2.Response, error)\n}\n\ntype closable interface {\n\tCloseTLS()\n}\n\nfunc newPusher(c Config) (Pusher, error) {\n\tlogger.Info(\"creating new apns pusher\")\n\n\tvar (\n\t\tcert    tls.Certificate\n\t\terrCert error\n\t)\n\tif c.CertificateFileName != nil && *c.CertificateFileName != \"\" {\n\t\tcert, errCert = certificate.FromP12File(*c.CertificateFileName, *c.CertificatePassword)\n\t} else {\n\t\tcert, errCert = certificate.FromP12Bytes(*c.CertificateBytes, *c.CertificatePassword)\n\t}\n\tif errCert != nil {\n\t\treturn nil, errCert\n\t}\n\n\tvar clientFactory func(certificate tls.Certificate) *apns2Client\n\tif *c.Production {\n\t\tclientFactory = newProductionClient\n\t} else {\n\t\tclientFactory = newDevelopmentClient\n\t}\n\n\tapns2.TLSDialTimeout = tlsDialTimeout\n\tapns2.HTTPClientTimeout = httpClientTimeout\n\n\tlogger.Info(\"created new apns pusher\")\n\n\treturn clientFactory(cert), nil\n}\n\nfunc newProductionClient(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"APNS Pusher in Production mode\")\n\tc := newApns2Client(certificate)\n\tc.Production()\n\tlogger.WithField(\"apns_url\", c.Host).Info(\"APNS Pusher in Production mode url\")\n\treturn c\n}\n\nfunc newDevelopmentClient(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"APNS Pusher in Development mode\")\n\tc := newApns2Client(certificate)\n\tc.Development()\n\tlogger.WithField(\"apns_url\", c.Host).Info(\"APNS Pusher in Development mode url\")\n\treturn c\n}\n\ntype apns2Client struct 
{\n\t*apns2.Client\n\n\ttlsConn net.Conn\n\tmu      sync.Mutex\n}\n\nfunc newApns2Client(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"creating new apns2client\")\n\n\tc := &apns2Client{}\n\n\ttlsConfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{certificate},\n\t}\n\tif len(certificate.Certificate) > 0 {\n\t\ttlsConfig.BuildNameToCertificate()\n\t}\n\ttransport := &http2.Transport{\n\t\tTLSClientConfig: tlsConfig,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\tconn, err := tls.DialWithDialer(&net.Dialer{Timeout: tlsDialTimeout, KeepAlive: 2 * time.Second}, network, addr, cfg)\n\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tif err == nil {\n\t\t\t\tc.tlsConn = conn\n\t\t\t} else {\n\t\t\t\tc.tlsConn = nil\n\t\t\t}\n\t\t\treturn conn, err\n\t\t},\n\t}\n\tclient := &apns2.Client{\n\t\tHTTPClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout:   httpClientTimeout,\n\t\t},\n\t\tCertificate: certificate,\n\t\tHost:        apns2.DefaultHost,\n\t}\n\tc.Client = client\n\tlogger.Info(\"created new apns2client\")\n\treturn c\n}\n\n// interface closable used used by apns_sender\nfunc (c *apns2Client) CloseTLS() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.tlsConn != nil {\n\t\tlogger.Info(\"Trying to close TLS connection\")\n\t\tc.tlsConn.Close()\n\t\tlogger.Info(\"Closed TLS connection\")\n\t\tc.tlsConn = nil\n\t}\n}\n"
  },
  {
    "path": "server/apns/apns_sender.go",
    "content": "package apns\n\nimport (\n\t\"errors\"\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sideshow/apns2\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"net\"\n\t\"time\"\n)\n\nconst (\n\t// deviceIDKey is the key name set on the route params to identify the application\n\tdeviceIDKey = \"device_token\"\n\tuserIDKey   = \"user_id\"\n)\n\nvar (\n\terrPusherInvalidParams = errors.New(\"Invalid parameters of APNS Pusher\")\n\tErrRetryFailed         = errors.New(\"Retry failed\")\n)\n\ntype sender struct {\n\tclient   Pusher\n\tappTopic string\n}\n\nfunc NewSender(config Config) (connector.Sender, error) {\n\tpusher, err := newPusher(config)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"APNS Pusher creation error\")\n\t\treturn nil, err\n\t}\n\treturn NewSenderUsingPusher(pusher, *config.AppTopic)\n}\n\nfunc NewSenderUsingPusher(pusher Pusher, appTopic string) (connector.Sender, error) {\n\tif pusher == nil || appTopic == \"\" {\n\t\treturn nil, errPusherInvalidParams\n\t}\n\treturn &sender{\n\t\tclient:   pusher,\n\t\tappTopic: appTopic,\n\t}, nil\n}\n\nfunc (s sender) Send(request connector.Request) (interface{}, error) {\n\tdeviceToken := request.Subscriber().Route().Get(deviceIDKey)\n\tlogger.WithField(\"deviceToken\", deviceToken).Info(\"Trying to push a message to APNS\")\n\tpush := func() (interface{}, error) {\n\t\treturn s.client.Push(&apns2.Notification{\n\t\t\tPriority:    apns2.PriorityHigh,\n\t\t\tTopic:       s.appTopic,\n\t\t\tDeviceToken: deviceToken,\n\t\t\tPayload:     request.Message().Body,\n\t\t})\n\t}\n\twithRetry := &retryable{\n\t\tBackoff: backoff.Backoff{\n\t\t\tMin:    1 * time.Second,\n\t\t\tMax:    10 * time.Second,\n\t\t\tFactor: 2,\n\t\t\tJitter: true,\n\t\t},\n\t\tmaxTries: 3,\n\t}\n\tresult, err := withRetry.execute(push)\n\tif err != nil && err == ErrRetryFailed {\n\t\tif closable, ok := s.client.(closable); ok {\n\t\t\tlogger.Warn(\"Close TLS and retry 
again\")\n\t\t\tmTotalSendRetryCloseTLS.Add(1)\n\t\t\tclosable.CloseTLS()\n\t\t\treturn push()\n\t\t} else {\n\t\t\tmTotalSendRetryUnrecoverable.Add(1)\n\t\t\tlogger.Error(\"Cannot Close TLS. Unrecoverable state\")\n\t\t}\n\t}\n\treturn result, err\n}\n\ntype retryable struct {\n\tbackoff.Backoff\n\tmaxTries int\n}\n\nfunc (r *retryable) execute(op func() (interface{}, error)) (interface{}, error) {\n\ttryCounter := 0\n\tfor {\n\t\ttryCounter++\n\t\tresult, opError := op()\n\t\t// retry on network errors\n\t\tif _, ok := opError.(net.Error); ok {\n\t\t\tmTotalSendNetworkErrors.Add(1)\n\t\t\tif tryCounter >= r.maxTries {\n\t\t\t\treturn \"\", ErrRetryFailed\n\t\t\t}\n\t\t\td := r.Duration()\n\t\t\tlogger.WithField(\"error\", opError.Error()).Warn(\"Retry in \", d)\n\t\t\ttime.Sleep(d)\n\t\t\tcontinue\n\t\t} else {\n\t\t\treturn result, opError\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "server/apns/apns_sender_test.go",
    "content": "package apns\n\nimport (\n\t\"errors\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestNewSender_ErrorBytes(t *testing.T) {\n\ta := assert.New(t)\n\n\t//given\n\temptyBytes := []byte(\"\")\n\temptyPassword := \"\"\n\tcfg := Config{\n\t\tCertificateBytes:    &emptyBytes,\n\t\tCertificatePassword: &emptyPassword,\n\t}\n\n\t//when\n\tpusher, err := NewSender(cfg)\n\n\t// then\n\ta.Error(err)\n\ta.Nil(pusher)\n}\n\nfunc TestNewSender_ErrorFile(t *testing.T) {\n\ta := assert.New(t)\n\n\t//given\n\twrongFilename := \".\"\n\temptyPassword := \"\"\n\tcfg := Config{\n\t\tCertificateFileName: &wrongFilename,\n\t\tCertificatePassword: &emptyPassword,\n\t}\n\n\t//when\n\tpusher, err := NewSender(cfg)\n\n\t// then\n\ta.Error(err)\n\ta.Nil(pusher)\n}\n\nfunc TestSender_Send(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// given\n\trouteParams := make(map[string]string)\n\trouteParams[\"device_id\"] = \"1234\"\n\trouteConfig := router.RouteConfig{\n\t\tPath:        protocol.Path(\"path\"),\n\t\tRouteParams: routeParams,\n\t}\n\troute := router.NewRoute(routeConfig)\n\n\tmsg := &protocol.Message{\n\t\tBody: []byte(\"{}\"),\n\t}\n\n\tmSubscriber := NewMockSubscriber(testutil.MockCtrl)\n\tmSubscriber.EXPECT().Route().Return(route).AnyTimes()\n\n\tmRequest := NewMockRequest(testutil.MockCtrl)\n\tmRequest.EXPECT().Subscriber().Return(mSubscriber).AnyTimes()\n\tmRequest.EXPECT().Message().Return(msg).AnyTimes()\n\n\tmPusher := NewMockPusher(testutil.MockCtrl)\n\tmPusher.EXPECT().Push(gomock.Any()).Return(nil, nil)\n\n\t// and\n\ts, err := NewSenderUsingPusher(mPusher, \"com.myapp\")\n\ta.NoError(err)\n\n\t// when\n\trsp, err := s.Send(mRequest)\n\n\t// then\n\ta.NoError(err)\n\ta.Nil(rsp)\n}\n\nfunc TestSender_Retry(t *testing.T) 
{\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// given\n\trouteParams := make(map[string]string)\n\trouteParams[\"device_id\"] = \"1234\"\n\trouteConfig := router.RouteConfig{\n\t\tPath:        protocol.Path(\"path\"),\n\t\tRouteParams: routeParams,\n\t}\n\troute := router.NewRoute(routeConfig)\n\n\tmsg := &protocol.Message{\n\t\tBody: []byte(\"{}\"),\n\t}\n\n\tmSubscriber := NewMockSubscriber(testutil.MockCtrl)\n\tmSubscriber.EXPECT().Route().Return(route).AnyTimes()\n\n\tmRequest := NewMockRequest(testutil.MockCtrl)\n\tmRequest.EXPECT().Subscriber().Return(mSubscriber).AnyTimes()\n\tmRequest.EXPECT().Message().Return(msg).AnyTimes()\n\n\tmPusher := NewMockPusher(testutil.MockCtrl)\n\n\tmPusher.EXPECT().Push(gomock.Any()).Return(nil, errMockTimeout)\n\tmPusher.EXPECT().Push(gomock.Any()).Return(nil, nil)\n\n\t// and\n\ts, err := NewSenderUsingPusher(mPusher, \"com.myapp\")\n\ta.NoError(err)\n\n\t// when\n\trsp, err := s.Send(mRequest)\n\n\t// then\n\ta.NoError(err)\n\ta.Nil(rsp)\n\n}\n\ntype resultpair struct {\n\tresult interface{}\n\terr    error\n}\n\nfunc Test_Retriable(t *testing.T) {\n\ta := assert.New(t)\n\n\ttestCases := []struct {\n\t\tname                string\n\t\tmaxTries            int\n\t\tresults             []resultpair\n\t\texpectedResult      string\n\t\texpectedError       error\n\t\texpectedMethodCalls int\n\t}{\n\t\t{\"No errors\", 3, []resultpair{{result: \"0\"}}, \"0\", nil, 1},\n\t\t{\"Retry once\", 3, []resultpair{{err: errMockTimeout}, {result: \"1\"}}, \"1\", nil, 2},\n\t\t{\"Retry twice\", 3, []resultpair{{err: errMockTimeout}, {err: errMockTimeout}, {result: \"2\"}}, \"2\", nil, 3},\n\t\t{\"Retry only twice\", 3, []resultpair{{err: errMockTimeout}, {err: errMockTimeout}, {err: errMockTimeout, result: \"\"}, {result: \"3\"}}, \"\", ErrRetryFailed, 3},\n\t\t{\"Do not retry\", 3, []resultpair{{err: errMockOther, result: \"\"}, {result: \"1\"}}, \"\", errMockOther, 1},\n\t}\n\n\tfor _, tc := range 
testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\n\t\t\t// given\n\t\t\tvar counter int\n\t\t\tmethod := func() (interface{}, error) {\n\t\t\t\telem := tc.results[counter]\n\t\t\t\tcounter++\n\t\t\t\treturn elem.result, elem.err\n\t\t\t}\n\n\t\t\twithRetry := &retryable{\n\t\t\t\tmaxTries: tc.maxTries,\n\t\t\t}\n\n\t\t\t// when\n\t\t\tresult, err := withRetry.execute(method)\n\n\t\t\t// then\n\t\t\ta.EqualValues(tc.expectedResult, result)\n\t\t\ta.EqualValues(tc.expectedError, err)\n\t\t\ta.EqualValues(tc.expectedMethodCalls, counter)\n\t\t})\n\t}\n}\n\n// see - net.Error\ntype mockTimeout struct{}\n\nfunc (e *mockTimeout) Error() string   { return \"mock i/o timeout\" }\nfunc (e *mockTimeout) Timeout() bool   { return true }\nfunc (e *mockTimeout) Temporary() bool { return true }\n\nvar errMockTimeout error = &mockTimeout{}\nvar errMockOther error = errors.New(\"mock not retriable\")\n"
  },
  {
    "path": "server/apns/apns_test.go",
    "content": "package apns\n\nimport (\n\t\"errors\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/sideshow/apns2\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nvar ErrSendRandomError = errors.New(\"A Sender error\")\n\nfunc TestNew_WithoutKVStore(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t//given\n\tmRouter := NewMockRouter(testutil.MockCtrl)\n\terrKVS := errors.New(\"No KVS was set-up in Router\")\n\tmRouter.EXPECT().KVStore().Return(nil, errKVS).AnyTimes()\n\tmSender := NewMockSender(testutil.MockCtrl)\n\tprefix := \"/apns/\"\n\tworkers := 1\n\tcfg := Config{\n\t\tPrefix:  &prefix,\n\t\tWorkers: &workers,\n\t}\n\n\t//when\n\tc, err := New(mRouter, mSender, cfg)\n\n\t//then\n\ta.Error(err)\n\ta.Nil(c)\n}\n\nfunc TestConn_HandleResponseOnSendError(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t//given\n\tc, _ := newAPNSConnector(t)\n\tmRequest := NewMockRequest(testutil.MockCtrl)\n\n\t//when\n\terr := c.HandleResponse(mRequest, nil, nil, ErrSendRandomError)\n\n\t//then\n\ta.Equal(ErrSendRandomError, err)\n}\n\nfunc TestConn_HandleResponse(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t//given\n\tc, mKVS := newAPNSConnector(t)\n\n\tmSubscriber := NewMockSubscriber(testutil.MockCtrl)\n\tmSubscriber.EXPECT().SetLastID(gomock.Any())\n\tmSubscriber.EXPECT().Key().Return(\"key\").AnyTimes()\n\tmSubscriber.EXPECT().Encode().Return([]byte(\"{}\"), nil).AnyTimes()\n\tmKVS.EXPECT().Put(schema, \"key\", []byte(\"{}\")).Times(2)\n\n\tc.Manager().Add(mSubscriber)\n\n\tmessage := &protocol.Message{\n\t\tID: 42,\n\t}\n\tmRequest := 
NewMockRequest(testutil.MockCtrl)\n\tmRequest.EXPECT().Message().Return(message).AnyTimes()\n\tmRequest.EXPECT().Subscriber().Return(mSubscriber).AnyTimes()\n\n\tresponse := &apns2.Response{\n\t\tApnsID:     \"id-life\",\n\t\tStatusCode: 200,\n\t}\n\n\t//when\n\terr := c.HandleResponse(mRequest, response, nil, nil)\n\n\t//then\n\ta.NoError(err)\n}\n\nfunc TestNew_HandleResponseHandleSubscriber(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t//given\n\tc, mKVS := newAPNSConnector(t)\n\n\tremoveForReasons := []string{\n\t\tapns2.ReasonMissingDeviceToken,\n\t\tapns2.ReasonBadDeviceToken,\n\t\tapns2.ReasonDeviceTokenNotForTopic,\n\t\tapns2.ReasonUnregistered,\n\t}\n\tfor _, reason := range removeForReasons {\n\t\tmessage := &protocol.Message{\n\t\t\tID: 42,\n\t\t}\n\t\tmSubscriber := NewMockSubscriber(testutil.MockCtrl)\n\t\tmSubscriber.EXPECT().SetLastID(gomock.Any())\n\t\tmSubscriber.EXPECT().Cancel()\n\t\tmSubscriber.EXPECT().Key().Return(\"key\").AnyTimes()\n\t\tmSubscriber.EXPECT().Encode().Return([]byte(\"{}\"), nil).AnyTimes()\n\t\tmKVS.EXPECT().Put(schema, \"key\", []byte(\"{}\")).Times(2)\n\t\tmKVS.EXPECT().Delete(schema, \"key\")\n\n\t\tc.Manager().Add(mSubscriber)\n\n\t\tmRequest := NewMockRequest(testutil.MockCtrl)\n\t\tmRequest.EXPECT().Message().Return(message).AnyTimes()\n\t\tmRequest.EXPECT().Subscriber().Return(mSubscriber).AnyTimes()\n\n\t\tresponse := &apns2.Response{\n\t\t\tApnsID:     \"id-life\",\n\t\t\tStatusCode: 400,\n\t\t\tReason:     reason,\n\t\t}\n\n\t\t//when\n\t\terr := c.HandleResponse(mRequest, response, nil, nil)\n\n\t\t//then\n\t\ta.NoError(err)\n\t}\n}\n\nfunc TestNew_HandleResponseDoNotHandleSubscriber(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t//given\n\tc, mKVS := newAPNSConnector(t)\n\n\tnoActionForReasons := 
[]string{\n\t\tapns2.ReasonPayloadEmpty,\n\t\tapns2.ReasonPayloadTooLarge,\n\t\tapns2.ReasonBadTopic,\n\t\tapns2.ReasonTopicDisallowed,\n\t\tapns2.ReasonBadMessageID,\n\t\tapns2.ReasonBadExpirationDate,\n\t\tapns2.ReasonBadPriority,\n\t\tapns2.ReasonDuplicateHeaders,\n\t\tapns2.ReasonBadCertificateEnvironment,\n\t\tapns2.ReasonBadCertificate,\n\t\tapns2.ReasonForbidden,\n\t\tapns2.ReasonBadPath,\n\t\tapns2.ReasonMethodNotAllowed,\n\t\tapns2.ReasonTooManyRequests,\n\t\tapns2.ReasonIdleTimeout,\n\t\tapns2.ReasonShutdown,\n\t\tapns2.ReasonInternalServerError,\n\t\tapns2.ReasonServiceUnavailable,\n\t\tapns2.ReasonMissingTopic,\n\t}\n\n\tfor _, reason := range noActionForReasons {\n\t\tmessage := &protocol.Message{\n\t\t\tID: 42,\n\t\t}\n\n\t\tmSubscriber := NewMockSubscriber(testutil.MockCtrl)\n\t\tmSubscriber.EXPECT().SetLastID(gomock.Any())\n\t\tmSubscriber.EXPECT().Key().Return(\"key\").AnyTimes()\n\t\tmSubscriber.EXPECT().Encode().Return([]byte(\"{}\"), nil).AnyTimes()\n\t\tmSubscriber.EXPECT().Cancel()\n\t\tmKVS.EXPECT().Put(schema, \"key\", []byte(\"{}\")).Times(2)\n\t\tmKVS.EXPECT().Delete(schema, \"key\")\n\n\t\tc.Manager().Add(mSubscriber)\n\n\t\tmRequest := NewMockRequest(testutil.MockCtrl)\n\t\tmRequest.EXPECT().Message().Return(message).AnyTimes()\n\t\tmRequest.EXPECT().Subscriber().Return(mSubscriber).AnyTimes()\n\n\t\tresponse := &apns2.Response{\n\t\t\tApnsID:     \"id-apns\",\n\t\t\tStatusCode: 400,\n\t\t\tReason:     reason,\n\t\t}\n\n\t\t//when\n\t\terr := c.HandleResponse(mRequest, response, nil, nil)\n\n\t\t//then\n\t\ta.NoError(err)\n\n\t\tc.Manager().Remove(mSubscriber)\n\t}\n}\n\nfunc newAPNSConnector(t *testing.T) (c connector.ResponsiveConnector, mKVS *MockKVStore) {\n\tmKVS = NewMockKVStore(testutil.MockCtrl)\n\tmRouter := NewMockRouter(testutil.MockCtrl)\n\tmRouter.EXPECT().KVStore().Return(mKVS, nil).AnyTimes()\n\tmSender := NewMockSender(testutil.MockCtrl)\n\n\tprefix := \"/apns/\"\n\tworkers := 1\n\tintervalMetrics := false\n\tpassword := 
\"test\"\n\tbytes := []byte(\"test\")\n\tcfg := Config{\n\t\tPrefix:              &prefix,\n\t\tWorkers:             &workers,\n\t\tIntervalMetrics:     &intervalMetrics,\n\t\tCertificatePassword: &password,\n\t\tCertificateBytes:    &bytes,\n\t}\n\tc, err := New(mRouter, mSender, cfg)\n\n\tassert.NoError(t, err)\n\tassert.NotNil(t, c)\n\treturn\n}\n"
  },
  {
    "path": "server/apns/logger.go",
    "content": "package apns\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithField(\"module\", \"apns\")\n"
  },
  {
    "path": "server/apns/mocks_connector_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/connector (interfaces: Sender,Request,Subscriber)\n\npackage apns\n\nimport (\n\t\"context\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/router\"\n)\n\n// Mock of Sender interface\ntype MockSender struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockSenderRecorder\n}\n\n// Recorder for MockSender (not exported)\ntype _MockSenderRecorder struct {\n\tmock *MockSender\n}\n\nfunc NewMockSender(ctrl *gomock.Controller) *MockSender {\n\tmock := &MockSender{ctrl: ctrl}\n\tmock.recorder = &_MockSenderRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockSender) EXPECT() *_MockSenderRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockSender) Send(_param0 connector.Request) (interface{}, error) {\n\tret := _m.ctrl.Call(_m, \"Send\", _param0)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockSenderRecorder) Send(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Send\", arg0)\n}\n\n// Mock of Request interface\ntype MockRequest struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRequestRecorder\n}\n\n// Recorder for MockRequest (not exported)\ntype _MockRequestRecorder struct {\n\tmock *MockRequest\n}\n\nfunc NewMockRequest(ctrl *gomock.Controller) *MockRequest {\n\tmock := &MockRequest{ctrl: ctrl}\n\tmock.recorder = &_MockRequestRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRequest) EXPECT() *_MockRequestRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRequest) Message() *protocol.Message {\n\tret := _m.ctrl.Call(_m, \"Message\")\n\tret0, _ := ret[0].(*protocol.Message)\n\treturn ret0\n}\n\nfunc (_mr *_MockRequestRecorder) Message() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Message\")\n}\n\nfunc (_m *MockRequest) Subscriber() 
connector.Subscriber {\n\tret := _m.ctrl.Call(_m, \"Subscriber\")\n\tret0, _ := ret[0].(connector.Subscriber)\n\treturn ret0\n}\n\nfunc (_mr *_MockRequestRecorder) Subscriber() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscriber\")\n}\n\n// Mock of Subscriber interface\ntype MockSubscriber struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockSubscriberRecorder\n}\n\n// Recorder for MockSubscriber (not exported)\ntype _MockSubscriberRecorder struct {\n\tmock *MockSubscriber\n}\n\nfunc NewMockSubscriber(ctrl *gomock.Controller) *MockSubscriber {\n\tmock := &MockSubscriber{ctrl: ctrl}\n\tmock.recorder = &_MockSubscriberRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockSubscriber) EXPECT() *_MockSubscriberRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockSubscriber) Cancel() {\n\t_m.ctrl.Call(_m, \"Cancel\")\n}\n\nfunc (_mr *_MockSubscriberRecorder) Cancel() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cancel\")\n}\n\nfunc (_m *MockSubscriber) Encode() ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"Encode\")\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockSubscriberRecorder) Encode() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Encode\")\n}\n\nfunc (_m *MockSubscriber) Filter(_param0 map[string]string) bool {\n\tret := _m.ctrl.Call(_m, \"Filter\", _param0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Filter(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Filter\", arg0)\n}\n\nfunc (_m *MockSubscriber) Key() string {\n\tret := _m.ctrl.Call(_m, \"Key\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Key() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Key\")\n}\n\nfunc (_m *MockSubscriber) Loop(_param0 context.Context, _param1 connector.Queue) error {\n\tret := _m.ctrl.Call(_m, \"Loop\", _param0, _param1)\n\tret0, _ := 
ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Loop(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Loop\", arg0, arg1)\n}\n\nfunc (_m *MockSubscriber) Reset() error {\n\tret := _m.ctrl.Call(_m, \"Reset\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Reset() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Reset\")\n}\n\nfunc (_m *MockSubscriber) Route() *router.Route {\n\tret := _m.ctrl.Call(_m, \"Route\")\n\tret0, _ := ret[0].(*router.Route)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Route() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Route\")\n}\n\nfunc (_m *MockSubscriber) SetLastID(_param0 uint64) {\n\t_m.ctrl.Call(_m, \"SetLastID\", _param0)\n}\n\nfunc (_mr *_MockSubscriberRecorder) SetLastID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetLastID\", arg0)\n}\n"
  },
  {
    "path": "server/apns/mocks_kvstore_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/kvstore (interfaces: KVStore)\n\npackage apns\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of KVStore interface\ntype MockKVStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockKVStoreRecorder\n}\n\n// Recorder for MockKVStore (not exported)\ntype _MockKVStoreRecorder struct {\n\tmock *MockKVStore\n}\n\nfunc NewMockKVStore(ctrl *gomock.Controller) *MockKVStore {\n\tmock := &MockKVStore{ctrl: ctrl}\n\tmock.recorder = &_MockKVStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockKVStore) EXPECT() *_MockKVStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockKVStore) Delete(_param0 string, _param1 string) error {\n\tret := _m.ctrl.Call(_m, \"Delete\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Delete\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Get(_param0 string, _param1 string) ([]byte, bool, error) {\n\tret := _m.ctrl.Call(_m, \"Get\", _param0, _param1)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(bool)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockKVStoreRecorder) Get(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Get\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Iterate(_param0 string, _param1 string) chan [2]string {\n\tret := _m.ctrl.Call(_m, \"Iterate\", _param0, _param1)\n\tret0, _ := ret[0].(chan [2]string)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Iterate(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Iterate\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) IterateKeys(_param0 string, _param1 string) chan string {\n\tret := _m.ctrl.Call(_m, \"IterateKeys\", _param0, _param1)\n\tret0, _ := ret[0].(chan string)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockKVStoreRecorder) IterateKeys(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IterateKeys\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Put(_param0 string, _param1 string, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Put\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Put(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Put\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/apns/mocks_pusher_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/apns (interfaces: Pusher)\n\npackage apns\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tapns2 \"github.com/sideshow/apns2\"\n)\n\n// Mock of Pusher interface\ntype MockPusher struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockPusherRecorder\n}\n\n// Recorder for MockPusher (not exported)\ntype _MockPusherRecorder struct {\n\tmock *MockPusher\n}\n\nfunc NewMockPusher(ctrl *gomock.Controller) *MockPusher {\n\tmock := &MockPusher{ctrl: ctrl}\n\tmock.recorder = &_MockPusherRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockPusher) EXPECT() *_MockPusherRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockPusher) Push(_param0 *apns2.Notification) (*apns2.Response, error) {\n\tret := _m.ctrl.Call(_m, \"Push\", _param0)\n\tret0, _ := ret[0].(*apns2.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockPusherRecorder) Push(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Push\", arg0)\n}\n"
  },
  {
    "path": "server/apns/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage apns\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/auth/accessmanager.go",
    "content": "package auth\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n)\n\n// AccessType permission required by the user\ntype AccessType int\n\nconst (\n\t// READ permission\n\tREAD AccessType = iota\n\n\t// WRITE permission\n\tWRITE\n)\n\n// AccessManager interface allows to provide a custom authentication mechanism\ntype AccessManager interface {\n\tIsAllowed(accessType AccessType, userID string, path protocol.Path) bool\n}\n"
  },
  {
    "path": "server/auth/accessmanager_test.go",
    "content": "package auth\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n)\n\nfunc Test_AllowAllAccessManager(t *testing.T) {\n\ta := assert.New(t)\n\tam := AccessManager(NewAllowAllAccessManager(true))\n\ta.True(am.IsAllowed(READ, \"userid\", \"/path\"))\n\n\tam = AccessManager(NewAllowAllAccessManager(false))\n\ta.False(am.IsAllowed(READ, \"userid\", \"/path\"))\n}\n\nfunc Test_RestAccessManagerAllowed(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"true\"))\n\t}))\n\n\tdefer ts.Close()\n\ta := assert.New(t)\n\tam := NewRestAccessManager(ts.URL)\n\ta.True(am.IsAllowed(READ, \"foo\", \"/foo\"))\n\ta.True(am.IsAllowed(WRITE, \"foo\", \"/foo\"))\n}\n\nfunc Test_RestAccessManagerNotAllowed(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"false\"))\n\t}))\n\n\tdefer ts.Close()\n\tam := NewRestAccessManager(ts.URL)\n\ta := assert.New(t)\n\ta.False(am.IsAllowed(READ, \"user\", \"/foo\"))\n}\n\nfunc Test_RestAccessManagerNotAllowedWithServerNotStarted(t *testing.T) {\n\tts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"false\"))\n\t}))\n\n\tdefer ts.Close()\n\tam := NewRestAccessManager(ts.URL)\n\ta := assert.New(t)\n\ta.False(am.IsAllowed(READ, \"user\", \"/foo\"))\n}\n\nfunc Test_RestAccessManagerNotAllowedHttpReturningStatusForbidden(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t}))\n\n\tdefer ts.Close()\n\ta := assert.New(t)\n\tam := NewRestAccessManager(ts.URL)\n\ta.False(am.IsAllowed(READ, \"foo\", \"/foo\"))\n\ta.False(am.IsAllowed(WRITE, \"foo\", \"/foo\"))\n}\n"
  },
  {
    "path": "server/auth/allow_all_accessmanager.go",
    "content": "package auth\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n)\n\n//AllowAllAccessManager is a dummy implementation that grants access for everything.\ntype AllowAllAccessManager bool\n\n//NewAllowAllAccessManager returns a new AllowAllAccessManager (depending on the passed parameter, always true or always false)\nfunc NewAllowAllAccessManager(allowAll bool) AllowAllAccessManager {\n\treturn AllowAllAccessManager(allowAll)\n}\n\n//IsAllowed returns always the same value, given at construction time (true or false).\nfunc (am AllowAllAccessManager) IsAllowed(accessType AccessType, userID string, path protocol.Path) bool {\n\treturn bool(am)\n}\n"
  },
  {
    "path": "server/auth/logger.go",
    "content": "package auth\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"accessManager\",\n})\n"
  },
  {
    "path": "server/auth/mocks_auth_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/auth (interfaces: AccessManager)\n\npackage auth\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n)\n\n// Mock of AccessManager interface\ntype MockAccessManager struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockAccessManagerRecorder\n}\n\n// Recorder for MockAccessManager (not exported)\ntype _MockAccessManagerRecorder struct {\n\tmock *MockAccessManager\n}\n\nfunc NewMockAccessManager(ctrl *gomock.Controller) *MockAccessManager {\n\tmock := &MockAccessManager{ctrl: ctrl}\n\tmock.recorder = &_MockAccessManagerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockAccessManager) EXPECT() *_MockAccessManagerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockAccessManager) IsAllowed(_param0 AccessType, _param1 string, _param2 protocol.Path) bool {\n\tret := _m.ctrl.Call(_m, \"IsAllowed\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockAccessManagerRecorder) IsAllowed(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IsAllowed\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/auth/rest_accessmanager.go",
    "content": "package auth\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n)\n\n// RestAccessManager is a url for which the access is allowed or not.\ntype RestAccessManager string\n\n// NewRestAccessManager returns a new RestAccessManager.\nfunc NewRestAccessManager(url string) RestAccessManager {\n\treturn RestAccessManager(url)\n}\n\n// IsAllowed is an implementation of the AccessManager interface.\n// The boolean result is based on matching between the desired AccessType, the userId and the path.\nfunc (ram RestAccessManager) IsAllowed(accessType AccessType, userId string, path protocol.Path) bool {\n\n\tu, _ := url.Parse(string(ram))\n\tq := u.Query()\n\tif accessType == READ {\n\t\tq.Set(\"type\", \"read\")\n\t} else {\n\t\tq.Set(\"type\", \"write\")\n\t}\n\n\tq.Set(\"userId\", userId)\n\tq.Set(\"path\", string(path))\n\n\tresp, err := http.DefaultClient.Get(u.String())\n\n\tif err != nil {\n\t\tlogger.WithError(err).WithField(\"module\", \"RestAccessManager\").Warn(\"Write message failed\")\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil || resp.StatusCode != 200 {\n\t\tlogger.WithError(err).WithField(\"httpCode\", resp.StatusCode).Info(\"Error getting permission\")\n\t\tlogger.WithField(\"responseBody\", responseBody).Debug(\"HTTP Response Body\")\n\t\treturn false\n\t}\n\tlogger.WithFields(log.Fields{\n\t\t\"access_type\":  accessType,\n\t\t\"userId\":       userId,\n\t\t\"path\":         path,\n\t\t\"responseBody\": string(responseBody),\n\t}).Debug(\"Access allowed\")\n\treturn \"true\" == string(responseBody)\n}\n"
  },
  {
    "path": "server/benchmarking_apns_test.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/sideshow/apns2\"\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/server/apns\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/websocket\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\n// APNS benchmarks\nfunc BenchmarkAPNS_1Workers50MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       1,\n\t\tsubscriptions: 8,\n\t\ttimeout:       50 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputAPNS()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkAPNS_8Workers50MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       8,\n\t\tsubscriptions: 8,\n\t\ttimeout:       50 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputAPNS()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkAPNS_16Workers50MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       16,\n\t\tsubscriptions: 8,\n\t\ttimeout:       50 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputAPNS()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkAPNS_1Workers100MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       1,\n\t\tsubscriptions: 8,\n\t\ttimeout:       100 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputAPNS()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkAPNS_8Workers100MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             
b,\n\t\tworkers:       8,\n\t\tsubscriptions: 8,\n\t\ttimeout:       100 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputAPNS()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkAPNS_16Workers100MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       16,\n\t\tsubscriptions: 8,\n\t\ttimeout:       100 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputAPNS()\n\tfmt.Println(params)\n}\n\nfunc (params *benchParams) throughputAPNS() {\n\tdefer testutil.EnableDebugForMethod()()\n\t_, finish := testutil.NewMockBenchmarkCtrl(params.B)\n\tdefer finish()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(params)\n\n\tdir, errTempDir := ioutil.TempDir(\"\", \"guble_benchmarking_apns_test\")\n\ta.NoError(errTempDir)\n\n\t*Config.HttpListen = \"localhost:0\"\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.StoragePath = dir\n\t*Config.APNS.Enabled = true\n\t*Config.APNS.AppTopic = \"app.topic\"\n\t*Config.APNS.Prefix = \"/apns/\"\n\n\tparams.receiveC = make(chan bool)\n\tCreateModules = createModulesWebsocketAndMockAPNSPusher(params.receiveC, params.timeout)\n\n\tparams.service = StartService()\n\n\tvar apnsConn connector.ResponsiveConnector\n\tvar ok bool\n\tfor _, iface := range params.service.ModulesSortedByStartOrder() {\n\t\tapnsConn, ok = iface.(connector.ResponsiveConnector)\n\t\tif ok {\n\t\t\tbreak\n\t\t}\n\t}\n\tif apnsConn == nil {\n\t\ta.FailNow(\"There should be a module of type: APNS Connector\")\n\t}\n\n\turlFormat := fmt.Sprintf(\"http://%s/apns/apns-%%d/%%d/%%s\", params.service.WebServer().GetAddr())\n\tfor i := 1; i <= params.subscriptions; i++ {\n\t\t// create APNS subscription\n\t\tresponse, errPost := http.Post(\n\t\t\tfmt.Sprintf(urlFormat, i, i, strings.TrimPrefix(testTopic, 
\"/\")),\n\t\t\t\"text/plain\",\n\t\t\tbytes.NewBufferString(\"\"),\n\t\t)\n\t\ta.NoError(errPost)\n\t\ta.Equal(response.StatusCode, 200)\n\n\t\tbody, errReadAll := ioutil.ReadAll(response.Body)\n\t\ta.NoError(errReadAll)\n\t\ta.Equal(\"{\\\"subscribed\\\":\\\"/topic\\\"}\", string(body))\n\t}\n\n\tclients := params.createClients()\n\n\t// Report allocations also\n\tparams.ReportAllocs()\n\n\texpectedMessagesNumber := params.N * params.clients * params.subscriptions\n\tlogger.WithFields(log.Fields{\n\t\t\"expectedMessagesNumber\": expectedMessagesNumber,\n\t\t\"b.N\": params.N,\n\t}).Info(\"Expecting messages\")\n\tparams.wg.Add(expectedMessagesNumber)\n\n\t// start the receive loop (a select on receiveC and doneC)\n\tparams.doneC = make(chan struct{})\n\tparams.receiveLoop()\n\n\tparams.ResetTimer()\n\n\t// send all messages, or fail on any error\n\tfor _, cl := range clients {\n\t\tgo func(cl client.Client) {\n\t\t\tfor i := 0; i < params.N; i++ {\n\t\t\t\terr := params.sender(cl)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.FailNow(\"Message could not be sent\")\n\t\t\t\t}\n\t\t\t\tparams.sent++\n\t\t\t}\n\t\t}(cl)\n\t}\n\n\t// wait to receive all messages\n\tparams.wg.Wait()\n\n\t// stop timer after the actual test\n\tparams.StopTimer()\n\n\tclose(params.doneC)\n\n\ta.NoError(params.service.Stop())\n\tparams.service = nil\n\tclose(params.receiveC)\n\terrRemove := os.RemoveAll(dir)\n\tif errRemove != nil {\n\t\tlogger.WithError(errRemove).WithField(\"module\", \"testing\").Error(\"Could not remove directory\")\n\t}\n}\n\nvar createModulesWebsocketAndMockAPNSPusher = func(receiveC chan bool, simulatedLatency time.Duration) func(router router.Router) []interface{} {\n\treturn func(router router.Router) []interface{} {\n\t\tvar modules []interface{}\n\n\t\tif wsHandler, err := websocket.NewWSHandler(router, \"/stream/\"); err != nil {\n\t\t\tlogger.WithError(err).Error(\"Error loading WSHandler module\")\n\t\t} else {\n\t\t\tmodules = append(modules, 
wsHandler)\n\t\t}\n\n\t\tif *Config.APNS.Enabled {\n\t\t\tif *Config.APNS.AppTopic == \"\" {\n\t\t\t\tlogger.Panic(\"The Mobile App Topic (usually the bundle-id) has to be provided when APNS is enabled\")\n\t\t\t}\n\n\t\t\t// create and use a mock Pusher - introducing a latency per each message\n\t\t\trsp := &apns2.Response{\n\t\t\t\tApnsID:     \"apns-id\",\n\t\t\t\tStatusCode: 200,\n\t\t\t}\n\t\t\tmPusher := NewMockPusher(testutil.MockCtrl)\n\t\t\tmPusher.EXPECT().Push(gomock.Any()).\n\t\t\t\tDo(func(notif *apns2.Notification) (*apns2.Response, error) {\n\t\t\t\t\ttime.Sleep(simulatedLatency)\n\t\t\t\t\treceiveC <- true\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}).Return(rsp, nil).AnyTimes()\n\n\t\t\tapnsSender, err := apns.NewSenderUsingPusher(mPusher, *Config.APNS.AppTopic)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Panic(\"APNS Sender could not be created\")\n\t\t\t}\n\t\t\tif apnsConn, err := apns.New(router, apnsSender, Config.APNS); err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"Error creating APNS connector\")\n\t\t\t} else {\n\t\t\t\tmodules = append(modules, apnsConn)\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Info(\"APNS: disabled\")\n\t\t}\n\n\t\treturn modules\n\t}\n}\n"
  },
  {
    "path": "server/benchmarking_common_test.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/server/service\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\ttestTopic = \"/topic\"\n)\n\ntype sender func(c client.Client) error\n\nfunc sendMessageSample(c client.Client) error {\n\treturn c.Send(testTopic, \"test-body\", \"{id:id}\")\n}\n\ntype benchParams struct {\n\t*testing.B\n\tworkers       int           // number of workers\n\tsubscriptions int           // number of subscriptions listening on the topic\n\ttimeout       time.Duration // timeout response\n\tclients       int           // number of clients\n\tsender        sender        // the function that will send the messages\n\tsent          int           // sent messages\n\treceived      int           // received messages\n\n\tservice  *service.Service\n\treceiveC chan bool\n\tdoneC    chan struct{}\n\n\twg    sync.WaitGroup\n\tstart time.Time\n\tend   time.Time\n}\n\nfunc (params *benchParams) createClients() (clients []client.Client) {\n\twsURL := \"ws://\" + params.service.WebServer().GetAddr() + \"/stream/user/\"\n\tfor clientID := 0; clientID < params.clients; clientID++ {\n\t\tlocation := wsURL + strconv.Itoa(clientID)\n\t\tc, err := client.Open(location, \"http://localhost/\", 1000, true)\n\t\tif err != nil {\n\t\t\tassert.FailNow(params, \"guble client could not connect to server\")\n\t\t}\n\t\tclients = append(clients, c)\n\t}\n\treturn\n}\n\nfunc (params *benchParams) receiveLoop() {\n\tfor i := 0; i <= params.workers; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-params.receiveC:\n\t\t\t\t\tparams.received++\n\t\t\t\t\tlogger.WithField(\"received\", params.received).Debug(\"Received a call\")\n\t\t\t\t\tparams.wg.Done()\n\t\t\t\tcase <-params.doneC:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (params *benchParams) String() string {\n\treturn 
fmt.Sprintf(`\n\t\tThroughput %.2f messages/second using:\n\t\t\t%d workers\n\t\t\t%d subscriptions\n\t\t\t%s response timeout\n\t\t\t%d clients\n\t`, params.messagesPerSecond(), params.workers, params.subscriptions, params.timeout, params.clients)\n}\n\nfunc (params *benchParams) ResetTimer() {\n\tparams.start = time.Now()\n\tparams.B.ResetTimer()\n}\n\nfunc (params *benchParams) StopTimer() {\n\tparams.end = time.Now()\n\tparams.B.StopTimer()\n}\n\nfunc (params *benchParams) duration() time.Duration {\n\treturn params.end.Sub(params.start)\n}\n\nfunc (params *benchParams) messagesPerSecond() float64 {\n\treturn float64(params.received) / params.duration().Seconds()\n}\n"
  },
  {
    "path": "server/benchmarking_fcm_test.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/fcm\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// FCM benchmarks\n// Default number of clients and subscriptions are 8, for tests that do not\n// specify this in their name\nfunc BenchmarkFCM_1Workers50MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       1,\n\t\tsubscriptions: 8,\n\t\ttimeout:       50 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputFCM()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkFCM_8Workers50MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       8,\n\t\tsubscriptions: 8,\n\t\ttimeout:       50 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputFCM()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkFCM_16Workers50MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       16,\n\t\tsubscriptions: 8,\n\t\ttimeout:       50 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputFCM()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkFCM_1Workers100MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       1,\n\t\tsubscriptions: 8,\n\t\ttimeout:       100 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputFCM()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkFCM_8Workers100MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       8,\n\t\tsubscriptions: 8,\n\t\ttimeout:       100 * 
time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputFCM()\n\tfmt.Println(params)\n}\n\nfunc BenchmarkFCM_16Workers100MilliTimeout(b *testing.B) {\n\tparams := &benchParams{\n\t\tB:             b,\n\t\tworkers:       16,\n\t\tsubscriptions: 8,\n\t\ttimeout:       100 * time.Millisecond,\n\t\tclients:       8,\n\t\tsender:        sendMessageSample,\n\t}\n\tparams.throughputFCM()\n\tfmt.Println(params)\n}\n\nfunc (params *benchParams) throughputFCM() {\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(params)\n\n\tdir, errTempDir := ioutil.TempDir(\"\", \"guble_benchmarking_fcm_test\")\n\ta.NoError(errTempDir)\n\n\t*Config.HttpListen = \"localhost:0\"\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.StoragePath = dir\n\t*Config.FCM.Enabled = true\n\t*Config.FCM.APIKey = \"WILL BE OVERWRITTEN\"\n\t*Config.FCM.Workers = params.workers\n\n\tparams.service = StartService()\n\n\tvar fcmConn connector.ResponsiveConnector\n\tvar ok bool\n\tfor _, iface := range params.service.ModulesSortedByStartOrder() {\n\t\tfcmConn, ok = iface.(connector.ResponsiveConnector)\n\t\tif ok {\n\t\t\tbreak\n\t\t}\n\t}\n\tif fcmConn == nil {\n\t\ta.FailNow(\"There should be a module of type: FCM Connector\")\n\t}\n\n\tparams.receiveC = make(chan bool)\n\tsender, err := fcm.CreateFcmSender(fcm.SuccessFCMResponse, params.receiveC, params.timeout)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\turlFormat := fmt.Sprintf(\"http://%s/fcm/%%d/gcmId%%d/subscribe/%%s\", params.service.WebServer().GetAddr())\n\tfor i := 1; i <= params.subscriptions; i++ {\n\t\t// create FCM subscription\n\t\tresponse, errPost := http.Post(\n\t\t\tfmt.Sprintf(urlFormat, i, i, strings.TrimPrefix(testTopic, \"/\")),\n\t\t\t\"text/plain\",\n\t\t\tbytes.NewBufferString(\"\"),\n\t\t)\n\t\ta.NoError(errPost)\n\t\ta.Equal(response.StatusCode, 200)\n\n\t\tbody, errReadAll := 
ioutil.ReadAll(response.Body)\n\t\ta.NoError(errReadAll)\n\t\ta.Equal(\"{\\\"subscribed\\\":\\\"/topic\\\"}\", string(body))\n\t}\n\n\tclients := params.createClients()\n\n\t// Report allocations also\n\tparams.ReportAllocs()\n\n\texpectedMessagesNumber := params.N * params.clients * params.subscriptions\n\tlogger.WithFields(log.Fields{\n\t\t\"expectedMessagesNumber\": expectedMessagesNumber,\n\t\t\"N\": params.N,\n\t}).Info(\"Expecting messages\")\n\tparams.wg.Add(expectedMessagesNumber)\n\n\t// start the receive loop (a select on receiveC and doneC)\n\tparams.doneC = make(chan struct{})\n\tparams.receiveLoop()\n\n\tparams.ResetTimer()\n\n\t// send all messages, or fail on any error\n\tfor _, cl := range clients {\n\t\tgo func(cl client.Client) {\n\t\t\tfor i := 0; i < params.N; i++ {\n\t\t\t\terr := params.sender(cl)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.FailNow(\"Message could not be sent\")\n\t\t\t\t}\n\t\t\t\tparams.sent++\n\t\t\t}\n\t\t}(cl)\n\t}\n\n\t// wait to receive all messages\n\tparams.wg.Wait()\n\n\t// stop timer after the actual test\n\tparams.StopTimer()\n\n\tclose(params.doneC)\n\ta.NoError(params.service.Stop())\n\tparams.service = nil\n\tclose(params.receiveC)\n\terrRemove := os.RemoveAll(dir)\n\tif errRemove != nil {\n\t\tlogger.WithError(errRemove).WithField(\"module\", \"testing\").Error(\"Could not remove directory\")\n\t}\n}\n"
  },
  {
    "path": "server/benchmarking_fetch_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Benchmark_E2E_Fetch_HelloWorld_Messages(b *testing.B) {\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\ta := assert.New(b)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_benchmarking_fetch_test\")\n\tdefer os.RemoveAll(dir)\n\n\t*Config.HttpListen = \"localhost:0\"\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.StoragePath = dir\n\tservice := StartService()\n\tdefer service.Stop()\n\n\ttime.Sleep(time.Millisecond * 10)\n\n\t// fill the topic\n\tlocation := \"ws://\" + service.WebServer().GetAddr() + \"/stream/user/xy\"\n\tc, err := client.Open(location, \"http://localhost/\", 1000, true)\n\ta.NoError(err)\n\n\tfor i := 1; i <= b.N; i++ {\n\t\ta.NoError(c.Send(\"/hello\", fmt.Sprintf(\"Hello %v\", i), \"\"))\n\t\tselect {\n\t\tcase <-c.StatusMessages():\n\t\t\t// wait for, but ignore\n\t\tcase <-time.After(time.Millisecond * 100):\n\t\t\ta.Fail(\"timeout on send notification\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tstart := time.Now()\n\tb.ResetTimer()\n\tc.WriteRawMessage([]byte(\"+ /hello 0 1000000\"))\n\tfor i := 1; i <= b.N; i++ {\n\t\tselect {\n\t\tcase msg := <-c.Messages():\n\t\t\ta.Equal(fmt.Sprintf(\"Hello %v\", i), msg.BodyAsString())\n\t\tcase e := <-c.Errors():\n\t\t\ta.Fail(string(e.Bytes()))\n\t\t\treturn\n\t\tcase <-time.After(time.Second):\n\t\t\ta.Fail(\"timeout on message: \" + strconv.Itoa(i))\n\t\t\treturn\n\t\t}\n\t}\n\tb.StopTimer()\n\n\tend := time.Now()\n\tthroughput := float64(b.N) / end.Sub(start).Seconds()\n\tfmt.Printf(\"\\n\\tThroughput: %v/sec (%v message in %v)\\n\", int(throughput), b.N, end.Sub(start))\n}\n"
  },
  {
    "path": "server/benchmarking_test.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/testutil\"\n)\n\ntype testgroup struct {\n\tt                   *testing.T\n\tgroupID             int\n\taddr                string\n\tdone                chan bool\n\tmessagesToSend      int\n\tconsumer, publisher client.Client\n\ttopic               string\n}\n\nfunc newTestgroup(t *testing.T, groupID int, addr string, messagesToSend int) *testgroup {\n\treturn &testgroup{\n\t\tt:              t,\n\t\tgroupID:        groupID,\n\t\taddr:           addr,\n\t\tdone:           make(chan bool),\n\t\tmessagesToSend: messagesToSend,\n\t}\n}\n\nfunc TestThroughput(t *testing.T) {\n\t// TODO: We disabled this test because the receiver implementation of fetching messages\n\t// should be reimplemented according to the new message store\n\ttestutil.SkipIfDisabled(t)\n\ttestutil.SkipIfShort(t)\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\tdir, _ := ioutil.TempDir(\"\", \"guble_benchmarking_test\")\n\n\t*Config.HttpListen = \"localhost:0\"\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.StoragePath = dir\n\n\tservice := StartService()\n\n\ttestgroupCount := 4\n\tmessagesPerGroup := 100\n\tlog.Printf(\"init the %v testgroups\", testgroupCount)\n\ttestgroups := make([]*testgroup, testgroupCount, testgroupCount)\n\tfor i := range testgroups {\n\t\ttestgroups[i] = newTestgroup(t, i, service.WebServer().GetAddr(), messagesPerGroup)\n\t}\n\n\t// init test\n\tlog.Print(\"init the testgroups\")\n\tfor i := range testgroups {\n\t\ttestgroups[i].Init()\n\t}\n\n\tdefer func() {\n\t\t// cleanup tests\n\t\tlog.Print(\"cleanup the testgroups\")\n\t\tfor i := range testgroups {\n\t\t\ttestgroups[i].Clean()\n\t\t}\n\n\t\tservice.Stop()\n\n\t\tos.RemoveAll(dir)\n\t}()\n\n\t// start 
test\n\tlog.Print(\"start the testgroups\")\n\tstart := time.Now()\n\tfor i := range testgroups {\n\t\tgo testgroups[i].Start()\n\t}\n\n\tlog.Print(\"wait for finishing\")\n\tfor i, test := range testgroups {\n\t\tselect {\n\t\tcase successFlag := <-test.done:\n\t\t\tif !successFlag {\n\t\t\t\tt.Logf(\"testgroup %v returned with error\", i)\n\t\t\t\tt.FailNow()\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-time.After(time.Second * 20):\n\t\t\tt.Log(\"timeout. testgroups not ready before timeout\")\n\t\t\tt.Fail()\n\t\t\treturn\n\t\t}\n\t}\n\n\tend := time.Now()\n\ttotalMessages := testgroupCount * messagesPerGroup\n\tthroughput := float64(totalMessages) / end.Sub(start).Seconds()\n\tlog.Printf(\"finished! Throughput: %v/sec (%v message in %v)\", int(throughput), totalMessages, end.Sub(start))\n\n\ttime.Sleep(time.Second * 1)\n}\n\nfunc (tg *testgroup) Init() {\n\ttg.topic = fmt.Sprintf(\"/%v-foo\", tg.groupID)\n\tvar err error\n\tlocation := \"ws://\" + tg.addr + \"/stream/user/xy\"\n\t//location := \"ws://gathermon.mancke.net:8080/stream/\"\n\t//location := \"ws://127.0.0.1:8080/stream/\"\n\ttg.consumer, err = client.Open(location, \"http://localhost/\", 10, false)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttg.publisher, err = client.Open(location, \"http://localhost/\", 10, false)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttg.expectStatusMessage(protocol.SUCCESS_CONNECTED, \"You are connected to the server.\")\n\n\ttg.consumer.Subscribe(tg.topic)\n\ttime.Sleep(time.Millisecond * 1)\n\t//test.expectStatusMessage(protocol.SUCCESS_SUBSCRIBED_TO, test.topic)\n}\n\nfunc (tg *testgroup) expectStatusMessage(name string, arg string) {\n\tselect {\n\tcase notify := <-tg.consumer.StatusMessages():\n\t\tassert.Equal(tg.t, name, notify.Name)\n\t\tassert.Equal(tg.t, arg, notify.Arg)\n\tcase <-time.After(time.Second * 1):\n\t\ttg.t.Logf(\"[%v] no notification of type %s until timeout\", tg.groupID, name)\n\t\ttg.done <- false\n\t\ttg.t.Fail()\n\t\treturn\n\t}\n}\n\nfunc (tg 
*testgroup) Start() {\n\tgo func() {\n\t\tfor i := 0; i < tg.messagesToSend; i++ {\n\t\t\tbody := fmt.Sprintf(\"Hallo-%d\", i)\n\t\t\ttg.publisher.Send(tg.topic, body, \"\")\n\t\t}\n\t}()\n\n\tfor i := 0; i < tg.messagesToSend; i++ {\n\t\tbody := fmt.Sprintf(\"Hallo-%d\", i)\n\n\t\tselect {\n\t\tcase msg := <-tg.consumer.Messages():\n\t\t\tassert.Equal(tg.t, tg.topic, string(msg.Path))\n\t\t\tif !assert.Equal(tg.t, body, msg.BodyAsString()) {\n\t\t\t\ttg.t.FailNow()\n\t\t\t\ttg.done <- false\n\t\t\t}\n\t\tcase msg := <-tg.consumer.Errors():\n\t\t\ttg.t.Logf(\"[%v] received error: %v\", tg.groupID, msg)\n\t\t\ttg.done <- false\n\t\t\ttg.t.Fail()\n\t\t\treturn\n\t\tcase <-time.After(time.Second * 5):\n\t\t\ttg.t.Logf(\"[%v] no message received until timeout, expected message %v\", tg.groupID, i)\n\t\t\ttg.done <- false\n\t\t\ttg.t.Fail()\n\t\t\treturn\n\t\t}\n\t}\n\ttg.done <- true\n}\n\nfunc (tg *testgroup) Clean() {\n\ttg.consumer.Close()\n\ttg.publisher.Close()\n}\n"
  },
  {
    "path": "server/cluster/cluster.go",
    "content": "package cluster\n\nimport (\n\t\"io/ioutil\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/store\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/hashicorp/memberlist\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n)\n\nvar (\n\tErrNodeNotFound = errors.New(\"Node not found.\")\n)\n\n// Config is a struct used by the local node when creating and running the guble cluster\ntype Config struct {\n\tID                   uint8\n\tHost                 string\n\tPort                 int\n\tRemotes              []*net.TCPAddr\n\tHealthScoreThreshold int\n}\n\n// router interface specify only the methods we require in cluster from the Router\n// router is an interface used for handling messages in cluster.\n// It is logically connected to the router.Router interface, by reusing the same func signature.\ntype router interface {\n\tHandleMessage(message *protocol.Message) error\n\tMessageStore() (store.MessageStore, error)\n}\n\n// Cluster is a struct for managing the `local view` of the guble cluster, as seen by a node.\ntype Cluster struct {\n\t// Pointer to a Config struct, based on which the Cluster node is created and runs.\n\tConfig *Config\n\n\t// Router is used for dispatching messages received by this node.\n\t// Should be set after the node is created with New(), and before Start().\n\tRouter router\n\n\tname       string\n\tmemberlist *memberlist.Memberlist\n\tbroadcasts [][]byte\n\n\tnumJoins   int\n\tnumLeaves  int\n\tnumUpdates int\n\n\tsynchronizer *synchronizer\n}\n\n//New returns a new instance of the cluster, created using the given Config.\nfunc New(config *Config) (*Cluster, error) {\n\tc := &Cluster{\n\t\tConfig: config,\n\t\tname:   fmt.Sprintf(\"%d\", config.ID),\n\t}\n\n\tmemberlistConfig := memberlist.DefaultLANConfig()\n\tmemberlistConfig.Name = c.name\n\tmemberlistConfig.BindAddr = config.Host\n\tmemberlistConfig.BindPort = config.Port\n\n\t//TODO Cosmin temporarily disabling any 
logging from memberlist, we might want to enable it again using logrus?\n\tmemberlistConfig.LogOutput = ioutil.Discard\n\n\tml, err := memberlist.Create(memberlistConfig)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err).Error(\"Error when creating the internal memberlist of the cluster\")\n\t\treturn nil, err\n\t}\n\tc.memberlist = ml\n\tmemberlistConfig.Delegate = c\n\tmemberlistConfig.Conflict = c\n\tmemberlistConfig.Events = c\n\n\treturn c, nil\n}\n\n// Start the cluster module.\nfunc (cluster *Cluster) Start() error {\n\tlogger.WithField(\"remotes\", cluster.Config.Remotes).Debug(\"Starting Cluster\")\n\n\tif cluster.Router == nil {\n\t\terrorMessage := \"There should be a valid Router already set-up\"\n\t\tlogger.Error(errorMessage)\n\t\treturn errors.New(errorMessage)\n\t}\n\n\tsynchronizer, err := newSynchronizer(cluster)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error creating cluster synchronizer\")\n\t\treturn err\n\t}\n\tcluster.synchronizer = synchronizer\n\n\tnum, err := cluster.memberlist.Join(cluster.remotesAsStrings())\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err).Error(\"Error when this node wanted to join the cluster\")\n\t\treturn err\n\t}\n\tif num == 0 {\n\t\terrorMessage := \"No remote hosts were successfully contacted when this node wanted to join the cluster\"\n\t\tlogger.WithField(\"remotes\", cluster.remotesAsStrings()).Error(errorMessage)\n\t\treturn errors.New(errorMessage)\n\t}\n\n\tlogger.Debug(\"Started Cluster\")\n\n\treturn nil\n}\n\n// Stop the cluster module.\nfunc (cluster *Cluster) Stop() error {\n\tif cluster.synchronizer != nil {\n\t\tclose(cluster.synchronizer.stopC)\n\t}\n\treturn cluster.memberlist.Shutdown()\n}\n\n// Check returns a non-nil error if the health status of the cluster (as seen by this node) is not perfect.\nfunc (cluster *Cluster) Check() error {\n\tif healthScore := cluster.memberlist.GetHealthScore(); healthScore > cluster.Config.HealthScoreThreshold {\n\t\terrorMessage := 
\"Cluster Health Score is not perfect\"\n\t\tlogger.WithField(\"healthScore\", healthScore).Error(errorMessage)\n\t\treturn errors.New(errorMessage)\n\t}\n\treturn nil\n}\n\n// newMessage returns a *message to be used in broadcasting or sending to a node\nfunc (cluster *Cluster) newMessage(t messageType, body []byte) *message {\n\treturn &message{\n\t\tNodeID: cluster.Config.ID,\n\t\tType:   t,\n\t\tBody:   body,\n\t}\n}\n\nfunc (cluster *Cluster) newEncoderMessage(t messageType, entity encoder) (*message, error) {\n\tbody, err := entity.encode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cluster.newMessage(t, body), nil\n}\n\n// BroadcastString broadcasts a string to all the other nodes in the guble cluster\nfunc (cluster *Cluster) BroadcastString(sMessage *string) error {\n\tlogger.WithField(\"string\", sMessage).Debug(\"BroadcastString\")\n\tcMessage := &message{\n\t\tNodeID: cluster.Config.ID,\n\t\tType:   mtStringMessage,\n\t\tBody:   []byte(*sMessage),\n\t}\n\treturn cluster.broadcastClusterMessage(cMessage)\n}\n\n// BroadcastMessage broadcasts a guble-protocol-message to all the other nodes in the guble cluster.\nfunc (cluster *Cluster) BroadcastMessage(pMessage *protocol.Message) error {\n\tlogger.WithField(\"message\", pMessage).Debug(\"BroadcastMessage\")\n\tcMessage := &message{\n\t\tNodeID: cluster.Config.ID,\n\t\tType:   mtGubleMessage,\n\t\tBody:   pMessage.Bytes(),\n\t}\n\treturn cluster.broadcastClusterMessage(cMessage)\n}\n\nfunc (cluster *Cluster) broadcastClusterMessage(cMessage *message) error {\n\tif cMessage == nil {\n\t\terrorMessage := \"Could not broadcast a nil cluster-message\"\n\t\tlogger.Error(errorMessage)\n\t\treturn errors.New(errorMessage)\n\t}\n\n\tcMessageBytes, err := cMessage.encode()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not encode and broadcast cluster-message\")\n\t\treturn err\n\t}\n\n\tfor _, node := range cluster.memberlist.Members() {\n\t\tif cluster.name == node.Name 
{\n\t\t\tcontinue\n\t\t}\n\t\tgo cluster.sendToNode(node, cMessageBytes)\n\t}\n\treturn nil\n}\n\nfunc (cluster *Cluster) sendToNode(node *memberlist.Node, msgBytes []byte) error {\n\tlogger.WithFields(log.Fields{\n\t\t\"node\": cluster.Config.ID,\n\t\t\"to\":   node.Name,\n\t}).Debug(\"Sending cluster-message to a node\")\n\n\terr := cluster.memberlist.SendToTCP(node, msgBytes)\n\tif err != nil {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"err\":  err,\n\t\t\t\"node\": node,\n\t\t}).Error(\"Error sending cluster-message to a node\")\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cluster *Cluster) sendMessageToNode(node *memberlist.Node, cmsg *message) error {\n\tlogger.WithField(\"node\", node.Name).Debug(\"Sending message to a node\")\n\n\tbytes, err := cmsg.encode()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not encode and broadcast cluster-message\")\n\t\treturn err\n\t}\n\n\tif err = cluster.memberlist.SendToTCP(node, bytes); err != nil {\n\t\tlogger.WithField(\"node\", node.Name).WithError(err).Error(\"Error send message to node\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cluster *Cluster) sendMessageToNodeID(nodeID uint8, cmsg *message) error {\n\tnode := cluster.GetNodeByID(nodeID)\n\tif node == nil {\n\t\treturn ErrNodeNotFound\n\t}\n\n\treturn cluster.sendMessageToNode(node, cmsg)\n}\n\nfunc (cluster *Cluster) GetNodeByID(id uint8) *memberlist.Node {\n\tname := strconv.FormatUint(uint64(id), 10)\n\tfor _, node := range cluster.memberlist.Members() {\n\t\tif node.Name == name {\n\t\t\treturn node\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (cluster *Cluster) remotesAsStrings() (strings []string) {\n\tlog.WithField(\"Remotes\", cluster.Config.Remotes).Debug(\"Cluster remotes\")\n\tfor _, remote := range cluster.Config.Remotes {\n\t\tstrings = append(strings, remote.IP.String()+\":\"+strconv.Itoa(remote.Port))\n\t}\n\treturn\n}\n"
  },
  {
    "path": "server/cluster/cluster_benchmarking_test.go",
    "content": "package cluster\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/hashicorp/memberlist\"\n\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc BenchmarkMemberListCluster(b *testing.B) {\n\tbenchmarkCluster(b, 36, 10*time.Second, 15000)\n}\n\nfunc benchmarkCluster(b *testing.B, num int, timeoutForAllJoins time.Duration, lowestPort int) {\n\tstartTime := time.Now()\n\n\tvar nodes []*memberlist.Memberlist\n\teventC := make(chan memberlist.NodeEvent, num)\n\taddr := \"127.0.0.1\"\n\tvar firstMemberName string\n\tfor i := 0; i < num; i++ {\n\t\tc := memberlist.DefaultLANConfig()\n\t\tport := lowestPort + i\n\t\tc.Name = fmt.Sprintf(\"%s:%d\", addr, port)\n\t\tc.BindAddr = addr\n\t\tc.BindPort = port\n\t\tc.ProbeInterval = 20 * time.Millisecond\n\t\tc.ProbeTimeout = 100 * time.Millisecond\n\t\tc.GossipInterval = 20 * time.Millisecond\n\t\tc.PushPullInterval = 200 * time.Millisecond\n\n\t\tc.LogOutput = ioutil.Discard\n\n\t\tif i == 0 {\n\t\t\tc.Events = &memberlist.ChannelEventDelegate{eventC}\n\t\t\tfirstMemberName = c.Name\n\t\t}\n\n\t\tnewMember, err := memberlist.Create(c)\n\t\tif err != nil {\n\t\t\tlog.WithField(\"error\", err).Fatal(\"Unexpected error when creating the memberlist\")\n\t\t}\n\t\tnodes = append(nodes, newMember)\n\t\tdefer newMember.Shutdown()\n\n\t\tif i > 0 {\n\t\t\tnumContacted, err := newMember.Join([]string{firstMemberName})\n\t\t\tif numContacted == 0 || err != nil {\n\t\t\t\tlog.WithField(\"error\", err).Fatal(\"Unexpected fatal error when node wanted to join the cluster\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif convergence(nodes, num, eventC, timeoutForAllJoins) {\n\t\tendTime := time.Now()\n\t\tlog.WithField(\"durationSeconds\", endTime.Sub(startTime).Seconds()).Info(\"Cluster convergence reached\")\n\t}\n\n\tb.StartTimer()\n\tsendMessagesInCluster(nodes, b.N)\n\tb.StopTimer()\n}\n\nfunc convergence(nodes []*memberlist.Memberlist, num int, eventC chan memberlist.NodeEvent, timeoutForAllJoins 
time.Duration) bool {\n\tbreakTimer := time.After(timeoutForAllJoins)\n\tnumJoins := 0\nWAIT:\n\tfor {\n\t\tselect {\n\t\tcase e := <-eventC:\n\t\t\tl := log.WithFields(log.Fields{\n\t\t\t\t\"node\":       *e.Node,\n\t\t\t\t\"numJoins\":   numJoins,\n\t\t\t\t\"numMembers\": nodes[0].NumMembers(),\n\t\t\t})\n\t\t\tif e.Event == memberlist.NodeJoin {\n\t\t\t\tl.Info(\"Node join\")\n\t\t\t\tnumJoins++\n\t\t\t\tif numJoins == num {\n\t\t\t\t\tl.Info(\"All nodes joined\")\n\t\t\t\t\tbreak WAIT\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tl.Info(\"Node leave\")\n\t\t\t}\n\t\tcase <-breakTimer:\n\t\t\tbreak WAIT\n\t\t}\n\t}\n\n\tif numJoins != num {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"joinCounter\": numJoins,\n\t\t\t\"num\":         num,\n\t\t}).Error(\"Timeout before completing all joins\")\n\t}\n\n\tconvergence := false\n\tfor !convergence {\n\t\tconvergence = true\n\t\tfor idx, node := range nodes {\n\t\t\tnumSeenByNode := node.NumMembers()\n\t\t\tif numSeenByNode != num {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"index\":    idx,\n\t\t\t\t\t\"expected\": num,\n\t\t\t\t\t\"actual\":   numSeenByNode,\n\t\t\t\t}).Debug(\"Wrong number of nodes\")\n\t\t\t\tconvergence = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn numJoins == num\n}\n\nfunc sendMessagesInCluster(nodes []*memberlist.Memberlist, numMessages int) {\n\tfor senderID, node := range nodes {\n\t\tfor receiverID, member := range node.Members() {\n\t\t\tfor i := 0; i < numMessages; i++ {\n\t\t\t\tmessage := fmt.Sprintf(\"Hello from %v to %v !\", senderID, receiverID)\n\t\t\t\tlog.WithField(\"message\", message).Debug(\"SendToTCP\")\n\t\t\t\tnode.SendToTCP(member, []byte(message))\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "server/cluster/cluster_conflict.go",
    "content": "package cluster\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/hashicorp/memberlist\"\n)\n\n// =============================================================\n// memberlist.ConflictDelegate implementation for cluster struct\n// =============================================================\n\nfunc (cluster *Cluster) NotifyConflict(existing, other *memberlist.Node) {\n\tlogger.WithFields(log.Fields{\n\t\t\"existing\": *existing,\n\t\t\"other\":    *other,\n\t}).Panic(\"NotifyConflict\")\n}\n"
  },
  {
    "path": "server/cluster/cluster_delegate.go",
    "content": "package cluster\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/smancke/guble/protocol\"\n)\n\n// ======================================================\n// memberslist.Delegate implementation for cluster struct\n// ======================================================\n\n// NotifyMsg is invoked each time a message is received by this node of the cluster;\n// it decodes and dispatches the messages.\nfunc (cluster *Cluster) NotifyMsg(data []byte) {\n\tlogger.WithField(\"msgAsBytes\", data).Debug(\"NotifyMsg\")\n\n\tcmsg := new(message)\n\terr := cmsg.decode(data)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Decoding of cluster message failed\")\n\t\treturn\n\t}\n\n\tlogger.WithFields(log.Fields{\n\t\t\"senderNodeID\": cmsg.NodeID,\n\t\t\"type\":         cmsg.Type,\n\t}).Debug(\"NotifyMsg: Received cluster message\")\n\n\tswitch cmsg.Type {\n\tcase mtGubleMessage:\n\t\tcluster.handleGubleMessage(cmsg)\n\tcase mtSyncPartitions:\n\t\tcluster.handleSyncPartitions(cmsg)\n\tcase mtSyncMessage:\n\t\tcluster.handleSyncMessage(cmsg)\n\tcase mtSyncMessageRequest:\n\t\t// cluster node is requesting to receive messages for sync\n\t\tcluster.handleSyncMessageRequest(cmsg)\n\t}\n}\n\nfunc (cluster *Cluster) GetBroadcasts(overhead, limit int) [][]byte {\n\tb := cluster.broadcasts\n\tcluster.broadcasts = nil\n\treturn b\n}\n\nfunc (cluster *Cluster) NodeMeta(limit int) []byte { return nil }\n\nfunc (cluster *Cluster) LocalState(join bool) []byte { return nil }\n\nfunc (cluster *Cluster) MergeRemoteState(s []byte, join bool) {}\n\n// handles message received with type `mtGubleMessage`\nfunc (cluster *Cluster) handleGubleMessage(cmsg *message) {\n\tif cluster.Router == nil {\n\t\treturn\n\t}\n\tmessage, err := protocol.ParseMessage(cmsg.Body)\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"Parsing of guble-message contained in cluster-message failed\")\n\t\treturn\n\t}\n\tcluster.Router.HandleMessage(message)\n}\n\n// 
handles message received with type `mtSyncPartitions`\nfunc (cluster *Cluster) handleSyncPartitions(cmsg *message) {\n\tlogger.WithField(\"message\", cmsg).Debug(\"Received sync partitions message\")\n\n\t// Decode message\n\tpartitionsSlice := make(partitions, 0)\n\t// Decode data into the new slice\n\terr := partitionsSlice.decode(cmsg.Body)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error decoding partitions\")\n\t\treturn\n\t}\n\n\tlogger.WithFields(log.Fields{\n\t\t\"partitions\": partitionsSlice,\n\t\t\"nodeID\":     cmsg.NodeID,\n\t}).Debug(\"Partitions received\")\n\n\t// add to synchronizer\n\tcluster.synchronizer.sync(cmsg.NodeID, partitionsSlice)\n}\n\nfunc (cluster *Cluster) handleSyncMessage(cmsg *message) {\n\tlogger.WithField(\"cmsg\", cmsg).Debug(\"Handling sync message\")\n\terr := cluster.synchronizer.syncMessage(cmsg.NodeID, cmsg.Body)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error synchronizing messages\")\n\t}\n}\n\nfunc (cluster *Cluster) handleSyncMessageRequest(cmsg *message) {\n\tlogger.WithField(\"cmsg\", cmsg).Debug(\"Handling sync message request\")\n\terr := cluster.synchronizer.messageRequest(cmsg.NodeID, cmsg.Body)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error send synchronization messages\")\n\t}\n}\n"
  },
  {
    "path": "server/cluster/cluster_event_delegate.go",
    "content": "package cluster\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/hashicorp/memberlist\"\n)\n\n// ==========================================================\n// memberlist.EventDelegate implementation for cluster struct\n// ==========================================================\n\nfunc (cluster *Cluster) NotifyJoin(node *memberlist.Node) {\n\tcluster.numJoins++\n\tcluster.eventLog(node, \"Cluster Node Join\")\n\n\tcluster.sendPartitions(node)\n}\n\nfunc (cluster *Cluster) NotifyLeave(node *memberlist.Node) {\n\tcluster.numLeaves++\n\tcluster.eventLog(node, \"Cluster Node Leave\")\n}\n\nfunc (cluster *Cluster) NotifyUpdate(node *memberlist.Node) {\n\tcluster.numUpdates++\n\tcluster.eventLog(node, \"Cluster Node Update\")\n}\n\nfunc (cluster *Cluster) eventLog(node *memberlist.Node, message string) {\n\tlogger.WithFields(log.Fields{\n\t\t\"node\":       node.Name,\n\t\t\"numJoins\":   cluster.numJoins,\n\t\t\"numLeaves\":  cluster.numLeaves,\n\t\t\"numUpdates\": cluster.numUpdates,\n\t}).Debug(message)\n}\n\nfunc (cluster *Cluster) sendPartitions(node *memberlist.Node) {\n\tif _, inSync := cluster.synchronizer.inSync(node.Name); inSync {\n\t\tlogger.WithField(\"node\", node.Name).Debug(\"Already in sync with node\")\n\t\treturn\n\t}\n\n\tlogger.WithField(\"node\", node.Name).Debug(\"Sending partitions info\")\n\n\t// Send message partitions to the new node\n\tstore, err := cluster.Router.MessageStore()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error retriving message store to get partitions\")\n\t\treturn\n\t}\n\n\tpartitionsSlice := partitionsFromStore(store)\n\n\t// sending partitions\n\tdata, err := partitionsSlice.encode()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error encoding partitions\")\n\t\treturn\n\t}\n\tcmsg := cluster.newMessage(mtSyncPartitions, data)\n\n\t// send message to node\n\terr = cluster.sendMessageToNode(node, cmsg)\n\tif err != nil {\n\t\tlogger.WithField(\"node\", 
node.Name).WithError(err).Error(\"Error sending partitions info to node\")\n\t}\n}\n"
  },
  {
    "path": "server/cluster/cluster_test.go",
    "content": "package cluster\n\nimport (\n\t\"io/ioutil\"\n\n\t\"github.com/smancke/guble/server/store/filestore\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/store\"\n\n\t\"github.com/hashicorp/go-multierror\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"errors\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst basePort = 10000\n\nvar (\n\tindex = 1\n)\n\nfunc testConfig() (config Config) {\n\tremoteAddr := net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: basePort + index}\n\tvar remotes []*net.TCPAddr\n\tremotes = append(remotes, &remoteAddr)\n\tconfig = Config{ID: uint8(index), Host: \"127.0.0.1\", Port: basePort + index, Remotes: remotes}\n\tindex++\n\treturn\n}\n\nfunc testConfigAnother() (config Config) {\n\tremoteAddr := net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: basePort + index - 1}\n\tvar remotes []*net.TCPAddr\n\tremotes = append(remotes, &remoteAddr)\n\tconfig = Config{ID: uint8(index), Host: \"127.0.0.1\", Port: basePort + index, Remotes: remotes}\n\tindex++\n\treturn\n}\n\nfunc TestCluster_StartCheckStop(t *testing.T) {\n\ta := assert.New(t)\n\n\tconf := testConfig()\n\tnode, err := New(&conf)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode.Router = newDummyRouter(t)\n\n\terr = node.Start()\n\ta.NoError(err, \"No error should be raised when Starting the Cluster\")\n\n\terr = node.Check()\n\ta.NoError(err, \"Health-check score of a Cluster with a single node should be OK\")\n\n\terr = node.Stop()\n\ta.NoError(err, \"No error should be raised when Stopping the Cluster\")\n}\n\nfunc TestCluster_BroadcastStringAndMessageAndCheck(t *testing.T) {\n\ta := assert.New(t)\n\n\tconfig1 := testConfig()\n\tnode1, err := New(&config1)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode1.Router = newDummyRouter(t)\n\n\t//start the cluster node 1\n\tdefer node1.Stop()\n\terr = node1.Start()\n\ta.NoError(err, \"No error should be raised when starting node 1 of 
the Cluster\")\n\n\tconfig2 := testConfigAnother()\n\tnode2, err := New(&config2)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode2.Router = newDummyRouter(t)\n\n\t//start the cluster node 2\n\tdefer node2.Stop()\n\terr = node2.Start()\n\ta.NoError(err, \"No error should be raised when starting node 2 of the Cluster\")\n\n\t// Send a String Message\n\tstr := \"TEST\"\n\terr = node1.BroadcastString(&str)\n\ta.NoError(err, \"No error should be raised when sending a string to Cluster\")\n\n\t// and a protocol message\n\tpmsg := protocol.Message{\n\t\tID:            1,\n\t\tPath:          \"/stuff\",\n\t\tUserID:        \"id\",\n\t\tApplicationID: \"appId\",\n\t\tTime:          time.Now().Unix(),\n\t\tHeaderJSON:    \"{}\",\n\t\tBody:          []byte(\"test\"),\n\t\tNodeID:        1}\n\terr = node1.BroadcastMessage(&pmsg)\n\ta.NoError(err, \"No error should be raised when sending a protocol message to Cluster\")\n\n\terr = node1.Check()\n\ta.NoError(err, \"Health-check score of a Cluster with 2 nodes should be OK for node 1\")\n\n\terr = node2.Check()\n\ta.NoError(err, \"Health-check score of a Cluster with 2 nodes should be OK for node 2\")\n}\n\nfunc TestCluster_NewShouldReturnErrorWhenPortIsInvalid(t *testing.T) {\n\ta := assert.New(t)\n\n\tremoteAddr := net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: basePort + index - 1}\n\tvar remotes []*net.TCPAddr\n\tremotes = append(remotes, &remoteAddr)\n\tindex++\n\n\tconfig := Config{ID: 1, Host: \"localhost\", Port: -1, Remotes: remotes}\n\t_, err := New(&config)\n\tif a.Error(err, \"An error was expected when Creating the Cluster\") {\n\t\ta.Equal(err, errors.New(\"Failed to start TCP listener. 
Err: listen tcp :-1: bind: invalid argument\"),\n\t\t\t\"Error should be precisely defined\")\n\t}\n}\n\nfunc TestCluster_StartShouldReturnErrorWhenNoRemotes(t *testing.T) {\n\ta := assert.New(t)\n\n\tvar remotes []*net.TCPAddr\n\tindex++\n\n\tconfig := Config{ID: 1, Host: \"localhost\", Port: basePort + index - 1, Remotes: remotes}\n\tnode, err := New(&config)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode.Router = newDummyRouter(t)\n\n\tdefer node.Stop()\n\terr = node.Start()\n\tif a.Error(err, \"An error is expected when Starting the Cluster\") {\n\t\ta.Equal(err, errors.New(\"No remote hosts were successfully contacted when this node wanted to join the cluster\"),\n\t\t\t\"Error should be precisely defined\")\n\t}\n}\n\nfunc TestCluster_StartShouldReturnErrorWhenInvalidRemotes(t *testing.T) {\n\ta := assert.New(t)\n\n\tremoteAddr := net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0}\n\tvar remotes []*net.TCPAddr\n\tremotes = append(remotes, &remoteAddr)\n\tindex++\n\n\tconfig := Config{ID: 1, Host: \"localhost\", Port: basePort + index - 1, Remotes: remotes}\n\tnode, err := New(&config)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode.Router = newDummyRouter(t)\n\n\tdefer node.Stop()\n\terr = node.Start()\n\tif a.Error(err, \"An error is expected when Starting the Cluster\") {\n\t\texpected := multierror.Append(errors.New(\"Failed to join 127.0.0.1: dial tcp 127.0.0.1:0: getsockopt: connection refused\"))\n\t\ta.Equal(err, expected, \"Error should be precisely defined\")\n\t}\n}\n\nfunc TestCluster_StartShouldReturnErrorWhenNoMessageHandler(t *testing.T) {\n\ta := assert.New(t)\n\n\tconfig := testConfig()\n\tnode, err := New(&config)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tdefer node.Stop()\n\terr = node.Start()\n\tif a.Error(err, \"An error is expected when Starting the Cluster\") {\n\t\texpected := errors.New(\"There should be a valid Router already 
set-up\")\n\t\ta.Equal(expected, err, \"Error should be precisely defined\")\n\t}\n}\n\nfunc TestCluster_NotifyMsgShouldSimplyReturnWhenDecodingInvalidMessage(t *testing.T) {\n\ta := assert.New(t)\n\n\tconfig := testConfig()\n\tnode, err := New(&config)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode.Router = newDummyRouter(t)\n\n\tdefer node.Stop()\n\terr = node.Start()\n\ta.NoError(err, \"No error should be raised when Starting the Cluster\")\n\n\tnode.NotifyMsg([]byte{})\n\n\t//TODO Cosmin check that HandleMessage is not invoked (i.e. invalid message is not dispatched)\n}\n\nfunc TestCluster_broadcastClusterMessage(t *testing.T) {\n\ta := assert.New(t)\n\n\tconfig := testConfig()\n\tnode, err := New(&config)\n\ta.NoError(err, \"No error should be raised when Creating the Cluster\")\n\n\tnode.Router = newDummyRouter(t)\n\n\tdefer node.Stop()\n\terr = node.Start()\n\ta.NoError(err, \"No error should be raised when Starting the Cluster\")\n\n\terr = node.broadcastClusterMessage(nil)\n\tif a.Error(err, \"An error is expected from broadcastClusterMessage\") {\n\t\texpected := errors.New(\"Could not broadcast a nil cluster-message\")\n\t\ta.Equal(err, expected, \"Error should be precisely defined\")\n\t}\n}\n\ntype dummyRouter struct {\n\tstore store.MessageStore\n}\n\nfunc newDummyRouter(t *testing.T) *dummyRouter {\n\tdir, err := ioutil.TempDir(\"\", \"guble_cluster_test\")\n\tassert.NoError(t, err)\n\treturn &dummyRouter{store: filestore.New(dir)}\n}\n\nfunc (_ *dummyRouter) HandleMessage(pmsg *protocol.Message) error {\n\treturn nil\n}\n\nfunc (d *dummyRouter) MessageStore() (store.MessageStore, error) {\n\treturn d.store, nil\n}\n"
  },
  {
    "path": "server/cluster/codec.go",
    "content": "package cluster\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/ugorji/go/codec\"\n)\n\ntype messageType int\n\nvar h = &codec.MsgpackHandle{}\n\nconst (\n\t// Guble protocol.Message\n\tmtGubleMessage messageType = iota\n\n\t// A node will send this message type when the body contains the partitions\n\t// in it's store with the max message id for each ([]partitions)\n\tmtSyncPartitions\n\n\t// Sent this to request a node to give us the next message so we can save it\n\tmtSyncMessageRequest\n\n\t// Sent to synchronize a message, contains the message to synchonrize along with\n\t// updated partition info\n\tmtSyncMessage\n\n\tmtStringMessage\n)\n\ntype encoder interface {\n\tencode() ([]byte, error)\n}\n\ntype decoder interface {\n\tdecode(data []byte) error\n}\n\ntype message struct {\n\tNodeID uint8\n\tType   messageType\n\tBody   []byte\n}\n\nfunc (cmsg *message) encode() ([]byte, error) {\n\tlogger.WithFields(log.Fields{\n\t\t\"nodeID\": cmsg.NodeID,\n\t\t\"type\":   cmsg.Type,\n\t\t\"body\":   string(cmsg.Body),\n\t}).Debug(\"Encoding cluster message\")\n\treturn encode(cmsg)\n}\n\nfunc (cmsg *message) decode(data []byte) error {\n\tlogger.WithField(\"data\", string(data)).Debug(\"decode\")\n\treturn decode(cmsg, data)\n}\n\nfunc encode(entity interface{}) ([]byte, error) {\n\tlogger.WithField(\"entity\", entity).Debug(\"Encoding\")\n\n\tvar bytes []byte\n\tencoder := codec.NewEncoderBytes(&bytes, h)\n\n\terr := encoder.Encode(entity)\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"Encoding failed\")\n\t\treturn nil, err\n\t}\n\n\treturn bytes, nil\n}\n\nfunc decode(o interface{}, data []byte) error {\n\tlogger.WithField(\"data\", string(data)).Debug(\"Decoding\")\n\n\tdecoder := codec.NewDecoderBytes(data, h)\n\n\terr := decoder.Decode(o)\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"Decoding failed\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "server/cluster/codec_test.go",
    "content": "package cluster\n"
  },
  {
    "path": "server/cluster/logger.go",
    "content": "package cluster\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"cluster\",\n})\n"
  },
  {
    "path": "server/cluster/synchronizer.go",
    "content": "package cluster\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/ugorji/go/codec\"\n)\n\nconst (\n\tsyncPartitionsProcessBuffer = 100\n)\n\nvar (\n\tErrNodeNotInSync = errors.New(\"Node not found in syncPartitions list.\")\n\n\tErrMissingSyncPartition = errors.New(\"Missing sync partition\")\n)\n\ntype synchronizer struct {\n\tcluster *Cluster\n\tstore   store.MessageStore\n\n\t// map to keep track of nodes and remote partitions and local partitions\n\tsyncPartitions map[string]*syncPartition\n\tnodes          map[uint8]partitions // store the lastest info received from a node\n\n\tsync.RWMutex\n\n\tlogger *log.Entry\n\tstopC  chan struct{}\n}\n\nfunc newSynchronizer(cluster *Cluster) (*synchronizer, error) {\n\tstore, err := cluster.Router.MessageStore()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error retriving message store for synchronizer\")\n\t\treturn nil, err\n\t}\n\n\treturn &synchronizer{\n\t\tcluster:        cluster,\n\t\tstore:          store,\n\t\tsyncPartitions: make(map[string]*syncPartition),\n\t\tnodes:          make(map[uint8]partitions),\n\n\t\tlogger: logger.WithField(\"module\", \"synchronizer\"),\n\t\tstopC:  make(chan struct{}),\n\t}, nil\n}\n\n// add the partitions received from a node to the nodes list and start the loop\n// for each partition\nfunc (s *synchronizer) sync(nodeID uint8, partitions partitions) {\n\tif s.inSyncID(nodeID) {\n\t\treturn\n\t}\n\n\ts.addNode(nodeID, partitions)\n}\n\n// inSync returns nodeID and a boolean value specifying if this node is already in sync\n// returns 0 as nodeID and false if the node cannot be parsed\n// the cluster should not send partitions nor should accept partitions information\n// from a node that is already in sync\nfunc (s *synchronizer) inSync(nodeID string) (uint8, bool) {\n\tid, err := strconv.ParseUint(nodeID, 10, 8)\n\tif err != nil 
{\n\t\tlogger.WithError(err).Error(\"Error parsing node ID\")\n\t\treturn 0, false\n\t}\n\tID := uint8(id)\n\treturn ID, s.inSyncID(ID)\n}\n\nfunc (s *synchronizer) inSyncID(nodeID uint8) bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\t_, in := s.nodes[nodeID]\n\treturn in\n}\n\n// addNode adds the node to the state with the missing partitions\nfunc (s *synchronizer) addNode(nodeID uint8, partitions partitions) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.nodes[nodeID] = partitions\n\n\tfor _, p := range partitions {\n\t\tsp, exists := s.syncPartitions[p.Name]\n\t\tif !exists {\n\t\t\tlocalPartition, err := s.store.Partition(p.Name)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).WithField(\"partition\", p.Name).Error(\"Error retrieving local partition\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalMaxID := localPartition.MaxMessageID()\n\n\t\t\tsp = &syncPartition{\n\t\t\t\tsynchronizer:    s,\n\t\t\t\tlocalPartition:  localPartition,\n\t\t\t\tlocalStartMaxID: localMaxID,\n\t\t\t\tnodes:           make(map[uint8]partition, 1),\n\t\t\t\tlastID:          localMaxID,\n\t\t\t\tprocessC:        make(chan *syncMessage, syncPartitionsProcessBuffer),\n\t\t\t}\n\t\t}\n\n\t\tsp.nodes[nodeID] = p\n\t\ts.syncPartitions[p.Name] = sp\n\n\t\tgo sp.run()\n\t}\n}\n\nfunc (s *synchronizer) messageRequest(nodeID uint8, data []byte) error {\n\tsmr := &syncMessageRequest{}\n\terr := smr.decode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// start goroutine that will fetch messages from store and send them to the node\n\tgo s.requestLoop(nodeID, smr)\n\treturn nil\n}\n\n// requestLoop handles sending messages fetched from the store to the node\n// that made the request a message sent from here will be received by the syncMessage\n// method on the other node\nfunc (s *synchronizer) requestLoop(nodeID uint8, smr *syncMessageRequest) {\n\ts.logger.WithFields(log.Fields{\n\t\t\"requestNodeID\":      nodeID,\n\t\t\"syncMessageRequest\": smr,\n\t}).Debug(\"Sending requested 
messages\")\n\n\treq := &store.FetchRequest{\n\t\tPartition: smr.Partition,\n\t\tStartID:   smr.StartID,\n\t\tEndID:     smr.EndID,\n\t\tDirection: 1,\n\t\tMessageC:  make(chan *store.FetchedMessage, 10),\n\t\tErrorC:    make(chan error),\n\t\tStartC:    make(chan int),\n\t\tCount:     math.MaxInt32,\n\t}\n\n\ts.store.Fetch(req)\n\n\tvar fetchedMessage *store.FetchedMessage\n\n\topened := true\n\tfor opened {\n\t\tselect {\n\t\tcase count := <-req.StartC:\n\t\t\tlogger.WithField(\"count\", count).\n\t\t\t\tDebug(\"Receiving messages for sync request from store\")\n\t\tcase fetchedMessage, opened = <-req.MessageC:\n\t\t\tif !opened {\n\t\t\t\ts.logger.WithField(\"requestNodeID\", nodeID).\n\t\t\t\t\tDebug(\"Receive channel closed by the store for the sync request\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// send message to node\n\t\t\tcmsg, err := s.cluster.newEncoderMessage(mtSyncMessage, &syncMessage{\n\t\t\t\tPartition: smr.Partition,\n\t\t\t\tID:        fetchedMessage.ID,\n\t\t\t\tMessage:   fetchedMessage.Message,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).\n\t\t\t\t\tWithField(\"fetchedMessage\", fetchedMessage).\n\t\t\t\t\tError(\"Error creating cluster message for fetched message\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = s.cluster.sendMessageToNodeID(nodeID, cmsg)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).\n\t\t\t\t\tWithField(\"clusterMesssage\", cmsg).\n\t\t\t\t\tError(\"Error sending sync message to node\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase <-s.stopC:\n\t\t\ts.logger.WithField(\"requestNodeID\", nodeID).Debug(\"Stopping synchronization request loop\")\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// syncMessage received data from another node after we made a request for a set\n// of messages  it will decode the data into a *syncMessage and send it into the\n// appropriate syncPartition processC channel\nfunc (s *synchronizer) syncMessage(nodeID uint8, data []byte) error {\n\tif !s.inSyncID(nodeID) {\n\t\treturn 
ErrNodeNotInSync\n\t}\n\n\tsm := &syncMessage{}\n\terr := sm.decode(data)\n\tif err != nil {\n\t\tlogger.WithError(err).WithFields(log.Fields{\n\t\t\t\"nodeID\": nodeID,\n\t\t\t\"data\":   string(data),\n\t\t}).Error(\"Error decoding sync message received\")\n\t\treturn err\n\t}\n\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\tsyncPartition, exists := s.syncPartitions[sm.Partition]\n\tif !exists {\n\t\treturn ErrMissingSyncPartition\n\t}\n\n\ts.logger.WithFields(log.Fields{\n\t\t\"sm\":           sm,\n\t\t\"syncPartiton\": syncPartition,\n\t}).Debug(\"Processing received message\")\n\n\tsyncPartition.processC <- sm\n\treturn nil\n}\n\n// keep state of fetching for a partition\ntype syncPartition struct {\n\tsync.RWMutex\n\tsynchronizer *synchronizer\n\n\tlocalPartition  store.MessagePartition\n\tlocalStartMaxID uint64              // max message ID in the local store before the sync request\n\tnodes           map[uint8]partition // store nodes that have this partition and the info in does nodes\n\tlastID          uint64              // last fetched message ID\n\n\t// processC channel will receive the message from the cluster and store it in the\n\t// it's partition updating the lastID and sending a new request\n\tprocessC  chan *syncMessage\n\trunning   bool\n\trunningMu sync.RWMutex\n}\n\n// start the loop that will synchronize this partition with other nodes\nfunc (sp *syncPartition) run() {\n\tif sp.isRunning() {\n\t\treturn\n\t}\n\tsp.setRunning(true)\n\tdefer sp.setRunning(false)\n\n\tsp.loop()\n}\n\n// syncLoop will start to sync the partition\n// Each nodes job is to ask the messages it's missing from own store.\nfunc (sp *syncPartition) loop() {\n\t// send request for the missing messages\n\t// get node with the highest message\n\tmaxID, nodeID := sp.maxIDNode()\n\n\tpartitionName := sp.localPartition.Name()\n\tcmsg, err := sp.synchronizer.cluster.newEncoderMessage(mtSyncMessageRequest, &syncMessageRequest{\n\t\tPartition: partitionName,\n\t\tStartID:   
sp.lastID,\n\t\tEndID:     maxID,\n\t})\n\tif err != nil {\n\t\tsp.synchronizer.logger.WithError(err).Error(\"Error creating sync message request\")\n\t\treturn\n\t}\n\n\terr = sp.synchronizer.cluster.sendMessageToNodeID(nodeID, cmsg)\n\tif err != nil {\n\t\tsp.synchronizer.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\"nodeID\":  nodeID,\n\t\t\t\"StartID\": sp.lastID,\n\t\t\t\"EndID\":   maxID,\n\t\t}).Error(\"Error sending sync message request to node\")\n\t\treturn\n\t}\n\n\tfor {\n\t\t// wait to receive the message\n\t\t// end the loop in case we are stopping the process or we finished synchronizing\n\t\tselect {\n\t\tcase sm := <-sp.processC:\n\t\t\terr := sp.synchronizer.store.Store(partitionName, sm.ID, sm.Message)\n\t\t\tif err != nil {\n\t\t\t\tsp.synchronizer.logger.WithError(err).\n\t\t\t\t\tWithField(\"messageID\", sm.ID).\n\t\t\t\t\tError(\"Error storing synchronize message\")\n\t\t\t}\n\n\t\t\t// end loop if we reached the end\n\t\t\tsp.lastID = sm.ID\n\t\t\tif sm.ID >= maxID {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-sp.synchronizer.stopC:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (sp *syncPartition) maxIDNode() (max uint64, nodeID uint8) {\n\tfor nid, p := range sp.nodes {\n\t\tif p.MaxID > max {\n\t\t\tmax = p.MaxID\n\t\t\tnodeID = nid\n\t\t}\n\t}\n\treturn\n}\n\nfunc (sp *syncPartition) isRunning() bool {\n\tsp.runningMu.RLock()\n\tdefer sp.runningMu.RUnlock()\n\n\treturn sp.running\n}\n\nfunc (sp *syncPartition) setRunning(r bool) {\n\tsp.runningMu.Lock()\n\tdefer sp.runningMu.Unlock()\n\n\tsp.running = r\n}\n\ntype partition struct {\n\tName  string\n\tMaxID uint64\n}\n\n// syncPartitions will be sent by cluster server to notify the joining server\n// on the partitions they store\ntype partitions []partition\n\nfunc (p *partitions) encode() ([]byte, error) {\n\tvar bytes []byte\n\tencoder := codec.NewEncoderBytes(&bytes, h)\n\n\terr := encoder.Encode(p)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error encoding partitions\")\n\t\treturn 
nil, err\n\t}\n\n\treturn bytes, nil\n}\n\n// decode will decode the bytes into the receiver `p` in our case\n// Example:\n// ```\n// p := make(partitions, 0)\n// err := p.decode(data)\n// if err != nil {\n// \t ...\n// }\n// ```\nfunc (p *partitions) decode(data []byte) error {\n\tdecoder := codec.NewDecoderBytes(data, h)\n\terr := decoder.Decode(p)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error decoding partitions data\")\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc partitionsFromStore(store store.MessageStore) *partitions {\n\tmessagePartitions, err := store.Partitions()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error retrieving store localPartitions\")\n\t\treturn nil\n\t}\n\n\tlocalPartitions := make(partitions, 0, len(messagePartitions))\n\tfor _, p := range messagePartitions {\n\t\tlocalPartitions = append(localPartitions, partition{\n\t\t\tName:  p.Name(),\n\t\t\tMaxID: p.MaxMessageID(),\n\t\t})\n\t}\n\treturn &localPartitions\n}\n\n// send this struct to a node to request the messages between StartID and EndID\ntype syncMessageRequest struct {\n\tPartition string\n\tStartID   uint64\n\tEndID     uint64\n}\n\nfunc (smr *syncMessageRequest) encode() ([]byte, error) {\n\treturn encode(smr)\n}\n\nfunc (smr *syncMessageRequest) decode(data []byte) error {\n\treturn decode(smr, data)\n}\n\ntype syncMessage struct {\n\t// Hold updated partition info\n\tPartition string\n\tID        uint64\n\tMessage   []byte\n}\n\nfunc (sm *syncMessage) encode() ([]byte, error) {\n\treturn encode(sm)\n}\n\nfunc (sm *syncMessage) decode(data []byte) error {\n\treturn decode(sm, data)\n}\n"
  },
  {
    "path": "server/cluster_integration_test.go",
    "content": "package server\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_Cluster_Subscribe_To_Random_Node(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8090\",\n\t\tNodeID:     1,\n\t\tNodePort:   11000,\n\t\tRemotes:    \"localhost:11000\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8091\",\n\t\tNodeID:     2,\n\t\tNodePort:   11001,\n\t\tRemotes:    \"localhost:11000\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tclient1, err := node1.client(\"user1\", 10, true)\n\ta.NoError(err)\n\n\terr = client1.Subscribe(\"/foo/bar\")\n\ta.NoError(err, \"Subscribe to first node should work\")\n\n\tclient1.Close()\n\n\ttime.Sleep(50 * time.Millisecond)\n\n\tclient1, err = node2.client(\"user1\", 10, true)\n\ta.NoError(err, \"Connection to second node should return no error\")\n\n\terr = client1.Subscribe(\"/foo/bar\")\n\ta.NoError(err, \"Subscribe to second node should work\")\n\tclient1.Close()\n}\n\nfunc Test_Cluster_Integration(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8092\",\n\t\tNodeID:     1,\n\t\tNodePort:   11002,\n\t\tRemotes:    \"localhost:11002\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8093\",\n\t\tNodeID:     2,\n\t\tNodePort:   11003,\n\t\tRemotes:    \"localhost:11002\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tclient1, err := node1.client(\"user1\", 10, false)\n\ta.NoError(err)\n\n\tclient2, 
err := node2.client(\"user2\", 10, false)\n\ta.NoError(err)\n\n\terr = client2.Subscribe(\"/testTopic/m\")\n\ta.NoError(err)\n\n\tclient3, err := node1.client(\"user3\", 10, false)\n\ta.NoError(err)\n\n\tnumSent := 3\n\tfor i := 0; i < numSent; i++ {\n\t\terr := client1.Send(\"/testTopic/m\", \"body\", \"{jsonHeader:1}\")\n\t\ta.NoError(err)\n\n\t\terr = client3.Send(\"/testTopic/m\", \"body\", \"{jsonHeader:4}\")\n\t\ta.NoError(err)\n\t}\n\n\tbreakTimer := time.After(3 * time.Second)\n\tnumReceived := 0\n\tidReceived := make(map[uint64]bool)\n\n\t// see if the correct number of messages arrived at the other client, before timeout is reached\nWAIT:\n\tfor {\n\t\tselect {\n\t\tcase incomingMessage := <-client2.Messages():\n\t\t\tnumReceived++\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"nodeID\":            incomingMessage.NodeID,\n\t\t\t\t\"path\":              incomingMessage.Path,\n\t\t\t\t\"incomingMsgUserId\": incomingMessage.UserID,\n\t\t\t\t\"headerJson\":        incomingMessage.HeaderJSON,\n\t\t\t\t\"body\":              incomingMessage.BodyAsString(),\n\t\t\t\t\"numReceived\":       numReceived,\n\t\t\t}).Info(\"Client2 received a message\")\n\n\t\t\ta.Equal(protocol.Path(\"/testTopic/m\"), incomingMessage.Path)\n\t\t\ta.Equal(\"body\", incomingMessage.BodyAsString())\n\t\t\ta.True(incomingMessage.ID > 0)\n\t\t\tidReceived[incomingMessage.ID] = true\n\n\t\t\tif 2*numReceived == numSent {\n\t\t\t\tbreak WAIT\n\t\t\t}\n\n\t\tcase <-breakTimer:\n\t\t\tbreak WAIT\n\t\t}\n\t}\n}\n\nvar syncTopic = \"/syncTopic\"\n\n// Test synchronizing messages when a new node is\nfunc TestSynchronizerIntegration(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\t//TODO REACTIVATE THIS AND see if it is working for future\n\ttestutil.SkipIfDisabled(t)\n\tdefer testutil.EnableDebugForMethod()()\n\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8094\",\n\t\tNodeID:     1,\n\t\tNodePort:   11004,\n\t\tRemotes:    
\"localhost:11004\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient1, err := node1.client(\"client1\", 10, true)\n\ta.NoError(err)\n\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\n\ttime.Sleep(2 * time.Second)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8095\",\n\t\tNodeID:     2,\n\t\tNodePort:   11005,\n\t\tRemotes:    \"localhost:11004\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tclient2, err := node2.client(\"client2\", 10, true)\n\ta.NoError(err)\n\n\tcmd := &protocol.Cmd{\n\t\tName: protocol.CmdReceive,\n\t\tArg:  syncTopic + \" -3\",\n\t}\n\tdoneC := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase m := <-client2.Messages():\n\t\t\t\tlog.WithField(\"m\", m).Error(\"Message received from first cluster\")\n\t\t\tcase e := <-client2.Errors():\n\t\t\t\tlog.WithField(\"clientError\", e).Error(\"Client error\")\n\t\t\tcase status := <-client2.StatusMessages():\n\t\t\t\tlog.WithField(\"status\", status).Error(\"Client status messasge\")\n\t\t\tcase <-doneC:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tlog.Error(string(cmd.Bytes()))\n\tclient2.WriteRawMessage(cmd.Bytes())\n\ttime.Sleep(10 * time.Second)\n\tclose(doneC)\n}\n"
  },
  {
    "path": "server/config.go",
    "content": "package server\n\nimport (\n\t\"github.com/Bogh/gcm\"\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"gopkg.in/alecthomas/kingpin.v2\"\n\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/smancke/guble/server/apns\"\n\t\"github.com/smancke/guble/server/fcm\"\n\t\"github.com/smancke/guble/server/sms\"\n)\n\nconst (\n\tdefaultHttpListen      = \":8080\"\n\tdefaultHealthEndpoint  = \"/admin/healthcheck\"\n\tdefaultMetricsEndpoint = \"/admin/metrics\"\n\tdefaultKVSBackend      = \"file\"\n\tdefaultMSBackend       = \"file\"\n\tdefaultStoragePath     = \"/var/lib/guble\"\n\tdefaultNodePort        = \"10000\"\n\tdevelopment            = \"dev\"\n\tintegration            = \"int\"\n\tpreproduction          = \"pre\"\n\tproduction             = \"prod\"\n\tmemProfile             = \"mem\"\n\tcpuProfile             = \"cpu\"\n\tblockProfile           = \"block\"\n)\n\nvar (\n\tdefaultFCMEndpoint = gcm.GcmSendEndpoint\n\tdefaultFCMMetrics  = true\n\tdefaultAPNSMetrics = true\n\tdefaultSMSMetrics  = true\n\tenvironments       = []string{development, integration, preproduction, production}\n)\n\ntype (\n\t// PostgresConfig is used for configuring the Postgresql connection.\n\tPostgresConfig struct {\n\t\tHost     *string\n\t\tPort     *int\n\t\tUser     *string\n\t\tPassword *string\n\t\tDbName   *string\n\t}\n\t// ClusterConfig is used for configuring the cluster component.\n\tClusterConfig struct {\n\t\tNodeID   *uint8\n\t\tNodePort *int\n\t\tRemotes  *tcpAddrList\n\t}\n\t// GubleConfig is used for configuring Guble server (including its modules / connectors).\n\tGubleConfig struct {\n\t\tLog             *string\n\t\tEnvName         *string\n\t\tHttpListen      *string\n\t\tKVS             *string\n\t\tMS              *string\n\t\tStoragePath     *string\n\t\tHealthEndpoint  *string\n\t\tMetricsEndpoint *string\n\t\tProfile         *string\n\t\tPostgres        PostgresConfig\n\t\tFCM             fcm.Config\n\t\tAPNS            
apns.Config\n\t\tSMS             sms.Config\n\t\tCluster         ClusterConfig\n\t}\n)\n\nvar (\n\tparsed = false\n\n\t// Config is the active configuration of guble (used when starting-up the server)\n\tConfig = &GubleConfig{\n\t\tLog: kingpin.Flag(\"log\", \"Log level\").\n\t\t\tDefault(log.ErrorLevel.String()).\n\t\t\tEnvar(\"GUBLE_LOG\").\n\t\t\tEnum(logLevels()...),\n\t\tEnvName: kingpin.Flag(\"env\", `Name of the environment on which the application is running`).\n\t\t\tDefault(development).\n\t\t\tEnvar(\"GUBLE_ENV\").\n\t\t\tEnum(environments...),\n\t\tHttpListen: kingpin.Flag(\"http\", `The address to for the HTTP server to listen on (format: \"[Host]:Port\")`).\n\t\t\tDefault(defaultHttpListen).\n\t\t\tEnvar(\"GUBLE_HTTP_LISTEN\").\n\t\t\tString(),\n\t\tKVS: kingpin.Flag(\"kvs\", \"The storage backend for the key-value store to use : file | memory | postgres \").\n\t\t\tDefault(defaultKVSBackend).\n\t\t\tEnvar(\"GUBLE_KVS\").\n\t\t\tString(),\n\t\tMS: kingpin.Flag(\"ms\", \"The message storage backend : file | memory\").\n\t\t\tDefault(defaultMSBackend).\n\t\t\tHintOptions(\"file\", \"memory\").\n\t\t\tEnvar(\"GUBLE_MS\").\n\t\t\tString(),\n\t\tStoragePath: kingpin.Flag(\"storage-path\", \"The path for storing messages and key-value data if 'file' is selected\").\n\t\t\tDefault(defaultStoragePath).\n\t\t\tEnvar(\"GUBLE_STORAGE_PATH\").\n\t\t\tExistingDir(),\n\t\tHealthEndpoint: kingpin.Flag(\"health-endpoint\", `The health endpoint to be used by the HTTP server (value for disabling it: \"\")`).\n\t\t\tDefault(defaultHealthEndpoint).\n\t\t\tEnvar(\"GUBLE_HEALTH_ENDPOINT\").\n\t\t\tString(),\n\t\tMetricsEndpoint: kingpin.Flag(\"metrics-endpoint\", `The metrics endpoint to be used by the HTTP server (value for disabling it: \"\")`).\n\t\t\tDefault(defaultMetricsEndpoint).\n\t\t\tEnvar(\"GUBLE_METRICS_ENDPOINT\").\n\t\t\tString(),\n\t\tProfile: kingpin.Flag(\"profile\", `The profiler to be used (default: none): mem | cpu | 
block`).\n\t\t\tDefault(\"\").\n\t\t\tEnvar(\"GUBLE_PROFILE\").\n\t\t\tEnum(\"mem\", \"cpu\", \"block\", \"\"),\n\t\tPostgres: PostgresConfig{\n\t\t\tHost: kingpin.Flag(\"pg-host\", \"The PostgreSQL hostname\").\n\t\t\t\tDefault(\"localhost\").\n\t\t\t\tEnvar(\"GUBLE_PG_HOST\").\n\t\t\t\tString(),\n\t\t\tPort: kingpin.Flag(\"pg-port\", \"The PostgreSQL port\").\n\t\t\t\tDefault(\"5432\").\n\t\t\t\tEnvar(\"GUBLE_PG_PORT\").\n\t\t\t\tInt(),\n\t\t\tUser: kingpin.Flag(\"pg-user\", \"The PostgreSQL user\").\n\t\t\t\tDefault(\"guble\").\n\t\t\t\tEnvar(\"GUBLE_PG_USER\").\n\t\t\t\tString(),\n\t\t\tPassword: kingpin.Flag(\"pg-password\", \"The PostgreSQL password\").\n\t\t\t\tDefault(\"guble\").\n\t\t\t\tEnvar(\"GUBLE_PG_PASSWORD\").\n\t\t\t\tString(),\n\t\t\tDbName: kingpin.Flag(\"pg-dbname\", \"The PostgreSQL database name\").\n\t\t\t\tDefault(\"guble\").\n\t\t\t\tEnvar(\"GUBLE_PG_DBNAME\").\n\t\t\t\tString(),\n\t\t},\n\t\tFCM: fcm.Config{\n\t\t\tEnabled: kingpin.Flag(\"fcm\", \"Enable the Google Firebase Cloud Messaging connector\").\n\t\t\t\tEnvar(\"GUBLE_FCM\").\n\t\t\t\tBool(),\n\t\t\tAPIKey: kingpin.Flag(\"fcm-api-key\", \"The Google API Key for Google Firebase Cloud Messaging\").\n\t\t\t\tEnvar(\"GUBLE_FCM_API_KEY\").\n\t\t\t\tString(),\n\t\t\tWorkers: kingpin.Flag(\"fcm-workers\", \"The number of workers handling traffic with Firebase Cloud Messaging (default: number of CPUs)\").\n\t\t\t\tDefault(strconv.Itoa(runtime.NumCPU())).\n\t\t\t\tEnvar(\"GUBLE_FCM_WORKERS\").\n\t\t\t\tInt(),\n\t\t\tEndpoint: kingpin.Flag(\"fcm-endpoint\", \"The Google Firebase Cloud Messaging endpoint\").\n\t\t\t\tDefault(defaultFCMEndpoint).\n\t\t\t\tEnvar(\"GUBLE_FCM_ENDPOINT\").\n\t\t\t\tString(),\n\t\t\tPrefix: kingpin.Flag(\"fcm-prefix\", \"The FCM prefix / endpoint\").\n\t\t\t\tEnvar(\"GUBLE_FCM_PREFIX\").\n\t\t\t\tDefault(\"/fcm/\").\n\t\t\t\tString(),\n\t\t\tIntervalMetrics: &defaultFCMMetrics,\n\t\t},\n\t\tAPNS: apns.Config{\n\t\t\tEnabled: kingpin.Flag(\"apns\", \"Enable the APNS 
connector (by default, in Development mode)\").\n\t\t\t\tEnvar(\"GUBLE_APNS\").\n\t\t\t\tBool(),\n\t\t\tProduction: kingpin.Flag(\"apns-production\", \"Enable the APNS connector in Production mode\").\n\t\t\t\tEnvar(\"GUBLE_APNS_PRODUCTION\").\n\t\t\t\tBool(),\n\t\t\tCertificateFileName: kingpin.Flag(\"apns-cert-file\", \"The APNS certificate file name\").\n\t\t\t\tEnvar(\"GUBLE_APNS_CERT_FILE\").\n\t\t\t\tString(),\n\t\t\tCertificateBytes: kingpin.Flag(\"apns-cert-bytes\", \"The APNS certificate bytes, as a string of hex-values\").\n\t\t\t\tEnvar(\"GUBLE_APNS_CERT_BYTES\").\n\t\t\t\tHexBytes(),\n\t\t\tCertificatePassword: kingpin.Flag(\"apns-cert-password\", \"The APNS certificate password\").\n\t\t\t\tEnvar(\"GUBLE_APNS_CERT_PASSWORD\").\n\t\t\t\tString(),\n\t\t\tAppTopic: kingpin.Flag(\"apns-app-topic\", \"The APNS topic (as used by the mobile application)\").\n\t\t\t\tEnvar(\"GUBLE_APNS_APP_TOPIC\").\n\t\t\t\tString(),\n\t\t\tPrefix: kingpin.Flag(\"apns-prefix\", \"The APNS prefix / endpoint\").\n\t\t\t\tEnvar(\"GUBLE_APNS_PREFIX\").\n\t\t\t\tDefault(\"/apns/\").\n\t\t\t\tString(),\n\t\t\tWorkers: kingpin.Flag(\"apns-workers\", \"The number of workers handling traffic with APNS (default: number of CPUs)\").\n\t\t\t\tDefault(strconv.Itoa(runtime.NumCPU())).\n\t\t\t\tEnvar(\"GUBLE_APNS_WORKERS\").\n\t\t\t\tInt(),\n\t\t\tIntervalMetrics: &defaultAPNSMetrics,\n\t\t},\n\t\tCluster: ClusterConfig{\n\t\t\tNodeID: kingpin.Flag(\"node-id\", \"(cluster mode) This guble node's own ID: a strictly positive integer number which must be unique in cluster\").\n\t\t\t\tEnvar(\"GUBLE_NODE_ID\").Uint8(),\n\t\t\tNodePort: kingpin.Flag(\"node-port\", \"(cluster mode) This guble node's own local port: a strictly positive integer number\").\n\t\t\t\tDefault(defaultNodePort).Envar(\"GUBLE_NODE_PORT\").Int(),\n\t\t\tRemotes: tcpAddrListParser(kingpin.Flag(\"remotes\", `(cluster mode) The list of TCP addresses of some other guble nodes (format: 
\"IP:port\")`).\n\t\t\t\tEnvar(\"GUBLE_NODE_REMOTES\")),\n\t\t},\n\t\tSMS: sms.Config{\n\t\t\tEnabled: kingpin.Flag(\"sms\", \"Enable the  SMS  gateway)\").\n\t\t\t\tEnvar(\"GUBLE_SMS\").\n\t\t\t\tBool(),\n\t\t\tAPIKey: kingpin.Flag(\"sms-api-key\", \"The Nexmo API Key for Sending sms\").\n\t\t\t\tEnvar(\"GUBLE_SMS_API_KEY\").\n\t\t\t\tString(),\n\t\t\tAPISecret: kingpin.Flag(\"sms-api-secret\", \"The Nexmo API Secret for Sending sms\").\n\t\t\t\tEnvar(\"GUBLE_SMS_API_SECRET\").\n\t\t\t\tString(),\n\t\t\tSMSTopic: kingpin.Flag(\"sms-topic\", \"The topic for sms route\").\n\t\t\t\tEnvar(\"GUBLE_SMS_TOPIC\").\n\t\t\t\tDefault(sms.SMSDefaultTopic).\n\t\t\t\tString(),\n\n\t\t\tWorkers: kingpin.Flag(\"sms-workers\", \"The number of workers handling traffic with Nexmo sms endpoint(default: number of CPUs)\").\n\t\t\t\tDefault(strconv.Itoa(runtime.NumCPU())).\n\t\t\t\tEnvar(\"GUBLE_SMS_WORKERS\").\n\t\t\t\tInt(),\n\t\t\tIntervalMetrics: &defaultSMSMetrics,\n\t\t},\n\t}\n)\n\nfunc logLevels() (levels []string) {\n\tfor _, level := range log.AllLevels {\n\t\tlevels = append(levels, level.String())\n\t}\n\treturn\n}\n\n// parseConfig parses the flags from command line. 
Must be used before accessing the config.\n// If there are missing or invalid arguments it will exit the application\n// and display a message.\nfunc parseConfig() {\n\tif parsed {\n\t\treturn\n\t}\n\tkingpin.Parse()\n\tparsed = true\n\treturn\n}\n\ntype tcpAddrList []*net.TCPAddr\n\nfunc (h *tcpAddrList) Set(value string) error {\n\taddresses := strings.Split(value, \" \")\n\n\t// Reset the list also, when running tests we add to the same list and is incorrect\n\t*h = make(tcpAddrList, 0)\n\tfor _, addr := range addresses {\n\t\tlogger.WithField(\"addr\", addr).Info(\"value\")\n\t\tparts := strings.SplitN(addr, \":\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"expected HEADER:VALUE got '%s'\", addr)\n\t\t}\n\t\taddr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*h = append(*h, addr)\n\t}\n\treturn nil\n}\n\nfunc tcpAddrListParser(s kingpin.Settings) (target *tcpAddrList) {\n\tslist := make(tcpAddrList, 0)\n\ts.SetValue(&slist)\n\treturn &slist\n}\n\nfunc (h *tcpAddrList) String() string {\n\treturn \"\"\n}\n"
  },
  {
    "path": "server/config_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"net\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestParsingOfEnvironmentVariables(t *testing.T) {\n\ta := assert.New(t)\n\n\toriginalArgs := os.Args\n\tos.Args = []string{os.Args[0]}\n\tdefer func() { os.Args = originalArgs }()\n\n\t// given: some environment variables\n\tos.Setenv(\"GUBLE_HTTP_LISTEN\", \"http_listen\")\n\tdefer os.Unsetenv(\"GUBLE_HTTP_LISTEN\")\n\n\tos.Setenv(\"GUBLE_LOG\", \"debug\")\n\tdefer os.Unsetenv(\"GUBLE_LOG\")\n\n\tos.Setenv(\"GUBLE_ENV\", \"dev\")\n\tdefer os.Unsetenv(\"GUBLE_ENV\")\n\n\tos.Setenv(\"GUBLE_PROFILE\", \"mem\")\n\tdefer os.Unsetenv(\"GUBLE_PROFILE\")\n\n\tos.Setenv(\"GUBLE_KVS\", \"kvs-backend\")\n\tdefer os.Unsetenv(\"GUBLE_KVS\")\n\n\tos.Setenv(\"GUBLE_STORAGE_PATH\", os.TempDir())\n\tdefer os.Unsetenv(\"GUBLE_STORAGE_PATH\")\n\n\tos.Setenv(\"GUBLE_HEALTH_ENDPOINT\", \"health_endpoint\")\n\tdefer os.Unsetenv(\"GUBLE_HEALTH_ENDPOINT\")\n\n\tos.Setenv(\"GUBLE_METRICS_ENDPOINT\", \"metrics_endpoint\")\n\tdefer os.Unsetenv(\"GUBLE_METRICS_ENDPOINT\")\n\n\tos.Setenv(\"GUBLE_MS\", \"ms-backend\")\n\tdefer os.Unsetenv(\"GUBLE_MS\")\n\n\tos.Setenv(\"GUBLE_FCM\", \"true\")\n\tdefer os.Unsetenv(\"GUBLE_FCM\")\n\n\tos.Setenv(\"GUBLE_FCM_API_KEY\", \"fcm-api-key\")\n\tdefer os.Unsetenv(\"GUBLE_FCM_API_KEY\")\n\n\tos.Setenv(\"GUBLE_FCM_WORKERS\", \"3\")\n\tdefer os.Unsetenv(\"GUBLE_FCM_WORKERS\")\n\n\tos.Setenv(\"GUBLE_APNS\", \"true\")\n\tdefer os.Unsetenv(\"GUBLE_APNS\")\n\n\tos.Setenv(\"GUBLE_APNS_PRODUCTION\", \"true\")\n\tdefer os.Unsetenv(\"GUBLE_APNS_PRODUCTION\")\n\n\tos.Setenv(\"GUBLE_APNS_CERT_BYTES\", \"00ff\")\n\tdefer os.Unsetenv(\"GUBLE_APNS_CERT_BYTES\")\n\n\tos.Setenv(\"GUBLE_APNS_CERT_PASSWORD\", \"rotten\")\n\tdefer os.Unsetenv(\"GUBLE_APNS_CERT_PASSWORD\")\n\n\tos.Setenv(\"GUBLE_APNS_APP_TOPIC\", \"com.myapp\")\n\tdefer os.Unsetenv(\"GUBLE_APNS_APP_TOPIC\")\n\n\tos.Setenv(\"GUBLE_NODE_ID\", \"1\")\n\tdefer 
os.Unsetenv(\"GUBLE_NODE_ID\")\n\n\tos.Setenv(\"GUBLE_NODE_PORT\", \"10000\")\n\tdefer os.Unsetenv(\"GUBLE_NODE_PORT\")\n\n\tos.Setenv(\"GUBLE_PG_HOST\", \"pg-host\")\n\tdefer os.Unsetenv(\"GUBLE_PG_HOST\")\n\n\tos.Setenv(\"GUBLE_PG_PORT\", \"5432\")\n\tdefer os.Unsetenv(\"GUBLE_PG_PORT\")\n\n\tos.Setenv(\"GUBLE_PG_USER\", \"pg-user\")\n\tdefer os.Unsetenv(\"GUBLE_PG_USER\")\n\n\tos.Setenv(\"GUBLE_PG_PASSWORD\", \"pg-password\")\n\tdefer os.Unsetenv(\"GUBLE_PG_PASSWORD\")\n\n\tos.Setenv(\"GUBLE_PG_DBNAME\", \"pg-dbname\")\n\tdefer os.Unsetenv(\"GUBLE_PG_DBNAME\")\n\n\tos.Setenv(\"GUBLE_NODE_REMOTES\", \"127.0.0.1:8080 127.0.0.1:20002\")\n\tdefer os.Unsetenv(\"GUBLE_NODE_REMOTES\")\n\n\t// when we parse the arguments from environment variables\n\tparseConfig()\n\n\t// then the parsed parameters are correctly set\n\tassertArguments(a)\n}\n\nfunc TestParsingArgs(t *testing.T) {\n\ta := assert.New(t)\n\n\toriginalArgs := os.Args\n\n\tdefer func() { os.Args = originalArgs }()\n\n\t// given: a command line\n\tos.Args = []string{os.Args[0],\n\t\t\"--http\", \"http_listen\",\n\t\t\"--env\", \"dev\",\n\t\t\"--log\", \"debug\",\n\t\t\"--profile\", \"mem\",\n\t\t\"--storage-path\", os.TempDir(),\n\t\t\"--kvs\", \"kvs-backend\",\n\t\t\"--ms\", \"ms-backend\",\n\t\t\"--health-endpoint\", \"health_endpoint\",\n\t\t\"--metrics-endpoint\", \"metrics_endpoint\",\n\t\t\"--fcm\",\n\t\t\"--fcm-api-key\", \"fcm-api-key\",\n\t\t\"--fcm-workers\", \"3\",\n\t\t\"--apns\",\n\t\t\"--apns-production\",\n\t\t\"--apns-cert-bytes\", \"00ff\",\n\t\t\"--apns-cert-password\", \"rotten\",\n\t\t\"--apns-app-topic\", \"com.myapp\",\n\t\t\"--node-id\", \"1\",\n\t\t\"--node-port\", \"10000\",\n\t\t\"--pg-host\", \"pg-host\",\n\t\t\"--pg-port\", \"5432\",\n\t\t\"--pg-user\", \"pg-user\",\n\t\t\"--pg-password\", \"pg-password\",\n\t\t\"--pg-dbname\", \"pg-dbname\",\n\t\t\"--remotes\", \"127.0.0.1:8080 127.0.0.1:20002\",\n\t}\n\n\t// when we parse the arguments from command-line 
flags\n\tparseConfig()\n\n\t// then the parsed parameters are correctly set\n\tassertArguments(a)\n}\n\nfunc assertArguments(a *assert.Assertions) {\n\ta.Equal(\"http_listen\", *Config.HttpListen)\n\ta.Equal(\"kvs-backend\", *Config.KVS)\n\ta.Equal(os.TempDir(), *Config.StoragePath)\n\ta.Equal(\"ms-backend\", *Config.MS)\n\ta.Equal(\"health_endpoint\", *Config.HealthEndpoint)\n\n\ta.Equal(\"metrics_endpoint\", *Config.MetricsEndpoint)\n\n\ta.Equal(true, *Config.FCM.Enabled)\n\ta.Equal(\"fcm-api-key\", *Config.FCM.APIKey)\n\ta.Equal(3, *Config.FCM.Workers)\n\n\ta.Equal(true, *Config.APNS.Enabled)\n\ta.Equal(true, *Config.APNS.Production)\n\ta.Equal([]byte{0, 255}, *Config.APNS.CertificateBytes)\n\ta.Equal(\"rotten\", *Config.APNS.CertificatePassword)\n\ta.Equal(\"com.myapp\", *Config.APNS.AppTopic)\n\n\ta.Equal(uint8(1), *Config.Cluster.NodeID)\n\ta.Equal(10000, *Config.Cluster.NodePort)\n\n\ta.Equal(\"pg-host\", *Config.Postgres.Host)\n\ta.Equal(5432, *Config.Postgres.Port)\n\ta.Equal(\"pg-user\", *Config.Postgres.User)\n\ta.Equal(\"pg-password\", *Config.Postgres.Password)\n\ta.Equal(\"pg-dbname\", *Config.Postgres.DbName)\n\n\ta.Equal(\"debug\", *Config.Log)\n\ta.Equal(\"dev\", *Config.EnvName)\n\ta.Equal(\"mem\", *Config.Profile)\n\n\tassertClusterRemotes(a)\n}\n\nfunc assertClusterRemotes(a *assert.Assertions) {\n\tip1, _ := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:8080\")\n\tip2, _ := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:20002\")\n\tipList := make(tcpAddrList, 0)\n\tipList = append(ipList, ip1)\n\tipList = append(ipList, ip2)\n\ta.Equal(ipList, *Config.Cluster.Remotes)\n}\n"
  },
  {
    "path": "server/connector/connector.go",
    "content": "package connector\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/gorilla/mux\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/service\"\n)\n\nconst (\n\tDefaultWorkers = 1\n\tSubstitutePath = \"/substitute/\"\n)\n\nvar (\n\tTopicParam     = \"topic\"\n\tConnectorParam = \"connector\"\n)\n\ntype Sender interface {\n\t// Send takes a Request and returns the response or error\n\tSend(Request) (interface{}, error)\n}\n\ntype SenderSetter interface {\n\tSender() Sender\n\tSetSender(Sender)\n}\n\ntype Metadata struct {\n\tLatency time.Duration\n}\n\ntype ResponseHandler interface {\n\t// HandleResponse handles the response+error (returned by a Sender)\n\tHandleResponse(Request, interface{}, *Metadata, error) error\n}\n\ntype ResponseHandlerSetter interface {\n\tResponseHandler() ResponseHandler\n\tSetResponseHandler(ResponseHandler)\n}\n\ntype Runner interface {\n\tRun(Subscriber)\n}\n\ntype Connector interface {\n\tservice.Startable\n\tservice.Stopable\n\tservice.Endpoint\n\tSenderSetter\n\tResponseHandlerSetter\n\tRunner\n\tManager() Manager\n\tContext() context.Context\n}\n\ntype ResponsiveConnector interface {\n\tConnector\n\tResponseHandler\n}\n\ntype connector struct {\n\tconfig  Config\n\tsender  Sender\n\thandler ResponseHandler\n\tmanager Manager\n\tqueue   Queue\n\trouter  router.Router\n\n\tmux *mux.Router\n\n\tctx    context.Context\n\tcancel context.CancelFunc\n\n\tlogger *log.Entry\n\twg     sync.WaitGroup\n}\n\ntype Config struct {\n\tName       string\n\tSchema     string\n\tPrefix     string\n\tURLPattern string\n\tWorkers    int\n}\n\nfunc NewConnector(router router.Router, sender Sender, config Config) (Connector, error) {\n\tkvs, err := router.KVStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.Workers <= 0 {\n\t\tconfig.Workers = 
DefaultWorkers\n\t}\n\n\tc := &connector{\n\t\tconfig:  config,\n\t\tsender:  sender,\n\t\tmanager: NewManager(config.Schema, kvs),\n\t\tqueue:   NewQueue(sender, config.Workers),\n\t\trouter:  router,\n\t\tlogger:  logger.WithField(\"name\", config.Name),\n\t}\n\tc.initMuxRouter()\n\treturn c, nil\n}\n\nfunc (c *connector) initMuxRouter() {\n\tmuxRouter := mux.NewRouter()\n\n\tbaseRouter := muxRouter.PathPrefix(c.GetPrefix()).Subrouter()\n\tbaseRouter.Methods(http.MethodGet).HandlerFunc(c.GetList)\n\tbaseRouter.Methods(http.MethodPost).PathPrefix(SubstitutePath).HandlerFunc(c.Substitute)\n\n\tsubRouter := baseRouter.Path(c.config.URLPattern).Subrouter()\n\tsubRouter.Methods(http.MethodPost).HandlerFunc(c.Post)\n\tsubRouter.Methods(http.MethodDelete).HandlerFunc(c.Delete)\n\tc.mux = muxRouter\n}\n\nfunc (c *connector) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tc.logger.WithFields(log.Fields{\n\t\t\"path\": req.URL.RequestURI(),\n\t}).Info(\"Handling HTTP request\")\n\tc.mux.ServeHTTP(w, req)\n\n}\n\nfunc (c *connector) GetPrefix() string {\n\treturn c.config.Prefix\n}\n\n// GetList returns list of subscribers\nfunc (c *connector) GetList(w http.ResponseWriter, req *http.Request) {\n\tquery := req.URL.Query()\n\tfilters := make(map[string]string, len(query))\n\n\tfor key, value := range query {\n\t\tif len(value) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfilters[key] = value[0]\n\t}\n\n\tc.logger.WithField(\"filters\", filters).Info(\"Get list of subscriptions\")\n\tif len(filters) == 0 {\n\t\thttp.Error(w, `{\"error\":\"Missing filters\"}`, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tsubscribers := c.manager.Filter(filters)\n\ttopics := make([]string, 0, len(subscribers))\n\tfor _, s := range subscribers {\n\t\ttopics = append(topics, s.Route().Path.RemovePrefixSlash())\n\t}\n\n\tencoder := json.NewEncoder(w)\n\terr := encoder.Encode(topics)\n\tif err != nil {\n\t\thttp.Error(w, \"Error encoding data.\", 
http.StatusInternalServerError)\n\t\tc.logger.WithField(\"error\", err.Error()).Error(\"Error encoding data.\")\n\t\treturn\n\t}\n}\n\n// Post creates a new subscriber\nfunc (c *connector) Post(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tc.logger.WithField(\"params\", params).Info(\"POST subscription\")\n\ttopic, ok := params[TopicParam]\n\tif !ok {\n\t\tfmt.Fprintf(w, \"Missing topic parameter.\")\n\t\treturn\n\t}\n\tdelete(params, TopicParam)\n\tparams[ConnectorParam] = c.config.Name\n\tc.logger.WithField(\"params\", params).WithField(\"topic\", topic).Info(\"Creating subscription\")\n\tsubscriber, err := c.manager.Create(protocol.Path(\"/\"+topic), params)\n\tif err != nil {\n\t\tif err == ErrSubscriberExists {\n\t\t\tfmt.Fprintf(w, `{\"error\":\"subscription already exists\"}`)\n\t\t} else {\n\t\t\thttp.Error(w, fmt.Sprintf(`{\"error\":\"unknown error: %s\"}`, err.Error()), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\tgo c.Run(subscriber)\n\tc.logger.WithField(\"topic\", topic).Info(\"Subscription created\")\n\tfmt.Fprintf(w, `{\"subscribed\":\"/%v\"}`, topic)\n}\n\n// Delete removes a subscriber\nfunc (c *connector) Delete(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tc.logger.WithField(\"params\", params).Info(\"DELETE subscription\")\n\ttopic, ok := params[TopicParam]\n\tif !ok {\n\t\tfmt.Fprintf(w, \"Missing topic parameter.\")\n\t\treturn\n\t}\n\tdelete(params, TopicParam)\n\tparams[ConnectorParam] = c.config.Name\n\tc.logger.WithField(\"params\", params).WithField(\"topic\", topic).Info(\"Finding subscription to delete it\")\n\tsubscriber := c.manager.Find(GenerateKey(\"/\"+topic, params))\n\tif subscriber == nil {\n\t\thttp.Error(w, `{\"error\":\"subscription not found\"}`, http.StatusNotFound)\n\t\treturn\n\t}\n\tc.logger.WithField(\"params\", params).WithField(\"topic\", topic).Info(\"Deleting subscription\")\n\terr := c.manager.Remove(subscriber)\n\tif err != nil 
{\n\t\thttp.Error(w, fmt.Sprintf(`{\"error\":\"unknown error: %s\"}`, err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, `{\"unsubscribed\":\"/%v\"}`, topic)\n}\n\nfunc (c *connector) Substitute(w http.ResponseWriter, req *http.Request) {\n\ts := new(substitution)\n\terr := json.NewDecoder(req.Body).Decode(&s)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(`{\"error\":\"json body could not be decoded: %s\"}`, err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif !s.isValid() {\n\t\thttp.Error(w, `{\"error\":\"not all required values were supplied\"}`, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfilters := map[string]string{}\n\tfilters[s.FieldName] = s.OldValue\n\tsubscribers := c.manager.Filter(filters)\n\ttotalSubscribersUpdated := 0\n\tfor _, sub := range subscribers {\n\t\tsub.Route().Set(s.FieldName, s.NewValue)\n\t\terr = c.manager.Update(sub)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(`{\"error\":\"%s\"}`, err.Error()), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\ttotalSubscribersUpdated++\n\t}\n\n\tc.logger.WithField(\"subscribers\", subscribers).WithField(\"req\", s).Info(\"Substituted subscriber info \")\n\tfmt.Fprintf(w, `{\"modified\":\"%d\"}`, totalSubscribersUpdated)\n}\n\n// Start will run start all current subscriptions and workers to process the messages\nfunc (c *connector) Start() error {\n\tc.queue.Start()\n\n\tc.logger.Info(\"Starting connector\")\n\tc.ctx, c.cancel = context.WithCancel(context.Background())\n\n\tc.logger.Info(\"Loading subscriptions\")\n\terr := c.manager.Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.logger.Info(\"Starting subscriptions\")\n\tfor _, s := range c.manager.List() {\n\t\tgo c.Run(s)\n\t}\n\n\tc.logger.Info(\"Started connector\")\n\treturn nil\n}\n\nfunc (c *connector) Run(s Subscriber) {\n\tc.wg.Add(1)\n\tdefer c.wg.Done()\n\n\tvar provideErr error\n\tgo func() {\n\t\terr := s.Route().Provide(c.router, true)\n\t\tif err != nil {\n\t\t\t// 
cancel subscription loop if there is an error on the provider\n\t\t\tprovideErr = err\n\t\t\ts.Cancel()\n\t\t}\n\t}()\n\n\terr := s.Loop(c.ctx, c.queue)\n\tif err != nil && provideErr == nil {\n\t\tc.logger.WithField(\"error\", err.Error()).Error(\"Error returned by subscriber loop\")\n\t\t// if context cancelled loop then unsubscribe the route from router\n\t\t// in case it's been subscribed\n\t\tif err == context.Canceled {\n\t\t\tc.router.Unsubscribe(s.Route())\n\t\t\treturn\n\t\t}\n\n\t\t// If Route channel closed try restarting\n\t\tif err == ErrRouteChannelClosed {\n\t\t\tc.restart(s)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif provideErr != nil {\n\t\t// TODO Bogdan Treat errors where a subscription provide fails\n\t\tc.logger.WithField(\"error\", provideErr.Error()).Error(\"Route provide error\")\n\n\t\t// Router closed the route, try restart\n\t\tif provideErr == router.ErrInvalidRoute {\n\t\t\tc.restart(s)\n\t\t\treturn\n\t\t}\n\t\t// Router module is stopping, exit the process\n\t\tif _, ok := provideErr.(*router.ModuleStoppingError); ok {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *connector) restart(s Subscriber) error {\n\ts.Cancel()\n\terr := s.Reset()\n\tif err != nil {\n\t\tc.logger.WithField(\"err\", err.Error()).Error(\"Error reseting subscriber\")\n\t\treturn err\n\t}\n\tgo c.Run(s)\n\treturn nil\n}\n\n// Stop the connector (the context, the queue, the subscription loops)\nfunc (c *connector) Stop() error {\n\tc.logger.Info(\"Stopping connector\")\n\tc.cancel()\n\tc.queue.Stop()\n\tc.wg.Wait()\n\tc.logger.Info(\"Stopped connector\")\n\treturn nil\n}\n\nfunc (c *connector) Manager() Manager {\n\treturn c.manager\n}\n\nfunc (c *connector) Context() context.Context {\n\treturn c.ctx\n}\n\nfunc (c *connector) ResponseHandler() ResponseHandler {\n\treturn c.handler\n}\n\nfunc (c *connector) SetResponseHandler(handler ResponseHandler) {\n\tc.handler = handler\n\tc.queue.SetResponseHandler(handler)\n}\n\nfunc (c *connector) Sender() Sender {\n\treturn 
c.sender\n}\n\nfunc (c *connector) SetSender(s Sender) {\n\tc.sender = s\n\tc.queue.SetSender(s)\n}\n"
  },
  {
    "path": "server/connector/connector_test.go",
    "content": "package connector\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\ntype connectorMocks struct {\n\trouter  *MockRouter\n\tsender  *MockSender\n\tqueue   *MockQueue\n\tmanager *MockManager\n\tkvstore *MockKVStore\n}\n\n// Ensure the subscription is started when posting\nfunc TestConnector_PostSubscription(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trecorder := httptest.NewRecorder()\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, true, false)\n\n\tmocks.manager.EXPECT().Load().Return(nil)\n\tmocks.manager.EXPECT().List().Return(make([]Subscriber, 0))\n\terr := conn.Start()\n\ta.NoError(err)\n\tdefer conn.Stop()\n\n\tsubscriber := NewMockSubscriber(testutil.MockCtrl)\n\tmocks.manager.EXPECT().Create(gomock.Eq(protocol.Path(\"/topic1\")), gomock.Eq(router.RouteParams{\n\t\t\"device_token\": \"device1\",\n\t\t\"user_id\":      \"user1\",\n\t\t\"connector\":    \"test\",\n\t})).Return(subscriber, nil)\n\n\tsubscriber.EXPECT().Loop(gomock.Any(), gomock.Any())\n\tr := router.NewRoute(router.RouteConfig{\n\t\tPath: protocol.Path(\"topic1\"),\n\t\tRouteParams: router.RouteParams{\n\t\t\t\"device_token\": \"device1\",\n\t\t\t\"user_id\":      \"user1\",\n\t\t},\n\t})\n\tsubscriber.EXPECT().Route().Return(r)\n\tmocks.router.EXPECT().Subscribe(gomock.Eq(r)).Return(r, nil)\n\n\treq, err := http.NewRequest(http.MethodPost, \"/connector/device1/user1/topic1\", strings.NewReader(\"\"))\n\ta.NoError(err)\n\tconn.ServeHTTP(recorder, req)\n\ta.Equal(`{\"subscribed\":\"/topic1\"}`, 
recorder.Body.String())\n\ttime.Sleep(100 * time.Millisecond)\n}\n\nfunc TestConnector_PostSubscriptionNoMocks(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trecorder := httptest.NewRecorder()\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"name\",\n\t\tSchema:     \"schema\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, false, false)\n\n\tentriesC := make(chan [2]string)\n\tmocks.kvstore.EXPECT().Iterate(gomock.Eq(\"schema\"), gomock.Eq(\"\")).Return(entriesC)\n\tclose(entriesC)\n\n\tmocks.kvstore.EXPECT().Put(gomock.Eq(\"schema\"), gomock.Eq(GenerateKey(\"/topic1\", map[string]string{\n\t\t\"device_token\": \"device1\",\n\t\t\"user_id\":      \"user1\",\n\t\t\"connector\":    \"name\",\n\t})), gomock.Any())\n\n\tmocks.router.EXPECT().Subscribe(gomock.Any())\n\n\terr := conn.Start()\n\ta.NoError(err)\n\tdefer conn.Stop()\n\n\treq, err := http.NewRequest(http.MethodPost, \"/connector/device1/user1/topic1\", strings.NewReader(\"\"))\n\ta.NoError(err)\n\tconn.ServeHTTP(recorder, req)\n\ta.Equal(`{\"subscribed\":\"/topic1\"}`, recorder.Body.String())\n\ttime.Sleep(100 * time.Millisecond)\n}\n\nfunc TestConnector_DeleteSubscription(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trecorder := httptest.NewRecorder()\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"name\",\n\t\tSchema:     \"schema\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, true, false)\n\n\tsubscriber := NewMockSubscriber(testutil.MockCtrl)\n\tmocks.manager.EXPECT().Find(gomock.Eq(GenerateKey(\"/topic1\", map[string]string{\n\t\t\"device_token\": \"device1\",\n\t\t\"user_id\":      \"user1\",\n\t\t\"connector\":    \"name\",\n\t}))).Return(subscriber)\n\tmocks.manager.EXPECT().Remove(subscriber).Return(nil)\n\n\treq, err := 
http.NewRequest(http.MethodDelete, \"/connector/device1/user1/topic1\", strings.NewReader(\"\"))\n\ta.NoError(err)\n\tconn.ServeHTTP(recorder, req)\n\ta.Equal(`{\"unsubscribed\":\"/topic1\"}`, recorder.Body.String())\n\ttime.Sleep(200 * time.Millisecond)\n}\n\nfunc TestConnector_GetList_And_Getters(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trecorder := httptest.NewRecorder()\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, true, false)\n\n\treq, err := http.NewRequest(http.MethodGet, \"/connector/\", strings.NewReader(\"\"))\n\ta.NoError(err)\n\n\tconn.ServeHTTP(recorder, req)\n\texpectedJSON := `{\"error\":\"Missing filters\"}`\n\ta.JSONEq(expectedJSON, recorder.Body.String())\n\ta.Equal(http.StatusBadRequest, recorder.Code)\n\n\ta.Equal(\"/connector/\", conn.GetPrefix())\n\ta.Equal(mocks.manager, conn.Manager())\n\ta.Equal(nil, conn.ResponseHandler())\n}\n\nfunc TestConnector_GetListWithFilters(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trecorder := httptest.NewRecorder()\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, true, false)\n\n\tmocks.manager.EXPECT().Filter(gomock.Eq(map[string]string{\n\t\t\"filter1\": \"value1\",\n\t\t\"filter2\": \"value2\",\n\t})).Return([]Subscriber{})\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodGet,\n\t\t\"/connector/?filter1=value1&filter2=value2\",\n\t\tstrings.NewReader(\"\"))\n\ta.NoError(err)\n\n\tconn.ServeHTTP(recorder, req)\n}\n\nfunc TestConnector_StartWithSubscriptions(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tconn, mocks := getTestConnector(t, 
Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, false, false)\n\n\tentriesC := make(chan [2]string)\n\tmocks.kvstore.EXPECT().Iterate(gomock.Eq(\"test\"), gomock.Eq(\"\")).Return(entriesC)\n\tclose(entriesC)\n\tmocks.kvstore.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any()).Times(4)\n\n\terr := conn.Start()\n\ta.NoError(err)\n\n\troutes := make([]*router.Route, 0, 4)\n\tmocks.router.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\troutes = append(routes, r)\n\t\treturn r, nil\n\t}).Times(4)\n\n\t// create subscriptions\n\tcreateSubscriptions(t, conn, 4)\n\ttime.Sleep(100 * time.Millisecond)\n\n\tmocks.sender.EXPECT().Send(gomock.Any()).Return(nil, nil).Times(4)\n\n\t// send message in route channel\n\tfor i, r := range routes {\n\t\tr.Deliver(&protocol.Message{\n\t\t\tID:   uint64(i),\n\t\t\tPath: protocol.Path(\"/topic\"),\n\t\t\tBody: []byte(\"test body\"),\n\t\t}, true)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\terr = conn.Stop()\n\ta.NoError(err)\n}\n\nfunc TestConnector_Substitute(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, false, false)\n\n\tentriesC := make(chan [2]string)\n\tmocks.kvstore.EXPECT().Iterate(gomock.Eq(\"test\"), gomock.Eq(\"\")).Return(entriesC)\n\tclose(entriesC)\n\tmocks.kvstore.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any()).Times(4)\n\n\terr := conn.Start()\n\ta.NoError(err)\n\n\troutes := make([]*router.Route, 0, 4)\n\tmocks.router.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\troutes = append(routes, r)\n\t\treturn r, nil\n\t}).Times(4)\n\n\t// create 
subscriptions\n\tcreateSubscriptions(t, conn, 4)\n\ttime.Sleep(100 * time.Millisecond)\n\n\tpostBody := `{\n\t\t\t\"field\":\"device_token\",\n\t\t\t\"old_value\":\"device1\",\n\t\t\t\"new_value\":\"asgasgasgagasgaasg2\"\n\t\t\t}\n\t`\n\tmocks.kvstore.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(http.MethodPost, \"/connector\"+SubstitutePath, strings.NewReader(postBody))\n\tconn.ServeHTTP(recorder, req)\n\n\ta.Equal(http.StatusOK, recorder.Code)\n\ta.Equal(`{\"modified\":\"1\"}`, recorder.Body.String())\n}\n\nfunc TestConnector_SubstituteWrongPostBody(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, false, false)\n\n\tentriesC := make(chan [2]string)\n\tmocks.kvstore.EXPECT().Iterate(gomock.Eq(\"test\"), gomock.Eq(\"\")).Return(entriesC)\n\tclose(entriesC)\n\tmocks.kvstore.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any()).Times(4)\n\n\terr := conn.Start()\n\ta.NoError(err)\n\n\troutes := make([]*router.Route, 0, 4)\n\tmocks.router.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\troutes = append(routes, r)\n\t\treturn r, nil\n\t}).Times(4)\n\n\t// create subscriptions\n\tcreateSubscriptions(t, conn, 4)\n\ttime.Sleep(100 * time.Millisecond)\n\n\tpostBody := `{\n\t\t\t\"field_invalid\":\"device_token\",\n\t\t\t\"old_value\":\"device1\",\n\t\t\t\"new_value\":\"asgasgasgagasgaasg2\"\n\t\t\t}\n\t`\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(http.MethodPost, \"/connector\"+SubstitutePath, strings.NewReader(postBody))\n\tconn.ServeHTTP(recorder, req)\n\n\ta.Equal(http.StatusBadRequest, recorder.Code)\n}\n\nfunc createSubscriptions(t *testing.T, conn Connector, count int) 
{\n\ta := assert.New(t)\n\tfor i := 1; i <= count; i++ {\n\t\trecorder := httptest.NewRecorder()\n\t\tr, err := http.NewRequest(\n\t\t\thttp.MethodPost,\n\t\t\tfmt.Sprintf(\"/connector/device%d/user%d/topic\", i, i),\n\t\t\tstrings.NewReader(\"\"))\n\t\ta.NoError(err)\n\t\tconn.ServeHTTP(recorder, r)\n\t\ta.Equal(200, recorder.Code)\n\t\ta.Equal(`{\"subscribed\":\"/topic\"}`, recorder.Body.String())\n\t}\n}\n\nfunc TestConnector_StartAndStopWithoutSubscribers(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\tconn, mocks := getTestConnector(t, Config{\n\t\tName:       \"test\",\n\t\tSchema:     \"test\",\n\t\tPrefix:     \"/connector/\",\n\t\tURLPattern: \"/{device_token}/{user_id}/{topic:.*}\",\n\t}, true, true)\n\tmocks.manager.EXPECT().Load().Return(nil)\n\tmocks.manager.EXPECT().List().Return(nil)\n\tmocks.queue.EXPECT().Start().Return(nil)\n\tmocks.queue.EXPECT().Stop().Return(nil)\n\n\terr := conn.Start()\n\ta.NoError(err)\n\n\terr = conn.Stop()\n\ta.NoError(err)\n}\n\nfunc getTestConnector(t *testing.T, config Config, mockManager bool, mockQueue bool) (Connector, *connectorMocks) {\n\ta := assert.New(t)\n\n\tvar (\n\t\tmManager *MockManager\n\t\tmQueue   *MockQueue\n\t)\n\n\tmKVS := NewMockKVStore(testutil.MockCtrl)\n\tmRouter := NewMockRouter(testutil.MockCtrl)\n\tmRouter.EXPECT().KVStore().Return(mKVS, nil).AnyTimes()\n\tmSender := NewMockSender(testutil.MockCtrl)\n\n\tconn, err := NewConnector(mRouter, mSender, config)\n\ta.NoError(err)\n\n\tif mockManager {\n\t\tmManager = NewMockManager(testutil.MockCtrl)\n\t\tconn.(*connector).manager = mManager\n\t}\n\tif mockQueue {\n\t\tmQueue = NewMockQueue(testutil.MockCtrl)\n\t\tconn.(*connector).queue = mQueue\n\t}\n\n\treturn conn, &connectorMocks{\n\t\tmRouter,\n\t\tmSender,\n\t\tmQueue,\n\t\tmManager,\n\t\tmKVS,\n\t}\n}\n"
  },
  {
    "path": "server/connector/logger.go",
    "content": "package connector\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithField(\"module\", \"connector\")\n"
  },
  {
    "path": "server/connector/manager.go",
    "content": "package connector\n\nimport (\n\t\"sync\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n)\n\ntype Manager interface {\n\tLoad() error\n\tList() []Subscriber\n\tFilter(map[string]string) []Subscriber\n\tFind(string) Subscriber\n\tExists(string) bool\n\tCreate(protocol.Path, router.RouteParams) (Subscriber, error)\n\tAdd(Subscriber) error\n\tUpdate(Subscriber) error\n\tRemove(Subscriber) error\n}\n\ntype manager struct {\n\tsync.RWMutex\n\tschema      string\n\tkvstore     kvstore.KVStore\n\tsubscribers map[string]Subscriber\n}\n\nfunc NewManager(schema string, kvstore kvstore.KVStore) Manager {\n\treturn &manager{\n\t\tschema:      schema,\n\t\tkvstore:     kvstore,\n\t\tsubscribers: make(map[string]Subscriber, 0),\n\t}\n}\n\nfunc (m *manager) Load() error {\n\t// try to load s from kvstore\n\tentries := m.kvstore.Iterate(m.schema, \"\")\n\tfor e := range entries {\n\t\tsubscriber, err := NewSubscriberFromJSON([]byte(e[1]))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm.subscribers[subscriber.Key()] = subscriber\n\t}\n\treturn nil\n}\n\nfunc (m *manager) Find(key string) Subscriber {\n\tm.RLock()\n\tdefer m.RUnlock()\n\n\tif s, exists := m.subscribers[key]; exists {\n\t\treturn s\n\t}\n\treturn nil\n}\n\nfunc (m *manager) Create(topic protocol.Path, params router.RouteParams) (Subscriber, error) {\n\tkey := GenerateKey(string(topic), params)\n\t//TODO MARIAN  remove this logs   when 503 is done.\n\tlogger.WithField(\"key\", key).Info(\"Create generated key\")\n\tif m.Exists(key) {\n\t\tlogger.WithField(\"key\", key).Info(\"Create key exists already\")\n\t\treturn nil, ErrSubscriberExists\n\t}\n\n\ts := NewSubscriber(topic, params, 0)\n\n\tlogger.WithField(\"subscriber\", s).Info(\"Created new subscriber\")\n\terr := m.Add(s)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Info(\"Create Manager Add failed\")\n\t\treturn nil, 
err\n\t}\n\tlogger.Info(\"Create finished\")\n\treturn s, nil\n}\n\nfunc (m *manager) List() []Subscriber {\n\tm.RLock()\n\tdefer m.RUnlock()\n\n\tl := make([]Subscriber, 0, len(m.subscribers))\n\tfor _, s := range m.subscribers {\n\t\tl = append(l, s)\n\t}\n\treturn l\n}\n\nfunc (m *manager) Filter(filters map[string]string) (subscribers []Subscriber) {\n\tm.RLock()\n\tdefer m.RUnlock()\n\n\tfor _, s := range m.subscribers {\n\t\tif s.Filter(filters) {\n\t\t\tsubscribers = append(subscribers, s)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (m *manager) Add(s Subscriber) error {\n\tlogger.WithField(\"subscriber\", s).WithField(\"lock\", m.RWMutex).Info(\"Add subscriber started\")\n\n\tif m.Exists(s.Key()) {\n\t\treturn ErrSubscriberExists\n\t}\n\n\tif err := m.updateStore(s); err != nil {\n\t\treturn err\n\t}\n\n\tm.putSubscriber(s)\n\tlogger.WithField(\"subscriber\", s).Info(\"Add subscriber finished\")\n\treturn nil\n}\n\nfunc (m *manager) Update(s Subscriber) error {\n\tlogger.WithField(\"subscriber\", s).Info(\"Update subscriber started\")\n\tif !m.Exists(s.Key()) {\n\t\treturn ErrSubscriberDoesNotExist\n\t}\n\n\terr := m.updateStore(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.putSubscriber(s)\n\tlogger.WithField(\"subscriber\", s).Info(\"Update subscriber finished\")\n\treturn nil\n}\n\nfunc (m *manager) putSubscriber(s Subscriber) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tm.subscribers[s.Key()] = s\n}\n\nfunc (m *manager) deleteSubscriber(s Subscriber) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tdelete(m.subscribers, s.Key())\n}\n\nfunc (m *manager) Exists(key string) bool {\n\tm.RLock()\n\tdefer m.RUnlock()\n\n\t_, found := m.subscribers[key]\n\treturn found\n}\n\nfunc (m *manager) Remove(s Subscriber) error {\n\tlogger.WithField(\"subscriber\", s).Info(\"Remove subscriber started\")\n\tm.cancelSubscriber(s)\n\n\tif !m.Exists(s.Key()) {\n\t\treturn ErrSubscriberDoesNotExist\n\t}\n\n\terr := m.removeStore(s)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tm.deleteSubscriber(s)\n\tlogger.WithField(\"subscriber\", s).Info(\"Remove subscriber finished\")\n\treturn nil\n}\n\nfunc (m *manager) cancelSubscriber(s Subscriber) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\ts.Cancel()\n}\n\nfunc (m *manager) updateStore(s Subscriber) error {\n\tdata, err := s.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\t//TODO MARIAN also remove this logs.\n\tlogger.WithField(\"subscriber\", s).Info(\"UpdateStore\")\n\treturn m.kvstore.Put(m.schema, s.Key(), data)\n}\n\nfunc (m *manager) removeStore(s Subscriber) error {\n\t//TODO MARIAN also remove this logs.\n\tlogger.WithField(\"subscriber\", s).Info(\"RemoveStore\")\n\treturn m.kvstore.Delete(m.schema, s.Key())\n}\n"
  },
  {
    "path": "server/connector/manager_test.go",
    "content": "package connector\n"
  },
  {
    "path": "server/connector/mocks_connector_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/connector (interfaces: Connector,Sender,ResponseHandler,Manager,Queue,Request,Subscriber)\n\npackage connector\n\nimport (\n\t\"context\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\n\t\"github.com/smancke/guble/server/router\"\n\t\"net/http\"\n)\n\n// Mock of Connector interface\ntype MockConnector struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockConnectorRecorder\n}\n\n// Recorder for MockConnector (not exported)\ntype _MockConnectorRecorder struct {\n\tmock *MockConnector\n}\n\nfunc NewMockConnector(ctrl *gomock.Controller) *MockConnector {\n\tmock := &MockConnector{ctrl: ctrl}\n\tmock.recorder = &_MockConnectorRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockConnector) EXPECT() *_MockConnectorRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockConnector) Context() context.Context {\n\tret := _m.ctrl.Call(_m, \"Context\")\n\tret0, _ := ret[0].(context.Context)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) Context() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Context\")\n}\n\nfunc (_m *MockConnector) GetPrefix() string {\n\tret := _m.ctrl.Call(_m, \"GetPrefix\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) GetPrefix() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetPrefix\")\n}\n\nfunc (_m *MockConnector) Manager() Manager {\n\tret := _m.ctrl.Call(_m, \"Manager\")\n\tret0, _ := ret[0].(Manager)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) Manager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Manager\")\n}\n\nfunc (_m *MockConnector) ResponseHandler() ResponseHandler {\n\tret := _m.ctrl.Call(_m, \"ResponseHandler\")\n\tret0, _ := ret[0].(ResponseHandler)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) ResponseHandler() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, 
\"ResponseHandler\")\n}\n\nfunc (_m *MockConnector) Run(_param0 Subscriber) {\n\t_m.ctrl.Call(_m, \"Run\", _param0)\n}\n\nfunc (_mr *_MockConnectorRecorder) Run(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Run\", arg0)\n}\n\nfunc (_m *MockConnector) Sender() Sender {\n\tret := _m.ctrl.Call(_m, \"Sender\")\n\tret0, _ := ret[0].(Sender)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) Sender() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Sender\")\n}\n\nfunc (_m *MockConnector) ServeHTTP(_param0 http.ResponseWriter, _param1 *http.Request) {\n\t_m.ctrl.Call(_m, \"ServeHTTP\", _param0, _param1)\n}\n\nfunc (_mr *_MockConnectorRecorder) ServeHTTP(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"ServeHTTP\", arg0, arg1)\n}\n\nfunc (_m *MockConnector) SetResponseHandler(_param0 ResponseHandler) {\n\t_m.ctrl.Call(_m, \"SetResponseHandler\", _param0)\n}\n\nfunc (_mr *_MockConnectorRecorder) SetResponseHandler(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetResponseHandler\", arg0)\n}\n\nfunc (_m *MockConnector) SetSender(_param0 Sender) {\n\t_m.ctrl.Call(_m, \"SetSender\", _param0)\n}\n\nfunc (_mr *_MockConnectorRecorder) SetSender(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetSender\", arg0)\n}\n\nfunc (_m *MockConnector) Start() error {\n\tret := _m.ctrl.Call(_m, \"Start\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) Start() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Start\")\n}\n\nfunc (_m *MockConnector) Stop() error {\n\tret := _m.ctrl.Call(_m, \"Stop\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockConnectorRecorder) Stop() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Stop\")\n}\n\n// Mock of Sender interface\ntype MockSender struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockSenderRecorder\n}\n\n// 
Recorder for MockSender (not exported)\ntype _MockSenderRecorder struct {\n\tmock *MockSender\n}\n\nfunc NewMockSender(ctrl *gomock.Controller) *MockSender {\n\tmock := &MockSender{ctrl: ctrl}\n\tmock.recorder = &_MockSenderRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockSender) EXPECT() *_MockSenderRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockSender) Send(_param0 Request) (interface{}, error) {\n\tret := _m.ctrl.Call(_m, \"Send\", _param0)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockSenderRecorder) Send(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Send\", arg0)\n}\n\n// Mock of ResponseHandler interface\ntype MockResponseHandler struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockResponseHandlerRecorder\n}\n\n// Recorder for MockResponseHandler (not exported)\ntype _MockResponseHandlerRecorder struct {\n\tmock *MockResponseHandler\n}\n\nfunc NewMockResponseHandler(ctrl *gomock.Controller) *MockResponseHandler {\n\tmock := &MockResponseHandler{ctrl: ctrl}\n\tmock.recorder = &_MockResponseHandlerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockResponseHandler) EXPECT() *_MockResponseHandlerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockResponseHandler) HandleResponse(_param0 Request, _param1 interface{}, _param2 *Metadata, _param3 error) error {\n\tret := _m.ctrl.Call(_m, \"HandleResponse\", _param0, _param1, _param2, _param3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockResponseHandlerRecorder) HandleResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleResponse\", arg0, arg1, arg2, arg3)\n}\n\n// Mock of Manager interface\ntype MockManager struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockManagerRecorder\n}\n\n// Recorder for MockManager (not exported)\ntype _MockManagerRecorder struct {\n\tmock *MockManager\n}\n\nfunc NewMockManager(ctrl *gomock.Controller) 
*MockManager {\n\tmock := &MockManager{ctrl: ctrl}\n\tmock.recorder = &_MockManagerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockManager) EXPECT() *_MockManagerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockManager) Add(_param0 Subscriber) error {\n\tret := _m.ctrl.Call(_m, \"Add\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Add(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Add\", arg0)\n}\n\nfunc (_m *MockManager) Create(_param0 protocol.Path, _param1 router.RouteParams) (Subscriber, error) {\n\tret := _m.ctrl.Call(_m, \"Create\", _param0, _param1)\n\tret0, _ := ret[0].(Subscriber)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockManagerRecorder) Create(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Create\", arg0, arg1)\n}\n\nfunc (_m *MockManager) Exists(_param0 string) bool {\n\tret := _m.ctrl.Call(_m, \"Exists\", _param0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Exists(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Exists\", arg0)\n}\n\nfunc (_m *MockManager) Filter(_param0 map[string]string) []Subscriber {\n\tret := _m.ctrl.Call(_m, \"Filter\", _param0)\n\tret0, _ := ret[0].([]Subscriber)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Filter(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Filter\", arg0)\n}\n\nfunc (_m *MockManager) Find(_param0 string) Subscriber {\n\tret := _m.ctrl.Call(_m, \"Find\", _param0)\n\tret0, _ := ret[0].(Subscriber)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Find(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Find\", arg0)\n}\n\nfunc (_m *MockManager) List() []Subscriber {\n\tret := _m.ctrl.Call(_m, \"List\")\n\tret0, _ := ret[0].([]Subscriber)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) List() *gomock.Call 
{\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"List\")\n}\n\nfunc (_m *MockManager) Load() error {\n\tret := _m.ctrl.Call(_m, \"Load\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Load() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Load\")\n}\n\nfunc (_m *MockManager) Remove(_param0 Subscriber) error {\n\tret := _m.ctrl.Call(_m, \"Remove\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Remove(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Remove\", arg0)\n}\n\nfunc (_m *MockManager) Update(_param0 Subscriber) error {\n\tret := _m.ctrl.Call(_m, \"Update\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockManagerRecorder) Update(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Update\", arg0)\n}\n\n// Mock of Queue interface\ntype MockQueue struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockQueueRecorder\n}\n\n// Recorder for MockQueue (not exported)\ntype _MockQueueRecorder struct {\n\tmock *MockQueue\n}\n\nfunc NewMockQueue(ctrl *gomock.Controller) *MockQueue {\n\tmock := &MockQueue{ctrl: ctrl}\n\tmock.recorder = &_MockQueueRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockQueue) EXPECT() *_MockQueueRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockQueue) Push(_param0 Request) error {\n\tret := _m.ctrl.Call(_m, \"Push\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockQueueRecorder) Push(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Push\", arg0)\n}\n\nfunc (_m *MockQueue) ResponseHandler() ResponseHandler {\n\tret := _m.ctrl.Call(_m, \"ResponseHandler\")\n\tret0, _ := ret[0].(ResponseHandler)\n\treturn ret0\n}\n\nfunc (_mr *_MockQueueRecorder) ResponseHandler() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"ResponseHandler\")\n}\n\nfunc (_m *MockQueue) Sender() Sender {\n\tret := 
_m.ctrl.Call(_m, \"Sender\")\n\tret0, _ := ret[0].(Sender)\n\treturn ret0\n}\n\nfunc (_mr *_MockQueueRecorder) Sender() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Sender\")\n}\n\nfunc (_m *MockQueue) SetResponseHandler(_param0 ResponseHandler) {\n\t_m.ctrl.Call(_m, \"SetResponseHandler\", _param0)\n}\n\nfunc (_mr *_MockQueueRecorder) SetResponseHandler(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetResponseHandler\", arg0)\n}\n\nfunc (_m *MockQueue) SetSender(_param0 Sender) {\n\t_m.ctrl.Call(_m, \"SetSender\", _param0)\n}\n\nfunc (_mr *_MockQueueRecorder) SetSender(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetSender\", arg0)\n}\n\nfunc (_m *MockQueue) Start() error {\n\tret := _m.ctrl.Call(_m, \"Start\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockQueueRecorder) Start() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Start\")\n}\n\nfunc (_m *MockQueue) Stop() error {\n\tret := _m.ctrl.Call(_m, \"Stop\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockQueueRecorder) Stop() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Stop\")\n}\n\n// Mock of Request interface\ntype MockRequest struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRequestRecorder\n}\n\n// Recorder for MockRequest (not exported)\ntype _MockRequestRecorder struct {\n\tmock *MockRequest\n}\n\nfunc NewMockRequest(ctrl *gomock.Controller) *MockRequest {\n\tmock := &MockRequest{ctrl: ctrl}\n\tmock.recorder = &_MockRequestRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRequest) EXPECT() *_MockRequestRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRequest) Message() *protocol.Message {\n\tret := _m.ctrl.Call(_m, \"Message\")\n\tret0, _ := ret[0].(*protocol.Message)\n\treturn ret0\n}\n\nfunc (_mr *_MockRequestRecorder) Message() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Message\")\n}\n\nfunc (_m *MockRequest) 
Subscriber() Subscriber {\n\tret := _m.ctrl.Call(_m, \"Subscriber\")\n\tret0, _ := ret[0].(Subscriber)\n\treturn ret0\n}\n\nfunc (_mr *_MockRequestRecorder) Subscriber() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscriber\")\n}\n\n// Mock of Subscriber interface\ntype MockSubscriber struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockSubscriberRecorder\n}\n\n// Recorder for MockSubscriber (not exported)\ntype _MockSubscriberRecorder struct {\n\tmock *MockSubscriber\n}\n\nfunc NewMockSubscriber(ctrl *gomock.Controller) *MockSubscriber {\n\tmock := &MockSubscriber{ctrl: ctrl}\n\tmock.recorder = &_MockSubscriberRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockSubscriber) EXPECT() *_MockSubscriberRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockSubscriber) Cancel() {\n\t_m.ctrl.Call(_m, \"Cancel\")\n}\n\nfunc (_mr *_MockSubscriberRecorder) Cancel() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cancel\")\n}\n\nfunc (_m *MockSubscriber) Encode() ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"Encode\")\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockSubscriberRecorder) Encode() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Encode\")\n}\n\nfunc (_m *MockSubscriber) Filter(_param0 map[string]string) bool {\n\tret := _m.ctrl.Call(_m, \"Filter\", _param0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Filter(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Filter\", arg0)\n}\n\nfunc (_m *MockSubscriber) Key() string {\n\tret := _m.ctrl.Call(_m, \"Key\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Key() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Key\")\n}\n\nfunc (_m *MockSubscriber) Loop(_param0 context.Context, _param1 Queue) error {\n\tret := _m.ctrl.Call(_m, \"Loop\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn 
ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Loop(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Loop\", arg0, arg1)\n}\n\nfunc (_m *MockSubscriber) Reset() error {\n\tret := _m.ctrl.Call(_m, \"Reset\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Reset() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Reset\")\n}\n\nfunc (_m *MockSubscriber) Route() *router.Route {\n\tret := _m.ctrl.Call(_m, \"Route\")\n\tret0, _ := ret[0].(*router.Route)\n\treturn ret0\n}\n\nfunc (_mr *_MockSubscriberRecorder) Route() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Route\")\n}\n\nfunc (_m *MockSubscriber) SetLastID(_param0 uint64) {\n\t_m.ctrl.Call(_m, \"SetLastID\", _param0)\n}\n\nfunc (_mr *_MockSubscriberRecorder) SetLastID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"SetLastID\", arg0)\n}\n"
  },
  {
    "path": "server/connector/mocks_kvstore_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/kvstore (interfaces: KVStore)\n\npackage connector\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of KVStore interface\ntype MockKVStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockKVStoreRecorder\n}\n\n// Recorder for MockKVStore (not exported)\ntype _MockKVStoreRecorder struct {\n\tmock *MockKVStore\n}\n\nfunc NewMockKVStore(ctrl *gomock.Controller) *MockKVStore {\n\tmock := &MockKVStore{ctrl: ctrl}\n\tmock.recorder = &_MockKVStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockKVStore) EXPECT() *_MockKVStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockKVStore) Delete(_param0 string, _param1 string) error {\n\tret := _m.ctrl.Call(_m, \"Delete\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Delete\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Get(_param0 string, _param1 string) ([]byte, bool, error) {\n\tret := _m.ctrl.Call(_m, \"Get\", _param0, _param1)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(bool)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockKVStoreRecorder) Get(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Get\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Iterate(_param0 string, _param1 string) chan [2]string {\n\tret := _m.ctrl.Call(_m, \"Iterate\", _param0, _param1)\n\tret0, _ := ret[0].(chan [2]string)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Iterate(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Iterate\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) IterateKeys(_param0 string, _param1 string) chan string {\n\tret := _m.ctrl.Call(_m, \"IterateKeys\", _param0, _param1)\n\tret0, _ := ret[0].(chan string)\n\treturn ret0\n}\n\nfunc 
(_mr *_MockKVStoreRecorder) IterateKeys(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IterateKeys\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Put(_param0 string, _param1 string, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Put\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Put(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Put\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/connector/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage connector\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/connector/queue.go",
    "content": "package connector\n\nimport (\n\t\"sync\"\n\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\n// Queue is an interface modeling a task-queue (it is started and more Requests can be pushed to it, and finally it is stopped after all requests are handled).\ntype Queue interface {\n\tResponseHandlerSetter\n\tSenderSetter\n\n\tStart() error\n\tPush(request Request) error\n\tStop() error\n}\n\ntype queue struct {\n\tsender          Sender\n\tresponseHandler ResponseHandler\n\trequestsC       chan Request\n\tnWorkers        int\n\tmetrics         bool\n\twg              sync.WaitGroup\n}\n\n// NewQueue returns a new Queue (not started).\nfunc NewQueue(sender Sender, nWorkers int) Queue {\n\tq := &queue{\n\t\tsender:   sender,\n\t\tnWorkers: nWorkers,\n\t\tmetrics:  true,\n\t}\n\treturn q\n}\n\nfunc (q *queue) SetResponseHandler(rh ResponseHandler) {\n\tq.responseHandler = rh\n}\n\nfunc (q *queue) ResponseHandler() ResponseHandler {\n\treturn q.responseHandler\n}\n\nfunc (q *queue) Sender() Sender {\n\treturn q.sender\n}\n\nfunc (q *queue) SetSender(s Sender) {\n\tq.sender = s\n}\n\n// Start a fixed number of goroutines to handle requests and responses w.r.t. 
external push-notification services.\nfunc (q *queue) Start() error {\n\tq.requestsC = make(chan Request)\n\tfor i := 1; i <= q.nWorkers; i++ {\n\t\tgo q.worker(i)\n\t}\n\treturn nil\n}\n\nfunc (q *queue) worker(i int) {\n\tlogger.WithField(\"worker\", i).Info(\"starting queue worker\")\n\tfor request := range q.requestsC {\n\t\tq.handle(request)\n\t}\n}\n\nfunc (q *queue) handle(request Request) {\n\tq.wg.Add(1)\n\tdefer q.wg.Done()\n\n\tvar beforeSend time.Time\n\tif q.metrics {\n\t\tbeforeSend = time.Now()\n\t}\n\tresponse, err := q.sender.Send(request)\n\tif q.responseHandler != nil {\n\t\tvar metadata *Metadata\n\t\tif q.metrics {\n\t\t\tmetadata = &Metadata{time.Since(beforeSend)}\n\t\t}\n\t\terr = q.responseHandler.HandleResponse(request, response, metadata, err)\n\t\tif err != nil {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"error\":      err.Error(),\n\t\t\t\t\"subscriber\": request.Subscriber(),\n\t\t\t\t\"message\":    request.Message(),\n\t\t\t}).Error(\"error handling connector response\")\n\t\t}\n\t} else if err == nil {\n\t\tlogger.WithField(\"response\", response).Info(\"no response handler was set\")\n\t} else {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"error while sending, and no response handler was set\")\n\t}\n}\n\nfunc (q *queue) Push(request Request) error {\n\t// recover if the channel been closed\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tswitch x := r.(type) {\n\t\t\tcase error:\n\t\t\t\tlogger.WithError(x).Error(\"recovered from error\")\n\t\t\tdefault:\n\t\t\t\tpanic(r)\n\t\t\t}\n\t\t}\n\t}()\n\n\tq.requestsC <- request\n\treturn nil\n}\n\nfunc (q *queue) Stop() error {\n\tclose(q.requestsC)\n\tq.wg.Wait()\n\treturn nil\n}\n"
  },
  {
    "path": "server/connector/request.go",
    "content": "package connector\n\nimport \"github.com/smancke/guble/protocol\"\n\ntype Request interface {\n\tSubscriber() Subscriber\n\tMessage() *protocol.Message\n}\n\ntype request struct {\n\tsubscriber Subscriber\n\tmessage    *protocol.Message\n}\n\nfunc NewRequest(s Subscriber, m *protocol.Message) Request {\n\treturn &request{s, m}\n}\n\nfunc (r *request) Subscriber() Subscriber {\n\treturn r.subscriber\n}\n\nfunc (r *request) Message() *protocol.Message {\n\treturn r.message\n}\n"
  },
  {
    "path": "server/connector/subscriber.go",
    "content": "package connector\n\nimport (\n\t\"context\"\n\t\"crypto/sha1\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\nvar (\n\tErrSubscriberExists       = errors.New(\"Subscriber exists.\")\n\tErrSubscriberDoesNotExist = errors.New(\"Subscriber does not exist.\")\n\n\tErrRouteChannelClosed = errors.New(\"Subscriber route channel has been closed.\")\n)\n\ntype Subscriber interface {\n\t// Reset will recreate the route inside the subscribe with the information stored\n\t// in the subscriber data\n\tReset() error\n\tKey() string\n\tRoute() *router.Route\n\tFilter(map[string]string) bool\n\tLoop(context.Context, Queue) error\n\tSetLastID(ID uint64)\n\tCancel()\n\tEncode() ([]byte, error)\n}\n\ntype SubscriberData struct {\n\tTopic  protocol.Path\n\tParams router.RouteParams\n\tLastID uint64\n}\n\nfunc (sd *SubscriberData) newRoute() *router.Route {\n\tvar fr *store.FetchRequest\n\tif sd.LastID > 0 {\n\t\tfr = store.NewFetchRequest(sd.Topic.Partition(), sd.LastID, 0, store.DirectionForward, -1)\n\t}\n\treturn router.NewRoute(router.RouteConfig{\n\t\tPath:         sd.Topic,\n\t\tRouteParams:  sd.Params,\n\t\tFetchRequest: fr,\n\t})\n}\n\ntype subscriber struct {\n\tdata SubscriberData\n\n\tkey    string\n\troute  *router.Route\n\tcancel context.CancelFunc\n}\n\nfunc NewSubscriber(topic protocol.Path, params router.RouteParams, lastID uint64) Subscriber {\n\treturn NewSubscriberFromData(SubscriberData{\n\t\tTopic:  topic,\n\t\tParams: params,\n\t\tLastID: lastID,\n\t})\n}\n\nfunc NewSubscriberFromData(data SubscriberData) Subscriber {\n\treturn &subscriber{\n\t\tdata:  data,\n\t\troute: data.newRoute(),\n\t}\n}\n\nfunc NewSubscriberFromJSON(data []byte) (Subscriber, error) {\n\tsd := SubscriberData{}\n\terr := json.Unmarshal(data, &sd)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn NewSubscriberFromData(sd), nil\n}\n\nfunc (s *subscriber) String() string {\n\treturn s.Key()\n}\n\nfunc (s *subscriber) Reset() error {\n\ts.route = s.data.newRoute()\n\ts.cancel = nil\n\treturn nil\n}\n\nfunc (s *subscriber) Key() string {\n\tif s.key == \"\" {\n\t\ts.key = GenerateKey(string(s.data.Topic), s.data.Params)\n\t}\n\treturn s.key\n}\n\nfunc (s *subscriber) Filter(filters map[string]string) bool {\n\treturn s.route.Filter(filters)\n}\n\nfunc (s *subscriber) Route() *router.Route {\n\treturn s.route\n}\n\nfunc (s *subscriber) Loop(ctx context.Context, q Queue) error {\n\tvar m *protocol.Message\n\tsCtx, cancel := context.WithCancel(ctx)\n\ts.cancel = cancel\n\tdefer func() { s.cancel = nil }()\n\n\topened := true\n\tfor opened {\n\t\tselect {\n\t\tcase m, opened = <-s.route.MessagesChannel():\n\t\t\tif !opened {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tq.Push(NewRequest(s, m))\n\t\tcase <-sCtx.Done():\n\t\t\t// If the parent context is still running then only this subscriber context\n\t\t\t// has been cancelled\n\t\t\tif ctx.Err() == nil {\n\t\t\t\treturn sCtx.Err()\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t//TODO Cosmin Bogdan returning this error can mean 2 things: overflow of route's channel, or intentional stopping of router / gubled.\n\treturn ErrRouteChannelClosed\n}\n\nfunc (s *subscriber) SetLastID(ID uint64) {\n\ts.data.LastID = ID\n}\n\nfunc (s *subscriber) Cancel() {\n\tif s.cancel != nil {\n\t\ts.cancel()\n\t}\n}\n\nfunc (s *subscriber) Encode() ([]byte, error) {\n\treturn json.Marshal(s.data)\n}\n\nfunc GenerateKey(topic string, params map[string]string) string {\n\t// compute the key from params\n\th := sha1.New()\n\tio.WriteString(h, topic)\n\n\t// compute the hash with ordered params keys\n\tkeys := make([]string, 0, len(params))\n\tfor k := range params {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tio.WriteString(h, fmt.Sprintf(\"%s:%s\", k, params[k]))\n\t}\n\tsum := 
h.Sum(nil)\n\treturn hex.EncodeToString(sum[:])\n}\n"
  },
  {
    "path": "server/connector/substitution.go",
    "content": "package connector\n\ntype substitution struct {\n\tFieldName string `json:\"field\"`\n\tOldValue  string `json:\"old_value\"`\n\tNewValue  string `json:\"new_value\"`\n}\n\nfunc (s *substitution) isValid() bool {\n\treturn s.FieldName != \"\" && s.NewValue != \"\" && s.OldValue != \"\"\n}\n"
  },
  {
    "path": "server/fcm/fcm.go",
    "content": "package fcm\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/Bogh/gcm\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"time\"\n)\n\nconst (\n\t// schema is the default database schema for FCM\n\tschema = \"fcm_registration\"\n\n\tdeviceTokenKey = \"device_token\"\n\tuserIDKEy      = \"user_id\"\n)\n\n// Config is used for configuring the Firebase Cloud Messaging component.\ntype Config struct {\n\tEnabled              *bool\n\tAPIKey               *string\n\tWorkers              *int\n\tEndpoint             *string\n\tPrefix               *string\n\tIntervalMetrics      *bool\n\tAfterMessageDelivery protocol.MessageDeliveryCallback\n}\n\n// Connector is the structure for handling the communication with Firebase Cloud Messaging\ntype fcm struct {\n\tConfig\n\tconnector.Connector\n}\n\n// New creates a new *fcm and returns it as an connector.ResponsiveConnector\nfunc New(router router.Router, sender connector.Sender, config Config) (connector.ResponsiveConnector, error) {\n\tbaseConn, err := connector.NewConnector(router, sender, connector.Config{\n\t\tName:       \"fcm\",\n\t\tSchema:     schema,\n\t\tPrefix:     *config.Prefix,\n\t\tURLPattern: fmt.Sprintf(\"/{%s}/{%s}/{%s:.*}\", deviceTokenKey, userIDKEy, connector.TopicParam),\n\t\tWorkers:    *config.Workers,\n\t})\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Base connector error\")\n\t\treturn nil, err\n\t}\n\n\tf := &fcm{config, baseConn}\n\tf.SetResponseHandler(f)\n\treturn f, nil\n}\n\nfunc (f *fcm) Start() error {\n\terr := f.Connector.Start()\n\tif err == nil {\n\t\tf.startMetrics()\n\t}\n\treturn err\n}\n\nfunc (f *fcm) startMetrics() 
{\n\tmTotalSentMessages.Set(0)\n\tmTotalSendErrors.Set(0)\n\tmTotalResponseErrors.Set(0)\n\tmTotalResponseInternalErrors.Set(0)\n\tmTotalResponseNotRegisteredErrors.Set(0)\n\tmTotalReplacedCanonicalErrors.Set(0)\n\tmTotalResponseOtherErrors.Set(0)\n\n\tif *f.IntervalMetrics {\n\t\tf.startIntervalMetric(mMinute, time.Minute)\n\t\tf.startIntervalMetric(mHour, time.Hour)\n\t\tf.startIntervalMetric(mDay, time.Hour*24)\n\t}\n}\n\nfunc (f *fcm) startIntervalMetric(m metrics.Map, td time.Duration) {\n\tmetrics.RegisterInterval(f.Context(), m, td, resetIntervalMetrics, processAndResetIntervalMetrics)\n}\n\nfunc (f *fcm) HandleResponse(request connector.Request, responseIface interface{}, metadata *connector.Metadata, err error) error {\n\tif err != nil && !isValidResponseError(err) {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error sending message to FCM\")\n\t\tmTotalSendErrors.Add(1)\n\t\tif *f.IntervalMetrics && metadata != nil {\n\t\t\taddToLatenciesAndCountsMaps(currentTotalErrorsLatenciesKey, currentTotalErrorsKey, metadata.Latency)\n\t\t}\n\t\treturn err\n\t}\n\tmessage := request.Message()\n\tsubscriber := request.Subscriber()\n\n\tresponse, ok := responseIface.(*gcm.Response)\n\tif !ok {\n\t\tmTotalResponseErrors.Add(1)\n\t\treturn fmt.Errorf(\"Invalid FCM Response\")\n\t}\n\n\tlogger.WithField(\"messageID\", message.ID).Debug(\"Delivered message to FCM\")\n\tsubscriber.SetLastID(message.ID)\n\tif err := f.Manager().Update(request.Subscriber()); err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Manager could not update subscription\")\n\t\tmTotalResponseInternalErrors.Add(1)\n\t\treturn err\n\t}\n\tif response.Ok() {\n\t\tmTotalSentMessages.Add(1)\n\t\tif *f.IntervalMetrics && metadata != nil {\n\t\t\taddToLatenciesAndCountsMaps(currentTotalMessagesLatenciesKey, currentTotalMessagesKey, metadata.Latency)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlogger.WithField(\"success\", response.Success).Debug(\"Handling FCM Error\")\n\n\tswitch errText := 
response.Error.Error(); errText {\ncase \"NotRegistered\":\n\t\tlogger.Debug(\"Removing not registered FCM subscription\")\n\t\tf.Manager().Remove(subscriber)\n\t\tmTotalResponseNotRegisteredErrors.Add(1)\n\t\treturn response.Error\ncase \"InvalidRegistration\":\n\t\tlogger.WithField(\"jsonError\", errText).Error(\"InvalidRegistration of FCM subscription\")\ndefault:\n\t\tlogger.WithField(\"jsonError\", errText).Error(\"Unexpected error while sending to FCM\")\n\t}\n\nif response.CanonicalIDs != 0 {\n\t\tmTotalReplacedCanonicalErrors.Add(1)\n\t\t// we only send to one receiver, so we know that we can replace the old id with the first registration id (=canonical id)\n\t\treturn f.replaceCanonical(request.Subscriber(), response.Results[0].RegistrationID)\n\t}\n\tmTotalResponseOtherErrors.Add(1)\n\treturn nil\n}\n\nfunc (f *fcm) replaceCanonical(subscriber connector.Subscriber, newToken string) error {\n\tmanager := f.Manager()\n\terr := manager.Remove(subscriber)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttopic := subscriber.Route().Path\n\tparams := subscriber.Route().RouteParams.Copy()\n\n\tparams[deviceTokenKey] = newToken\n\n\tnewSubscriber, err := manager.Create(topic, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo f.Run(newSubscriber)\n\treturn nil\n}\n"
  },
  {
    "path": "server/fcm/fcm_metrics.go",
    "content": "package fcm\n\nimport (\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"time\"\n)\n\nvar (\n\tns                                = metrics.NS(\"fcm\")\n\tmTotalSentMessages                = ns.NewInt(\"total_sent_messages\")\n\tmTotalSendErrors                  = ns.NewInt(\"total_sent_message_errors\")\n\tmTotalResponseErrors              = ns.NewInt(\"total_response_errors\")\n\tmTotalResponseInternalErrors      = ns.NewInt(\"total_response_internal_errors\")\n\tmTotalResponseNotRegisteredErrors = ns.NewInt(\"total_response_not_registered_errors\")\n\tmTotalReplacedCanonicalErrors     = ns.NewInt(\"total_replaced_canonical_errors\")\n\tmTotalResponseOtherErrors         = ns.NewInt(\"total_response_other_errors\")\n\tmMinute                           = ns.NewMap(\"minute\")\n\tmHour                             = ns.NewMap(\"hour\")\n\tmDay                              = ns.NewMap(\"day\")\n)\n\nconst (\n\tcurrentTotalMessagesLatenciesKey = \"current_messages_total_latencies_nanos\"\n\tcurrentTotalMessagesKey          = \"current_messages_count\"\n\tcurrentTotalErrorsLatenciesKey   = \"current_errors_total_latencies_nanos\"\n\tcurrentTotalErrorsKey            = \"current_errors_count\"\n)\n\nfunc processAndResetIntervalMetrics(m metrics.Map, td time.Duration, t time.Time) {\n\tmsgLatenciesValue := m.Get(currentTotalMessagesLatenciesKey)\n\tmsgNumberValue := m.Get(currentTotalMessagesKey)\n\terrLatenciesValue := m.Get(currentTotalErrorsLatenciesKey)\n\terrNumberValue := m.Get(currentTotalErrorsKey)\n\n\tm.Init()\n\tresetIntervalMetrics(m, t)\n\tmetrics.SetRate(m, \"last_messages_rate_sec\", msgNumberValue, td, time.Second)\n\tmetrics.SetRate(m, \"last_errors_rate_sec\", errNumberValue, td, time.Second)\n\tmetrics.SetAverage(m, \"last_messages_average_latency_msec\",\n\t\tmsgLatenciesValue, msgNumberValue, metrics.MilliPerNano, metrics.DefaultAverageLatencyJSONValue)\n\tmetrics.SetAverage(m, 
\"last_errors_average_latency_msec\",\n\t\terrLatenciesValue, errNumberValue, metrics.MilliPerNano, metrics.DefaultAverageLatencyJSONValue)\n}\n\nfunc resetIntervalMetrics(m metrics.Map, t time.Time) {\n\tm.Set(\"current_interval_start\", metrics.NewTime(t))\n\tmetrics.AddToMaps(currentTotalMessagesLatenciesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalMessagesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalErrorsLatenciesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalErrorsKey, 0, m)\n}\n\nfunc addToLatenciesAndCountsMaps(latenciesKey string, countKey string, latency time.Duration) {\n\tmetrics.AddToMaps(latenciesKey, int64(latency), mMinute, mHour, mDay)\n\tmetrics.AddToMaps(countKey, 1, mMinute, mHour, mDay)\n}\n"
  },
  {
    "path": "server/fcm/fcm_sender.go",
    "content": "package fcm\n\nimport (\n\t\"encoding/json\"\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/Bogh/gcm\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/connector\"\n)\n\nconst (\n\t// sendRetries is the number of retries when something fails\n\tsendRetries = 5\n\n\t// sendTimeout timeout to wait for response from FCM\n\tsendTimeout = time.Second\n)\n\ntype sender struct {\n\tgcmSender gcm.Sender\n}\n\nfunc NewSender(apiKey string) *sender {\n\treturn &sender{\n\t\tgcmSender: gcm.NewSender(apiKey, sendRetries, sendTimeout),\n\t}\n}\n\nfunc (s *sender) Send(request connector.Request) (interface{}, error) {\n\tdeviceToken := request.Subscriber().Route().Get(deviceTokenKey)\n\tfcmMessage := fcmMessage(request.Message())\n\tfcmMessage.To = deviceToken\n\tlogger.WithFields(log.Fields{\"deviceToken\": fcmMessage.To}).Debug(\"sending message\")\n\treturn s.gcmSender.Send(fcmMessage)\n}\n\nfunc fcmMessage(message *protocol.Message) *gcm.Message {\n\tm := &gcm.Message{}\n\n\terr := json.Unmarshal(message.Body, m)\n\tif err != nil {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"error\":     err.Error(),\n\t\t\t\"body\":      string(message.Body),\n\t\t\t\"messageID\": message.ID,\n\t\t}).Debug(\"Could not decode gcm.Message from guble message body\")\n\t} else if m.Notification != nil && m.Data != nil {\n\t\treturn m\n\t}\n\n\terr = json.Unmarshal(message.Body, &m.Data)\n\tif err != nil {\n\t\tm.Data = map[string]interface{}{\n\t\t\t\"message\": message.Body,\n\t\t}\n\t}\n\n\treturn m\n}\n\n// isValidResponseError returns True if the error is accepted as a valid response\n// cases are InvalidRegistration and NotRegistered\nfunc isValidResponseError(err error) bool {\n\treturn err.Error() == \"InvalidRegistration\" || err.Error() == \"NotRegistered\"\n}\n"
  },
  {
    "path": "server/fcm/fcm_test.go",
    "content": "package fcm\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/Bogh/gcm\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar fullFCMMessage = `{\n\t\"notification\": {\n\t\t\"title\": \"TEST\",\n\t\t\"body\": \"notification body\",\n\t\t\"icon\": \"ic_notification_test_icon\",\n\t\t\"click_action\": \"estimated_arrival\"\n\t},\n\t\"data\": {\"field1\": \"value1\", \"field2\": \"value2\"}\n}`\n\ntype mocks struct {\n\trouter    *MockRouter\n\tstore     *MockMessageStore\n\tgcmSender *MockSender\n}\n\nfunc TestConnector_GetErrorMessageFromFCM(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tfcm, mocks := testFCM(t, true)\n\n\terr := fcm.Start()\n\ta.NoError(err)\n\n\tvar route *router.Route\n\n\tmocks.router.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\ta.Equal(\"/topic\", string(r.Path))\n\t\ta.Equal(\"user01\", r.Get(\"user_id\"))\n\t\ta.Equal(\"device01\", r.Get(deviceTokenKey))\n\t\troute = r\n\t\treturn r, nil\n\t})\n\n\t// put a dummy FCM message with minimum information\n\tpostSubscription(t, fcm, \"user01\", \"device01\", \"topic\")\n\ttime.Sleep(100 * time.Millisecond)\n\ta.NoError(err)\n\ta.NotNil(route)\n\n\t// expect the route unsubscribed\n\tmocks.router.EXPECT().Unsubscribe(gomock.Any()).Do(func(route *router.Route) {\n\t\ta.Equal(\"/topic\", string(route.Path))\n\t\ta.Equal(\"device01\", route.Get(deviceTokenKey))\n\t})\n\n\t// expect the route subscribe with the new canonicalID from replaceSubscriptionWithCanonicalID\n\tmocks.router.EXPECT().Subscribe(gomock.Any()).Do(func(route *router.Route) 
{\n\t\ta.Equal(\"/topic\", string(route.Path))\n\t\ta.Equal(\"user01\", route.Get(\"user_id\"))\n\t\tappid := route.Get(deviceTokenKey)\n\t\ta.Equal(\"fcmCanonicalID\", appid)\n\t})\n\t// mocks.store.EXPECT().MaxMessageID(gomock.Any()).Return(uint64(4), nil)\n\n\tresponse := new(gcm.Response)\n\terr = json.Unmarshal([]byte(ErrorFCMResponse), response)\n\ta.NoError(err)\n\tmocks.gcmSender.EXPECT().Send(gomock.Any()).Return(response, nil)\n\n\t// send the message into the subscription route channel\n\troute.Deliver(&protocol.Message{\n\t\tID:   uint64(4),\n\t\tPath: \"/topic\",\n\t\tBody: []byte(\"{id:id}\"),\n\t}, true)\n\n\t// wait before closing the FCM connector\n\ttime.Sleep(100 * time.Millisecond)\n\n\terr = fcm.Stop()\n\ta.NoError(err)\n}\n\nfunc TestFCMFormatMessage(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\tvar subRoute *router.Route\n\n\tfcm, mocks := testFCM(t, false)\n\tfcm.Start()\n\tdefer fcm.Stop()\n\ttime.Sleep(50 * time.Millisecond)\n\n\tmocks.router.EXPECT().Subscribe(gomock.Any()).Do(func(route *router.Route) (*router.Route, error) {\n\t\tsubRoute = route\n\t\treturn route, nil\n\t})\n\n\tpostSubscription(t, fcm, \"user01\", \"device01\", \"topic\")\n\ttime.Sleep(100 * time.Millisecond)\n\n\t// send a fully formated GCM message\n\tm := &protocol.Message{\n\t\tPath: \"/topic\",\n\t\tID:   1,\n\t\tBody: []byte(fullFCMMessage),\n\t}\n\n\tif !a.NotNil(subRoute) {\n\t\treturn\n\t}\n\n\tdoneC := make(chan bool)\n\n\tmocks.gcmSender.EXPECT().Send(gomock.Any()).Do(func(m *gcm.Message) (*gcm.Response, error) {\n\t\ta.NotNil(m.Notification)\n\t\ta.Equal(\"TEST\", m.Notification.Title)\n\t\ta.Equal(\"notification body\", m.Notification.Body)\n\t\ta.Equal(\"ic_notification_test_icon\", m.Notification.Icon)\n\t\ta.Equal(\"estimated_arrival\", m.Notification.ClickAction)\n\n\t\ta.NotNil(m.Data)\n\t\tif a.Contains(m.Data, \"field1\") {\n\t\t\ta.Equal(\"value1\", m.Data[\"field1\"])\n\t\t}\n\t\tif 
a.Contains(m.Data, \"field2\") {\n\t\t\ta.Equal(\"value2\", m.Data[\"field2\"])\n\t\t}\n\n\t\tdoneC <- true\n\t\treturn nil, nil\n\t}).Return(&gcm.Response{}, nil)\n\n\tsubRoute.Deliver(m, true)\n\tselect {\n\tcase <-doneC:\n\tcase <-time.After(100 * time.Millisecond):\n\t\ta.Fail(\"Message not received by FCM\")\n\t}\n\n\tm = &protocol.Message{\n\t\tPath: \"/topic\",\n\t\tID:   1,\n\t\tBody: []byte(`plain body`),\n\t}\n\n\tmocks.gcmSender.EXPECT().Send(gomock.Any()).Do(func(m *gcm.Message) (*gcm.Response, error) {\n\t\ta.Nil(m.Notification)\n\n\t\ta.NotNil(m.Data)\n\t\ta.Contains(m.Data, \"message\")\n\n\t\tdoneC <- true\n\t\treturn nil, nil\n\t}).Return(&gcm.Response{}, nil)\n\n\tsubRoute.Deliver(m, true)\n\tselect {\n\tcase <-doneC:\n\tcase <-time.After(100 * time.Millisecond):\n\t\ta.Fail(\"Message not received by FCM\")\n\t}\n}\n\nfunc testFCM(t *testing.T, mockStore bool) (connector.ResponsiveConnector, *mocks) {\n\tmcks := new(mocks)\n\n\tmcks.router = NewMockRouter(testutil.MockCtrl)\n\tmcks.router.EXPECT().Cluster().Return(nil).AnyTimes()\n\n\tkvs := kvstore.NewMemoryKVStore()\n\tmcks.router.EXPECT().KVStore().Return(kvs, nil).AnyTimes()\n\n\tkey := \"TEST-API-KEY\"\n\tnWorkers := 1\n\tendpoint := \"\"\n\tprefix := \"/fcm/\"\n\tintervalMetrics := false\n\n\tmcks.gcmSender = NewMockSender(testutil.MockCtrl)\n\tsender := NewSender(key)\n\tsender.gcmSender = mcks.gcmSender\n\n\tconn, err := New(mcks.router, sender, Config{\n\t\tAPIKey:          &key,\n\t\tWorkers:         &nWorkers,\n\t\tEndpoint:        &endpoint,\n\t\tPrefix:          &prefix,\n\t\tIntervalMetrics: &intervalMetrics,\n\t})\n\tassert.NoError(t, err)\n\tif mockStore {\n\t\tmcks.store = NewMockMessageStore(testutil.MockCtrl)\n\t\tmcks.router.EXPECT().MessageStore().Return(mcks.store, nil).AnyTimes()\n\t}\n\n\treturn conn, mcks\n}\n\nfunc postSubscription(t *testing.T, fcmConn connector.ResponsiveConnector, userID, gcmID, topic string) {\n\ta := assert.New(t)\n\tu := 
fmt.Sprintf(\"http://localhost/fcm/%s/%s/%s\", gcmID, userID, topic)\n\treq, err := http.NewRequest(http.MethodPost, u, nil)\n\ta.NoError(err)\n\tw := httptest.NewRecorder()\n\n\tfcmConn.ServeHTTP(w, req)\n\n\ta.Equal(fmt.Sprintf(`{\"subscribed\":\"/%s\"}`, topic), string(w.Body.Bytes()))\n}\n\nfunc deleteSubscription(t *testing.T, fcmConn connector.ResponsiveConnector, userID, gcmID, topic string) {\n\ta := assert.New(t)\n\tu := fmt.Sprintf(\"http://localhost/fcm/%s/%s/%s\", gcmID, userID, topic)\n\treq, err := http.NewRequest(http.MethodDelete, u, nil)\n\ta.NoError(err)\n\tw := httptest.NewRecorder()\n\n\tfcmConn.ServeHTTP(w, req)\n\n\ta.Equal(fmt.Sprintf(`{\"unsubscribed\":\"/%s\"}`, topic), string(w.Body.Bytes()))\n}\n\nfunc removeTrailingSlash(path string) string {\n\tif len(path) > 1 && path[len(path)-1] == '/' {\n\t\treturn path[:len(path)-1]\n\t}\n\treturn path\n}\n"
  },
  {
    "path": "server/fcm/json_error.go",
    "content": "package fcm\n\ntype jsonError struct {\n\tjson string\n}\n\nfunc (e *jsonError) Error() string {\n\treturn e.json\n}\n"
  },
  {
    "path": "server/fcm/logger.go",
    "content": "package fcm\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithField(\"module\", \"fcm\")\n"
  },
  {
    "path": "server/fcm/mocks_gcm_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/Bogh/gcm (interfaces: Sender)\n\npackage fcm\n\nimport (\n\tgcm \"github.com/Bogh/gcm\"\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of Sender interface\ntype MockSender struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockSenderRecorder\n}\n\n// Recorder for MockSender (not exported)\ntype _MockSenderRecorder struct {\n\tmock *MockSender\n}\n\nfunc NewMockSender(ctrl *gomock.Controller) *MockSender {\n\tmock := &MockSender{ctrl: ctrl}\n\tmock.recorder = &_MockSenderRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockSender) EXPECT() *_MockSenderRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockSender) Send(_param0 *gcm.Message) (*gcm.Response, error) {\n\tret := _m.ctrl.Call(_m, \"Send\", _param0)\n\tret0, _ := ret[0].(*gcm.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockSenderRecorder) Send(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Send\", arg0)\n}\n"
  },
  {
    "path": "server/fcm/mocks_kvstore_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/kvstore (interfaces: KVStore)\n\npackage fcm\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of KVStore interface\ntype MockKVStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockKVStoreRecorder\n}\n\n// Recorder for MockKVStore (not exported)\ntype _MockKVStoreRecorder struct {\n\tmock *MockKVStore\n}\n\nfunc NewMockKVStore(ctrl *gomock.Controller) *MockKVStore {\n\tmock := &MockKVStore{ctrl: ctrl}\n\tmock.recorder = &_MockKVStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockKVStore) EXPECT() *_MockKVStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockKVStore) Delete(_param0 string, _param1 string) error {\n\tret := _m.ctrl.Call(_m, \"Delete\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Delete\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Get(_param0 string, _param1 string) ([]byte, bool, error) {\n\tret := _m.ctrl.Call(_m, \"Get\", _param0, _param1)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(bool)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockKVStoreRecorder) Get(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Get\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Iterate(_param0 string, _param1 string) chan [2]string {\n\tret := _m.ctrl.Call(_m, \"Iterate\", _param0, _param1)\n\tret0, _ := ret[0].(chan [2]string)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Iterate(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Iterate\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) IterateKeys(_param0 string, _param1 string) chan string {\n\tret := _m.ctrl.Call(_m, \"IterateKeys\", _param0, _param1)\n\tret0, _ := ret[0].(chan string)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockKVStoreRecorder) IterateKeys(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IterateKeys\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Put(_param0 string, _param1 string, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Put\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Put(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Put\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/fcm/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage fcm\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/fcm/mocks_store_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/store (interfaces: MessageStore)\n\npackage fcm\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tstore \"github.com/smancke/guble/server/store\"\n)\n\n// Mock of MessageStore interface\ntype MockMessageStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockMessageStoreRecorder\n}\n\n// Recorder for MockMessageStore (not exported)\ntype _MockMessageStoreRecorder struct {\n\tmock *MockMessageStore\n}\n\nfunc NewMockMessageStore(ctrl *gomock.Controller) *MockMessageStore {\n\tmock := &MockMessageStore{ctrl: ctrl}\n\tmock.recorder = &_MockMessageStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockMessageStore) EXPECT() *_MockMessageStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockMessageStore) DoInTx(_param0 string, _param1 func(uint64) error) error {\n\tret := _m.ctrl.Call(_m, \"DoInTx\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) DoInTx(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"DoInTx\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) Fetch(_param0 *store.FetchRequest) {\n\t_m.ctrl.Call(_m, \"Fetch\", _param0)\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockMessageStore) GenerateNextMsgID(_param0 string, _param1 byte) (uint64, int64, error) {\n\tret := _m.ctrl.Call(_m, \"GenerateNextMsgID\", _param0, _param1)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(int64)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockMessageStoreRecorder) GenerateNextMsgID(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GenerateNextMsgID\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) MaxMessageID(_param0 
string) (uint64, error) {\n\tret := _m.ctrl.Call(_m, \"MaxMessageID\", _param0)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) MaxMessageID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MaxMessageID\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partition(_param0 string) (store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partition\", _param0)\n\tret0, _ := ret[0].(store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partition(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partition\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partitions() ([]store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partitions\")\n\tret0, _ := ret[0].([]store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partitions() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partitions\")\n}\n\nfunc (_m *MockMessageStore) Store(_param0 string, _param1 uint64, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Store\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Store(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Store\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockMessageStore) StoreMessage(_param0 *protocol.Message, _param1 byte) (int, error) {\n\tret := _m.ctrl.Call(_m, \"StoreMessage\", _param0, _param1)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) StoreMessage(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"StoreMessage\", arg0, arg1)\n}\n"
  },
  {
    "path": "server/fcm/testutil.go",
    "content": "package fcm\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/Bogh/gcm\"\n\t\"github.com/smancke/guble/server/connector\"\n)\n\nconst (\n\tSuccessFCMResponse = `{\n\t   \"multicast_id\":3,\n\t   \"success\":1,\n\t   \"failure\":0,\n\t   \"canonical_ids\":0,\n\t   \"results\":[\n\t      {\n\t         \"message_id\":\"da\",\n\t         \"registration_id\":\"rId\",\n\t         \"error\":\"\"\n\t      }\n\t   ]\n\t}`\n\n\tErrorFCMResponse = `{\n\t   \"multicast_id\":3,\n\t   \"success\":0,\n\t   \"failure\":1,\n       \"error\":\"InvalidRegistration\",\n\t   \"canonical_ids\":5,\n\t   \"results\":[\n\t      {\n\t         \"message_id\":\"err\",\n\t         \"registration_id\":\"fcmCanonicalID\",\n\t         \"error\":\"InvalidRegistration\"\n\t      }\n\t   ]\n\t}`\n)\n\nfunc NewSenderWithMock(gcmSender gcm.Sender) *sender {\n\treturn &sender{gcmSender: gcmSender}\n}\n\ntype FCMSender func(message *gcm.Message) (*gcm.Response, error)\n\nfunc (fcms FCMSender) Send(message *gcm.Message) (*gcm.Response, error) {\n\treturn fcms(message)\n}\n\nfunc CreateFcmSender(body string, doneC chan bool, to time.Duration) (connector.Sender, error) {\n\tresponse := new(gcm.Response)\n\n\terr := json.Unmarshal([]byte(body), response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewSenderWithMock(FCMSender(func(message *gcm.Message) (*gcm.Response, error) {\n\t\tdefer func() {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\tfmt.Println(\"Recovered\")\n\t\t\t\t}\n\t\t\t}()\n\t\t\tdoneC <- true\n\t\t}()\n\t\t<-time.After(to)\n\t\treturn response, nil\n\t})), nil\n}\n"
  },
  {
    "path": "server/fcm_integration_test.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"encoding/json\"\n\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/fcm\"\n\t\"github.com/smancke/guble/server/service\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar (\n\ttestHttpPort         = 11000\n\ttimeoutForOneMessage = 50 * time.Millisecond\n)\n\ntype fcmMetricsMap struct {\n\tCurrentErrorsCount            int `json:\"current_errors_count\"`\n\tCurrentMessagesCount          int `json:\"current_messages_count\"`\n\tCurrentMessagesTotalLatencies int `json:\"current_messages_total_latencies_nanos\"`\n\tCurrentErrorsTotalLatencies   int `json:\"current_errors_total_latencies_nanos\"`\n}\n\ntype fcmMetrics struct {\n\tTotalSentMessages      int           `json:\"fcm.total_sent_messages\"`\n\tTotalSentMessageErrors int           `json:\"fcm.total_sent_message_errors\"`\n\tMinute                 fcmMetricsMap `json:\"fcm.minute\"`\n\tHour                   fcmMetricsMap `json:\"fcm.hour\"`\n\tDay                    fcmMetricsMap `json:\"fcm.day\"`\n}\n\ntype routerMetrics struct {\n\tCurrentRoutes        int `json:\"router.current_routes\"`\n\tCurrentSubscriptions int `json:\"router.current_subscriptions\"`\n}\n\ntype expectedValues struct {\n\tZeroLatencies        bool\n\tMessageCount         int\n\tCurrentRoutes        int\n\tCurrentSubscriptions int\n}\n\n// Test that restarting the service continues to fetch messages from store for a subscription from lastID\nfunc TestFCMRestart(t *testing.T) {\n\t// defer testutil.EnableDebugForMethod()()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\ta := assert.New(t)\n\n\treceiveC := make(chan bool)\n\ts, cleanup := serviceSetUp(t)\n\tdefer cleanup()\n\n\tassertMetrics(a, s, expectedValues{true, 0, 0, 0})\n\n\tvar fcmConn 
connector.ResponsiveConnector\n\tvar ok bool\n\tfor _, iface := range s.ModulesSortedByStartOrder() {\n\t\tfcmConn, ok = iface.(connector.ResponsiveConnector)\n\t\tif ok {\n\t\t\tbreak\n\t\t}\n\t}\n\ta.True(ok, \"There should be a module of type FCMConnector\")\n\n\t// add a high timeout so the messages are processed slow\n\tsender, err := fcm.CreateFcmSender(fcm.SuccessFCMResponse, receiveC, 10*time.Millisecond)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\t// create subscription on topic\n\tsubscriptionSetUp(t, s)\n\n\tassertMetrics(a, s, expectedValues{true, 0, 1, 1})\n\n\tc := clientSetUp(t, s)\n\n\t// send 3 messages in the router but read only one and close the service\n\tfor i := 0; i < 3; i++ {\n\t\tc.Send(testTopic, \"dummy body\", \"{dummy: value}\")\n\t}\n\n\t// receive one message only from FCM\n\tselect {\n\tcase <-receiveC:\n\tcase <-time.After(timeoutForOneMessage):\n\t\ta.Fail(\"Initial FCM message not received\")\n\t}\n\n\tassertMetrics(a, s, expectedValues{false, 1, 1, 1})\n\tclose(receiveC)\n\t// restart the service\n\ta.NoError(s.Stop())\n\n\t// remake the sender\n\treceiveC = make(chan bool)\n\tsender, err = fcm.CreateFcmSender(fcm.SuccessFCMResponse, receiveC, 10*time.Millisecond)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\ttime.Sleep(50 * time.Millisecond)\n\ttestutil.ResetDefaultRegistryHealthCheck()\n\ta.NoError(s.Start())\n\n\t//TODO Cosmin Bogdan add 2 calls to assertMetrics before and after the next block\n\n\t// read the other 2 messages\n\tfor i := 0; i < 1; i++ {\n\t\tselect {\n\t\tcase <-receiveC:\n\t\tcase <-time.After(2 * timeoutForOneMessage):\n\t\t\ta.Fail(\"FCM message not received\")\n\t\t}\n\t}\n}\n\nfunc serviceSetUp(t *testing.T) (*service.Service, func()) {\n\tdir, errTempDir := ioutil.TempDir(\"\", \"guble_fcm_test\")\n\tassert.NoError(t, errTempDir)\n\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.Cluster.NodeID = 0\n\t*Config.StoragePath = dir\n\t*Config.MetricsEndpoint = 
\"/admin/metrics\"\n\t*Config.FCM.Enabled = true\n\t*Config.FCM.APIKey = \"WILL BE OVERWRITTEN\"\n\t*Config.FCM.Prefix = \"/fcm/\"\n\t*Config.FCM.Workers = 1 // use only one worker so we can control the number of messages that go to FCM\n\t*Config.APNS.Enabled = false\n\n\tvar s *service.Service\n\tfor s == nil {\n\t\ttestHttpPort++\n\t\tlogger.WithField(\"port\", testHttpPort).Debug(\"trying to use HTTP Port\")\n\t\t*Config.HttpListen = fmt.Sprintf(\"127.0.0.1:%d\", testHttpPort)\n\t\ts = StartService()\n\t}\n\treturn s, func() {\n\t\terrRemove := os.RemoveAll(dir)\n\t\tif errRemove != nil {\n\t\t\tlogger.WithError(errRemove).WithField(\"module\", \"testing\").Error(\"Could not remove directory\")\n\t\t}\n\t}\n}\n\nfunc clientSetUp(t *testing.T, service *service.Service) client.Client {\n\twsURL := \"ws://\" + service.WebServer().GetAddr() + \"/stream/user/user01\"\n\tc, err := client.Open(wsURL, \"http://localhost/\", 1000, false)\n\tassert.NoError(t, err)\n\treturn c\n}\n\nfunc subscriptionSetUp(t *testing.T, service *service.Service) {\n\ta := assert.New(t)\n\n\turlFormat := fmt.Sprintf(\"http://%s/fcm/%%d/gcmId%%d/%%s\", service.WebServer().GetAddr())\n\t// create GCM subscription\n\tresponse, errPost := http.Post(\n\t\tfmt.Sprintf(urlFormat, 1, 1, strings.TrimPrefix(testTopic, \"/\")),\n\t\t\"text/plain\",\n\t\tbytes.NewBufferString(\"\"),\n\t)\n\ta.NoError(errPost)\n\ta.Equal(response.StatusCode, 200)\n\n\tbody, errReadAll := ioutil.ReadAll(response.Body)\n\ta.NoError(errReadAll)\n\ta.Equal(fmt.Sprintf(`{\"subscribed\":\"%s\"}`, testTopic), string(body))\n}\n\nfunc assertMetrics(a *assert.Assertions, s *service.Service, expected expectedValues) {\n\thttpClient := &http.Client{}\n\tu := fmt.Sprintf(\"http://%s%s\", s.WebServer().GetAddr(), defaultMetricsEndpoint)\n\trequest, err := http.NewRequest(http.MethodGet, u, nil)\n\ta.NoError(err)\n\n\tresponse, err := httpClient.Do(request)\n\ta.NoError(err)\n\tdefer response.Body.Close()\n\n\ta.Equal(http.StatusOK, 
response.StatusCode)\n\tbodyBytes, err := ioutil.ReadAll(response.Body)\n\ta.NoError(err)\n\tlogger.WithField(\"body\", string(bodyBytes)).Debug(\"metrics response\")\n\n\tmFCM := &fcmMetrics{}\n\terr = json.Unmarshal(bodyBytes, mFCM)\n\ta.NoError(err)\n\n\ta.Equal(0, mFCM.TotalSentMessageErrors)\n\ta.Equal(expected.MessageCount, mFCM.TotalSentMessages)\n\n\ta.Equal(0, mFCM.Minute.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Minute.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Minute.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Minute.CurrentMessagesTotalLatencies == 0)\n\n\ta.Equal(0, mFCM.Hour.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Hour.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Hour.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Hour.CurrentMessagesTotalLatencies == 0)\n\n\ta.Equal(0, mFCM.Day.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Day.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Day.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Day.CurrentMessagesTotalLatencies == 0)\n\n\tmRouter := &routerMetrics{}\n\terr = json.Unmarshal(bodyBytes, mRouter)\n\ta.NoError(err)\n\n\ta.Equal(expected.CurrentRoutes, mRouter.CurrentRoutes)\n\ta.Equal(expected.CurrentSubscriptions, mRouter.CurrentSubscriptions)\n}\n"
  },
  {
    "path": "server/gubled.go",
    "content": "package server\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/smancke/guble/logformatter\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/apns\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/fcm\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"github.com/smancke/guble/server/rest\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/service\"\n\t\"github.com/smancke/guble/server/sms\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/server/store/dummystore\"\n\t\"github.com/smancke/guble/server/store/filestore\"\n\t\"github.com/smancke/guble/server/webserver\"\n\t\"github.com/smancke/guble/server/websocket\"\n\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"syscall\"\n\n\t\"github.com/Bogh/gcm\"\n\t\"github.com/pkg/profile\"\n\t\"golang.org/x/crypto/ssh/terminal\"\n)\n\nconst (\n\tfileOption = \"file\"\n)\n\nvar AfterMessageDelivery = func(m *protocol.Message) {\n\tlogger.WithField(\"message\", m).Debug(\"message delivered\")\n}\n\n// ValidateStoragePath validates the guble configuration with regard to the storagePath\n// (which can be used by MessageStore and/or KVStore implementations).\nvar ValidateStoragePath = func() error {\n\tif *Config.KVS == fileOption || *Config.MS == fileOption {\n\t\ttestfile := path.Join(*Config.StoragePath, \"write-test-file\")\n\t\tf, err := os.Create(testfile)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).WithField(\"storagePath\", *Config.StoragePath).Error(\"Storage path not present/writeable.\")\n\t\t\treturn err\n\t\t}\n\t\tf.Close()\n\t\tos.Remove(testfile)\n\t}\n\treturn nil\n}\n\n// CreateAccessManager is a func which returns a auth.AccessManager implementation\n// (currently: AllowAllAccessManager).\nvar 
CreateAccessManager = func() auth.AccessManager {\n\treturn auth.NewAllowAllAccessManager(true)\n}\n\n// CreateKVStore is a func which returns a kvstore.KVStore implementation\n// (currently, based on guble configuration).\nvar CreateKVStore = func() kvstore.KVStore {\n\tswitch *Config.KVS {\n\tcase \"memory\":\n\t\treturn kvstore.NewMemoryKVStore()\n\tcase \"file\":\n\t\tdb := kvstore.NewSqliteKVStore(path.Join(*Config.StoragePath, \"kv-store.db\"), true)\n\t\tif err := db.Open(); err != nil {\n\t\t\tlogger.WithError(err).Panic(\"Could not open sqlite database connection\")\n\t\t}\n\t\treturn db\n\tcase \"postgres\":\n\t\tdb := kvstore.NewPostgresKVStore(kvstore.PostgresConfig{\n\t\t\tConnParams: map[string]string{\n\t\t\t\t\"host\":     *Config.Postgres.Host,\n\t\t\t\t\"port\":     strconv.Itoa(*Config.Postgres.Port),\n\t\t\t\t\"user\":     *Config.Postgres.User,\n\t\t\t\t\"password\": *Config.Postgres.Password,\n\t\t\t\t\"dbname\":   *Config.Postgres.DbName,\n\t\t\t\t\"sslmode\":  \"disable\",\n\t\t\t},\n\t\t\tMaxIdleConns: 1,\n\t\t\tMaxOpenConns: runtime.GOMAXPROCS(0),\n\t\t})\n\t\tif err := db.Open(); err != nil {\n\t\t\tlogger.WithError(err).Panic(\"Could not open postgres database connection\")\n\t\t}\n\t\treturn db\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unknown key-value backend: %q\", *Config.KVS))\n\t}\n}\n\n// CreateMessageStore is a func which returns a store.MessageStore implementation\n// (currently, based on guble configuration).\nvar CreateMessageStore = func() store.MessageStore {\n\tswitch *Config.MS {\n\tcase \"none\", \"memory\", \"\":\n\t\treturn dummystore.New(kvstore.NewMemoryKVStore())\n\tcase \"file\":\n\t\tlogger.WithField(\"storagePath\", *Config.StoragePath).Info(\"Using FileMessageStore in directory\")\n\t\treturn filestore.New(*Config.StoragePath)\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unknown message-store backend: %q\", *Config.MS))\n\t}\n}\n\n// CreateModules is a func which returns a slice of modules which should be used by the 
service\n// (currently, based on guble configuration);\n// see package `service` for terminological details.\nvar CreateModules = func(router router.Router) []interface{} {\n\tvar modules []interface{}\n\n\tif wsHandler, err := websocket.NewWSHandler(router, \"/stream/\"); err != nil {\n\t\tlogger.WithError(err).Error(\"Error loading WSHandler module\")\n\t} else {\n\t\tmodules = append(modules, wsHandler)\n\t}\n\n\tmodules = append(modules, rest.NewRestMessageAPI(router, \"/api/\"))\n\n\tif *Config.FCM.Enabled {\n\t\tlogger.Info(\"Firebase Cloud Messaging: enabled\")\n\t\tif *Config.FCM.APIKey == \"\" {\n\t\t\tlogger.Panic(\"The API Key has to be provided when Firebase Cloud Messaging is enabled\")\n\t\t}\n\t\tConfig.FCM.AfterMessageDelivery = AfterMessageDelivery\n\t\t*Config.FCM.IntervalMetrics = true\n\t\tif Config.FCM.Endpoint != nil {\n\t\t\tgcm.GcmSendEndpoint = *Config.FCM.Endpoint\n\t\t}\n\t\tsender := fcm.NewSender(*Config.FCM.APIKey)\n\t\tif fcmConn, err := fcm.New(router, sender, Config.FCM); err != nil {\n\t\t\tlogger.WithError(err).Error(\"Error creating FCM connector\")\n\t\t} else {\n\t\t\tmodules = append(modules, fcmConn)\n\t\t}\n\t} else {\n\t\tlogger.Info(\"Firebase Cloud Messaging: disabled\")\n\t}\n\n\tif *Config.APNS.Enabled {\n\t\tif *Config.APNS.Production {\n\t\t\tlogger.Info(\"APNS: enabled in production mode\")\n\t\t} else {\n\t\t\tlogger.Info(\"APNS: enabled in development mode\")\n\t\t}\n\t\tlogger.Info(\"APNS: enabled\")\n\t\tif *Config.APNS.CertificateFileName == \"\" && Config.APNS.CertificateBytes == nil {\n\t\t\tlogger.Panic(\"The certificate (as filename or bytes) has to be provided when APNS is enabled\")\n\t\t}\n\t\tif *Config.APNS.CertificatePassword == \"\" {\n\t\t\tlogger.Panic(\"A non-empty password has to be provided when APNS is enabled\")\n\t\t}\n\t\tif *Config.APNS.AppTopic == \"\" {\n\t\t\tlogger.Panic(\"The Mobile App Topic (usually the bundle-id) has to be provided when APNS is enabled\")\n\t\t}\n\t\tapnsSender, err 
:= apns.NewSender(Config.APNS)\n\t\tif err != nil {\n\t\t\tlogger.Panic(\"APNS Sender could not be created\")\n\t\t}\n\t\t*Config.APNS.IntervalMetrics = true\n\t\tif apnsConn, err := apns.New(router, apnsSender, Config.APNS); err != nil {\n\t\t\tlogger.WithError(err).Error(\"Error creating APNS connector\")\n\t\t} else {\n\t\t\tmodules = append(modules, apnsConn)\n\t\t}\n\t} else {\n\t\tlogger.Info(\"APNS: disabled\")\n\t}\n\n\tif *Config.SMS.Enabled {\n\t\tlogger.Info(\"Nexmo SMS: enabled\")\n\t\tif *Config.SMS.APIKey == \"\" || *Config.SMS.APISecret == \"\" {\n\t\t\tlogger.Panic(\"The API Key has to be provided when NEXMO SMS connector is enabled\")\n\t\t}\n\t\tnexmoSender, err := sms.NewNexmoSender(*Config.SMS.APIKey, *Config.SMS.APISecret)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"Error creating Nexmo Sender\")\n\t\t}\n\t\tsmsConn, err := sms.New(router, nexmoSender, Config.SMS)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"Error creating SMS connector\")\n\t\t} else {\n\t\t\tmodules = append(modules, smsConn)\n\t\t}\n\t} else {\n\t\tlogger.Info(\"SMS: disabled\")\n\t}\n\n\treturn modules\n}\n\n// Main is the entry-point of the guble server.\nfunc Main() {\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tlogger.Fatal(\"Fatal error in gubled after recover\")\n\t\t}\n\t}()\n\n\tparseConfig()\n\n\tif !terminal.IsTerminal(int(os.Stdout.Fd())) {\n\t\tlog.SetFormatter(&logformatter.LogstashFormatter{Env: *Config.EnvName})\n\t}\n\n\tlevel, err := log.ParseLevel(*Config.Log)\n\tif err != nil {\n\t\tlogger.WithError(err).Fatal(\"Invalid log level\")\n\t}\n\tlog.SetLevel(level)\n\n\tswitch *Config.Profile {\n\tcase cpuProfile:\n\t\tlogger.Info(\"starting to profile cpu\")\n\t\tdefer profile.Start(profile.CPUProfile).Stop()\n\tcase memProfile:\n\t\tlogger.Info(\"starting to profile memory\")\n\t\tdefer profile.Start(profile.MemProfile).Stop()\n\tcase blockProfile:\n\t\tlogger.Info(\"starting to profile 
blocking/contention\")\n\t\tdefer profile.Start(profile.BlockProfile).Stop()\n\tdefault:\n\t\tlogger.Debug(\"no profiling was started\")\n\t}\n\n\tif err := ValidateStoragePath(); err != nil {\n\t\tlogger.Fatal(\"Fatal error in gubled in validation of storage path\")\n\t}\n\n\tsrv := StartService()\n\tif srv == nil {\n\t\tlogger.Fatal(\"exiting because of unrecoverable error(s) when starting the service\")\n\t}\n\n\twaitForTermination(func() {\n\t\terr := srv.Stop()\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"error\", err.Error()).Error(\"errors occurred while stopping service\")\n\t\t}\n\t})\n}\n\n// StartService starts a server.Service after first creating the router (and its dependencies), the webserver.\nfunc StartService() *service.Service {\n\t//TODO StartService could return an error in case it fails to start\n\n\taccessManager := CreateAccessManager()\n\tmessageStore := CreateMessageStore()\n\tkvStore := CreateKVStore()\n\n\tvar cl *cluster.Cluster\n\tvar err error\n\n\tif *Config.Cluster.NodeID > 0 {\n\t\texitIfInvalidClusterParams(*Config.Cluster.NodeID, *Config.Cluster.NodePort, *Config.Cluster.Remotes)\n\t\tlogger.Info(\"Starting in cluster-mode\")\n\t\tcl, err = cluster.New(&cluster.Config{\n\t\t\tID:      *Config.Cluster.NodeID,\n\t\t\tPort:    *Config.Cluster.NodePort,\n\t\t\tRemotes: *Config.Cluster.Remotes,\n\t\t})\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"err\", err).Fatal(\"Module could not be started (cluster)\")\n\t\t}\n\t} else {\n\t\tlogger.Info(\"Starting in standalone-mode\")\n\t}\n\n\tr := router.New(accessManager, messageStore, kvStore, cl)\n\twebsrv := webserver.New(*Config.HttpListen)\n\n\tsrv := service.New(r, websrv).\n\t\tHealthEndpoint(*Config.HealthEndpoint).\n\t\tMetricsEndpoint(*Config.MetricsEndpoint)\n\n\tsrv.RegisterModules(0, 6, kvStore, messageStore)\n\tsrv.RegisterModules(4, 3, CreateModules(r)...)\n\n\tif err = srv.Start(); err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"errors occurred while 
starting service\")\n\t\tif err = srv.Stop(); err != nil {\n\t\t\tlogger.WithField(\"error\", err.Error()).Error(\"errors occurred when stopping service after it failed to start\")\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn srv\n}\n\nfunc exitIfInvalidClusterParams(nodeID uint8, nodePort int, remotes []*net.TCPAddr) {\n\tif (nodeID <= 0 && len(remotes) > 0) || (nodePort <= 0) {\n\t\terrorMessage := \"Could not start in cluster-mode: invalid/incomplete parameters\"\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"nodeID\":          nodeID,\n\t\t\t\"nodePort\":        nodePort,\n\t\t\t\"numberOfRemotes\": len(remotes),\n\t\t}).Fatal(errorMessage)\n\t}\n}\n\nfunc waitForTermination(callback func()) {\n\tsignalC := make(chan os.Signal)\n\tsignal.Notify(signalC, syscall.SIGINT, syscall.SIGTERM)\n\tsig := <-signalC\n\tlogger.Infof(\"Got signal '%v' .. exiting gracefully now\", sig)\n\tcallback()\n\tmetrics.LogOnDebugLevel()\n\tlogger.Info(\"Exit gracefully now\")\n\tos.Exit(0)\n}\n"
  },
  {
    "path": "server/gubled_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/smancke/guble/server/kvstore\"\n\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestValidateStoragePath(t *testing.T) {\n\ta := assert.New(t)\n\n\tvalid := os.TempDir()\n\tinvalid := os.TempDir() + \"/non-existing-directory-for-guble-test\"\n\n\t*Config.MS = \"file\"\n\n\t*Config.StoragePath = valid\n\ta.NoError(ValidateStoragePath())\n\t*Config.StoragePath = invalid\n\n\ta.Error(ValidateStoragePath())\n\n\t*Config.KVS = \"file\"\n\ta.Error(ValidateStoragePath())\n}\n\nfunc TestCreateKVStoreBackend(t *testing.T) {\n\ta := assert.New(t)\n\t*Config.KVS = \"memory\"\n\tmemory := CreateKVStore()\n\ta.Equal(\"*kvstore.MemoryKVStore\", reflect.TypeOf(memory).String())\n\n\tdir, _ := ioutil.TempDir(\"\", \"guble_test\")\n\tdefer os.RemoveAll(dir)\n\n\t*Config.KVS = \"file\"\n\t*Config.StoragePath = dir\n\tsqlite := CreateKVStore()\n\ta.Equal(\"*kvstore.SqliteKVStore\", reflect.TypeOf(sqlite).String())\n}\n\nfunc TestFCMOnlyStartedIfEnabled(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trouterMock := initRouterMock()\n\trouterMock.EXPECT().KVStore().Return(kvstore.NewMemoryKVStore(), nil)\n\n\t*Config.FCM.Enabled = true\n\t*Config.FCM.APIKey = \"xyz\"\n\t*Config.APNS.Enabled = false\n\ta.True(containsFCMModule(CreateModules(routerMock)))\n\n\t*Config.FCM.Enabled = false\n\ta.False(containsFCMModule(CreateModules(routerMock)))\n}\n\nfunc containsFCMModule(modules []interface{}) bool {\n\tfor _, module := range modules {\n\t\tif reflect.TypeOf(module).String() == \"*fcm.fcm\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc TestPanicOnMissingFCMApiKey(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Log(\"expect panic, because the gcm 
api key was not supplied\")\n\t\t\tt.Fail()\n\t\t}\n\t}()\n\n\trouterMock := initRouterMock()\n\t*Config.FCM.APIKey = \"\"\n\t*Config.FCM.Enabled = true\n\tCreateModules(routerMock)\n}\n\nfunc TestCreateStoreBackendPanicInvalidBackend(t *testing.T) {\n\tvar p interface{}\n\tfunc() {\n\t\tdefer func() {\n\t\t\tp = recover()\n\t\t}()\n\n\t\t*Config.KVS = \"foo bar\"\n\t\tCreateKVStore()\n\t}()\n\tassert.NotNil(t, p)\n}\n\nfunc TestStartServiceModules(t *testing.T) {\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\ta := assert.New(t)\n\n\t// when starting a simple valid service\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.FCM.Enabled = false\n\t*Config.APNS.Enabled = false\n\n\t// using an available port for http\n\ttestHttpPort++\n\tlogger.WithField(\"port\", testHttpPort).Debug(\"trying to use HTTP Port\")\n\t*Config.HttpListen = fmt.Sprintf(\":%d\", testHttpPort)\n\n\ts := StartService()\n\n\t// then the number and ordering of modules should be correct\n\ta.Equal(6, len(s.ModulesSortedByStartOrder()))\n\tvar moduleNames []string\n\tfor _, iface := range s.ModulesSortedByStartOrder() {\n\t\tname := reflect.TypeOf(iface).String()\n\t\tmoduleNames = append(moduleNames, name)\n\t}\n\ta.Equal(\"*kvstore.MemoryKVStore *filestore.FileMessageStore *router.router *webserver.WebServer *websocket.WSHandler *rest.RestMessageAPI\",\n\t\tstrings.Join(moduleNames, \" \"))\n}\n\nfunc initRouterMock() *MockRouter {\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\trouterMock.EXPECT().Cluster().Return(nil).AnyTimes()\n\tamMock := NewMockAccessManager(testutil.MockCtrl)\n\tmsMock := NewMockMessageStore(testutil.MockCtrl)\n\n\trouterMock.EXPECT().AccessManager().Return(amMock, nil).AnyTimes()\n\trouterMock.EXPECT().MessageStore().Return(msMock, nil).AnyTimes()\n\n\treturn routerMock\n}\n"
  },
  {
    "path": "server/integration_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/service\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/restclient\"\n\t\"github.com/smancke/guble/testutil\"\n)\n\nfunc initServerAndClients(t *testing.T) (*service.Service, client.Client, client.Client, func()) {\n\t*Config.HttpListen = \"localhost:0\"\n\t*Config.KVS = \"memory\"\n\ts := StartService()\n\n\ttime.Sleep(time.Millisecond * 100)\n\n\tvar err error\n\tclient1, err := client.Open(\"ws://\"+s.WebServer().GetAddr()+\"/stream/user/user1\", \"http://localhost\", 1, false)\n\tassert.NoError(t, err)\n\n\tcheckConnectedNotificationJSON(t, \"user1\",\n\t\texpectStatusMessage(t, client1, protocol.SUCCESS_CONNECTED, \"You are connected to the server.\"),\n\t)\n\n\tclient2, err := client.Open(\"ws://\"+s.WebServer().GetAddr()+\"/stream/user/user2\", \"http://localhost\", 1, false)\n\tassert.NoError(t, err)\n\tcheckConnectedNotificationJSON(t, \"user2\",\n\t\texpectStatusMessage(t, client2, protocol.SUCCESS_CONNECTED, \"You are connected to the server.\"),\n\t)\n\n\treturn s, client1, client2, func() {\n\t\tif client1 != nil {\n\t\t\tclient1.Close()\n\t\t}\n\t\tif client2 != nil {\n\t\t\tclient2.Close()\n\t\t}\n\t\ts.Stop()\n\t}\n}\n\nfunc expectStatusMessage(t *testing.T, client client.Client, name string, arg string) string {\n\tselect {\n\tcase notify := <-client.StatusMessages():\n\t\tassert.Equal(t, name, notify.Name)\n\t\tassert.Equal(t, arg, notify.Arg)\n\t\treturn notify.Json\n\tcase <-time.After(time.Second * 10):\n\t\tt.Logf(\"no notification of type %s after 10 seconds\", name)\n\t\tt.Fail()\n\t\treturn \"\"\n\t}\n}\n\nfunc checkConnectedNotificationJSON(t *testing.T, user string, connectedJSON string) {\n\tm := make(map[string]string)\n\terr := 
json.Unmarshal([]byte(connectedJSON), &m)\n\tassert.NoError(t, err)\n\tassert.Equal(t, user, m[\"UserId\"])\n\tassert.True(t, len(m[\"ApplicationId\"]) > 0)\n\t_, e := time.Parse(time.RFC3339, m[\"Time\"])\n\tassert.NoError(t, e)\n}\n\n//Used only for test and unmarshalling of the json response\ntype Subscriber struct {\n\tDeviceToken string `json:\"device_token\"`\n\tUserID      string `json:\"user_id\"`\n}\n\nfunc TestSubscribersIntegration(t *testing.T) {\n\tdefer testutil.SkipIfShort(t)\n\tdefer testutil.SkipIfDisabled(t)\n\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\tdefer testutil.EnableDebugForMethod()()\n\n\ta := assert.New(t)\n\n\ts, cleanup := serviceSetUp(t)\n\tdefer cleanup()\n\n\tsubscribeMultipleClients(t, s, 4)\n\ta.Nil(nil)\n\n\trestClient := restclient.New(fmt.Sprintf(\"http://%s/api\", s.WebServer().GetAddr()))\n\tcontent, err := restClient.GetSubscribers(testTopic)\n\ta.NoError(err)\n\trouteParams := make([]*Subscriber, 0)\n\n\terr = json.Unmarshal(content, &routeParams)\n\ta.Equal(4, len(routeParams), \"Should have 4 subscribers\")\n\tfor i, rp := range routeParams {\n\t\ta.Equal(fmt.Sprintf(\"gcmId%d\", i), rp.DeviceToken)\n\t\ta.Equal(fmt.Sprintf(\"user%d\", i), rp.UserID)\n\t}\n\ta.NoError(err)\n}\n\nfunc subscribeMultipleClients(t *testing.T, service *service.Service, noOfClients int) {\n\ta := assert.New(t)\n\n\t// create FCM subscription for topic\n\tfor i := 0; i < noOfClients; i++ {\n\t\turlFormat := fmt.Sprintf(\"http://%s/fcm/gcmId%%d/user%%d/%%s\", service.WebServer().GetAddr())\n\t\turl := fmt.Sprintf(urlFormat, i, i, strings.TrimPrefix(testTopic, \"/\"))\n\t\tresponse, errPost := http.Post(\n\t\t\turl,\n\t\t\t\"text/plain\",\n\t\t\tbytes.NewBufferString(\"\"),\n\t\t)\n\t\tlogger.WithField(\"url\", url).Debug(\"subscribe\")\n\t\ta.NoError(errPost)\n\t\ta.Equal(response.StatusCode, 200)\n\n\t\tbody, errReadAll := ioutil.ReadAll(response.Body)\n\t\ta.NoError(errReadAll)\n\t\ta.Equal(fmt.Sprintf(`{\"subscribed\":\"%s\"}`, 
testTopic), string(body))\n\t}\n}\n"
  },
  {
    "path": "server/kvstore/common_test.go",
    "content": "package kvstore\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"crypto/rand\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar test1 = []byte(\"Test1\")\nvar test2 = []byte(\"Test2\")\nvar test3 = []byte(\"Test3\")\n\nfunc CommonTestPutGetDelete(t *testing.T, kvs1 KVStore, kvs2 KVStore) {\n\ta := assert.New(t)\n\n\ta.NoError(kvs1.Put(\"s1\", \"a\", test1))\n\ta.NoError(kvs1.Put(\"s1\", \"b\", test2))\n\ta.NoError(kvs1.Put(\"s2\", \"a\", test3))\n\n\tassertGet(a, kvs2, \"s1\", \"a\", test1)\n\tassertGet(a, kvs2, \"s1\", \"b\", test2)\n\tassertGet(a, kvs2, \"s2\", \"a\", test3)\n\tassertGetNoExist(a, kvs2, \"no\", \"thing\")\n\n\tkvs2.Delete(\"s1\", \"b\")\n\tassertGetNoExist(a, kvs1, \"s1\", \"b\")\n\tassertGet(a, kvs1, \"s1\", \"a\", test1)\n\tassertGet(a, kvs1, \"s2\", \"a\", test3)\n\n\tkvs2.Delete(\"s1\", \"a\")\n\tassertGetNoExist(a, kvs1, \"s1\", \"a\")\n\tassertGet(a, kvs1, \"s2\", \"a\", test3)\n\n\tkvs2.Delete(\"s2\", \"a\")\n\tassertGetNoExist(a, kvs1, \"s2\", \"a\")\n}\n\nfunc CommonTestIterate(t *testing.T, kvs1 KVStore, kvs2 KVStore) {\n\ta := assert.New(t)\n\n\ta.NoError(kvs1.Put(\"s1\", \"bli\", test1))\n\ta.NoError(kvs1.Put(\"s1\", \"bla\", test2))\n\ta.NoError(kvs1.Put(\"s1\", \"buu\", test3))\n\ta.NoError(kvs1.Put(\"s2\", \"bli\", test2))\n\n\tassertChannelContainsEntries(a, kvs2.Iterate(\"s1\", \"bl\"),\n\t\t[2]string{\"bli\", string(test1)},\n\t\t[2]string{\"bla\", string(test2)})\n\n\tassertChannelContainsEntries(a, kvs2.Iterate(\"s1\", \"\"),\n\t\t[2]string{\"bli\", string(test1)},\n\t\t[2]string{\"bla\", string(test2)},\n\t\t[2]string{\"buu\", string(test3)})\n\n\tassertChannelContainsEntries(a, kvs2.Iterate(\"s1\", \"bla\"),\n\t\t[2]string{\"bla\", string(test2)})\n\n\tassertChannelContainsEntries(a, kvs2.Iterate(\"s1\", \"nothing\"))\n\n\tassertChannelContainsEntries(a, kvs2.Iterate(\"s2\", \"\"),\n\t\t[2]string{\"bli\", string(test2)})\n}\n\nfunc assertChannelContainsEntries(a *assert.Assertions, 
entryC chan [2]string, expectedEntries ...[2]string) {\n\tvar allEntries [][2]string\n\nWAITLOOP:\n\tfor {\n\t\tselect {\n\t\tcase entry, ok := <-entryC:\n\t\t\tif !ok {\n\t\t\t\tbreak WAITLOOP\n\t\t\t}\n\t\t\tallEntries = append(allEntries, entry)\n\t\tcase <-time.After(time.Second):\n\t\t\ta.Fail(\"timeout\")\n\t\t}\n\t}\n\n\ta.Equal(len(expectedEntries), len(allEntries))\n\tfor _, expected := range expectedEntries {\n\t\ta.Contains(allEntries, expected)\n\t}\n}\n\nfunc CommonTestIterateKeys(t *testing.T, kvs1 KVStore, kvs2 KVStore) {\n\ta := assert.New(t)\n\n\ta.NoError(kvs1.Put(\"s1\", \"bli\", test1))\n\ta.NoError(kvs1.Put(\"s1\", \"bla\", test2))\n\ta.NoError(kvs1.Put(\"s1\", \"buu\", test3))\n\ta.NoError(kvs1.Put(\"s2\", \"bli\", test2))\n\n\tassertChannelContains(a, kvs2.IterateKeys(\"s1\", \"bl\"),\n\t\t\"bli\", \"bla\")\n\n\tassertChannelContains(a, kvs2.IterateKeys(\"s1\", \"\"),\n\t\t\"bli\", \"bla\", \"buu\")\n\n\tassertChannelContains(a, kvs2.IterateKeys(\"s1\", \"bla\"),\n\t\t\"bla\")\n\n\tassertChannelContains(a, kvs2.IterateKeys(\"s1\", \"nothing\"))\n\n\tassertChannelContains(a, kvs2.IterateKeys(\"s2\", \"\"),\n\t\t\"bli\")\n}\n\nfunc assertChannelContains(a *assert.Assertions, entryC chan string, expectedEntries ...string) {\n\tvar allEntries []string\n\nWAITLOOP:\n\tfor {\n\t\tselect {\n\t\tcase entry, ok := <-entryC:\n\t\t\tif !ok {\n\t\t\t\tbreak WAITLOOP\n\t\t\t}\n\t\t\tallEntries = append(allEntries, entry)\n\t\tcase <-time.After(time.Second):\n\t\t\ta.Fail(\"timeout\")\n\t\t}\n\t}\n\n\ta.Equal(len(expectedEntries), len(allEntries))\n\tfor _, expected := range expectedEntries {\n\t\ta.Contains(allEntries, expected)\n\t}\n}\n\nfunc CommonBenchmarkPutGet(b *testing.B, s KVStore) {\n\ta := assert.New(b)\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tdata := randString(20)\n\t\ts.Put(\"bench\", data, []byte(data))\n\t\tval, exist, err := s.Get(\"bench\", data)\n\t\ta.NoError(err)\n\t\ta.True(exist)\n\t\ta.Equal(data, 
string(val))\n\t}\n\tb.StopTimer()\n}\n\nfunc assertGet(a *assert.Assertions, s KVStore, schema string, key string, expectedValue []byte) {\n\tval, exist, err := s.Get(schema, key)\n\ta.NoError(err)\n\ta.True(exist)\n\ta.Equal(expectedValue, val)\n}\n\nfunc assertGetNoExist(a *assert.Assertions, s KVStore, schema string, key string) {\n\tval, exist, err := s.Get(schema, key)\n\ta.NoError(err)\n\ta.False(exist)\n\ta.Nil(val)\n}\n\nfunc randString(n int) string {\n\tconst alphanum = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n\tvar bytes = make([]byte, n)\n\trand.Read(bytes)\n\tfor i, b := range bytes {\n\t\tbytes[i] = alphanum[b%byte(len(alphanum))]\n\t}\n\treturn string(bytes)\n}\n\nfunc tempFilename() string {\n\tfile, err := ioutil.TempFile(\"/tmp\", \"guble_store_unittest\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile.Close()\n\tos.Remove(file.Name())\n\treturn file.Name()\n}\n"
  },
  {
    "path": "server/kvstore/gorm.go",
    "content": "package kvstore\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/jinzhu/gorm\"\n\n\t\"errors\"\n\t\"time\"\n)\n\nconst (\n\tresponseChannelSize = 100\n)\n\ntype kvEntry struct {\n\tSchema    string    `gorm:\"primary_key\"sql:\"type:varchar(200)\"`\n\tKey       string    `gorm:\"primary_key\"sql:\"type:varchar(200)\"`\n\tValue     []byte    `sql:\"type:bytea\"`\n\tUpdatedAt time.Time ``\n}\n\ntype kvStore struct {\n\tdb     *gorm.DB\n\tlogger *log.Entry\n}\n\nfunc (store *kvStore) Stop() error {\n\tif store.db != nil {\n\t\terr := store.db.Close()\n\t\tstore.db = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (store *kvStore) Check() error {\n\tif store.db == nil {\n\t\terrorMessage := \"Error: Database is not initialized (nil)\"\n\t\tstore.logger.Error(errorMessage)\n\t\treturn errors.New(errorMessage)\n\t}\n\tif err := store.db.DB().Ping(); err != nil {\n\t\tstore.logger.WithField(\"error\", err.Error()).Error(\"Error pinging database\")\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (store *kvStore) Put(schema, key string, value []byte) error {\n\tif err := store.Delete(schema, key); err != nil {\n\t\treturn err\n\t}\n\tentry := &kvEntry{Schema: schema, Key: key, Value: value, UpdatedAt: time.Now()}\n\treturn store.db.Create(entry).Error\n}\n\nfunc (store *kvStore) Get(schema, key string) ([]byte, bool, error) {\n\tentry := &kvEntry{}\n\tif err := store.db.First(&entry, \"schema = ? and key = ?\", schema, key).Error; err != nil {\n\t\tif err == gorm.ErrRecordNotFound {\n\t\t\treturn nil, false, nil\n\t\t}\n\t\treturn nil, false, err\n\t}\n\n\treturn entry.Value, true, nil\n}\n\nfunc (store *kvStore) Iterate(schema string, keyPrefix string) chan [2]string {\n\tresponseC := make(chan [2]string, responseChannelSize)\n\tgo func() {\n\t\trows, err := store.db.Raw(\"select key, value from kv_entry where schema = ? 
and key LIKE ?\", schema, keyPrefix+\"%\").\n\t\t\tRows()\n\t\tif err != nil {\n\t\t\tstore.logger.WithField(\"error\", err.Error()).Error(\"Error fetching keys from database\")\n\t\t} else {\n\t\t\tdefer rows.Close()\n\t\t\tfor rows.Next() {\n\t\t\t\tvar key, value string\n\t\t\t\trows.Scan(&key, &value)\n\t\t\t\tresponseC <- [2]string{key, value}\n\t\t\t}\n\t\t}\n\t\tclose(responseC)\n\t}()\n\treturn responseC\n}\n\nfunc (store *kvStore) IterateKeys(schema string, keyPrefix string) chan string {\n\tresponseC := make(chan string, responseChannelSize)\n\tgo func() {\n\t\trows, err := store.db.Raw(\"select key from kv_entry where schema = ? and key LIKE ?\", schema, keyPrefix+\"%\").\n\t\t\tRows()\n\t\tif err != nil {\n\t\t\tstore.logger.WithField(\"error\", err.Error()).Error(\"Error fetching keys from database\")\n\t\t} else {\n\t\t\tdefer rows.Close()\n\t\t\tfor rows.Next() {\n\t\t\t\tvar value string\n\t\t\t\trows.Scan(&value)\n\t\t\t\tresponseC <- value\n\t\t\t}\n\t\t}\n\t\tclose(responseC)\n\t}()\n\treturn responseC\n}\n\nfunc (store *kvStore) Delete(schema, key string) error {\n\treturn store.db.Delete(&kvEntry{Schema: schema, Key: key}).Error\n}\n"
  },
  {
    "path": "server/kvstore/kvstore.go",
    "content": "package kvstore\n\n// KVStore is an interface for a persistence backend, storing key-value pairs.\ntype KVStore interface {\n\n\t// Put stores an entry in the key-value store\n\tPut(schema, key string, value []byte) error\n\n\t// Get fetches one entry\n\tGet(schema, key string) (value []byte, exist bool, err error)\n\n\t// Delete an entry\n\tDelete(schema, key string) error\n\n\t// Iterate iterates over all entries in the key value store.\n\t// The result will be sent to the channel, which is closed after the last entry.\n\t// For simplicity, the return type is an string array with key, value.\n\t// If you have binary values, you can safely cast back to []byte.\n\tIterate(schema, keyPrefix string) (entries chan [2]string)\n\n\t// IterateKeys iterates over all keys in the key value store.\n\t// The keys will be sent to the channel, which is closed after the last entry.\n\tIterateKeys(schema, keyPrefix string) (keys chan string)\n}\n"
  },
  {
    "path": "server/kvstore/memory.go",
    "content": "package kvstore\n\nimport (\n\t\"strings\"\n\t\"sync\"\n)\n\n// MemoryKVStore is a struct representing an in-memory key-value store.\ntype MemoryKVStore struct {\n\tdata  map[string]map[string][]byte\n\tmutex sync.RWMutex\n}\n\n// NewMemoryKVStore returns a new configured MemoryKVStore.\nfunc NewMemoryKVStore() *MemoryKVStore {\n\treturn &MemoryKVStore{\n\t\tdata: make(map[string]map[string][]byte),\n\t}\n}\n\n// Put implements the `kvstore` Put func.\nfunc (kvStore *MemoryKVStore) Put(schema, key string, value []byte) error {\n\tkvStore.mutex.Lock()\n\tdefer kvStore.mutex.Unlock()\n\ts := kvStore.getSchema(schema)\n\ts[key] = value\n\treturn nil\n}\n\n// Get implements the `kvstore` Get func.\nfunc (kvStore *MemoryKVStore) Get(schema, key string) ([]byte, bool, error) {\n\tkvStore.mutex.Lock()\n\tdefer kvStore.mutex.Unlock()\n\ts := kvStore.getSchema(schema)\n\tif v, ok := s[key]; ok {\n\t\treturn v, true, nil\n\t}\n\treturn nil, false, nil\n}\n\n// Delete implements the `kvstore` Delete func.\nfunc (kvStore *MemoryKVStore) Delete(schema, key string) error {\n\tkvStore.mutex.Lock()\n\tdefer kvStore.mutex.Unlock()\n\ts := kvStore.getSchema(schema)\n\tdelete(s, key)\n\treturn nil\n}\n\n// Iterate iterates over the key-value pairs in the schema, with keys matching the keyPrefix.\n// TODO: this can lead to a deadlock, if the consumer modifies the store while receiving and the channel blocks\nfunc (kvStore *MemoryKVStore) Iterate(schema string, keyPrefix string) chan [2]string {\n\tresponseChan := make(chan [2]string, 100)\n\tkvStore.mutex.Lock()\n\ts := kvStore.getSchema(schema)\n\tkvStore.mutex.Unlock()\n\tgo func() {\n\t\tkvStore.mutex.Lock()\n\t\tfor key, value := range s {\n\t\t\tif strings.HasPrefix(key, keyPrefix) {\n\t\t\t\tresponseChan <- [2]string{key, string(value)}\n\t\t\t}\n\t\t}\n\t\tkvStore.mutex.Unlock()\n\t\tclose(responseChan)\n\t}()\n\treturn responseChan\n}\n\n// IterateKeys iterates over the keys in the schema, matching the 
keyPrefix.\n// TODO: this can lead to a deadlock, if the consumer modifies the store while receiving and the channel blocks\nfunc (kvStore *MemoryKVStore) IterateKeys(schema string, keyPrefix string) chan string {\n\tresponseChan := make(chan string, 100)\n\tkvStore.mutex.Lock()\n\ts := kvStore.getSchema(schema)\n\tkvStore.mutex.Unlock()\n\tgo func() {\n\t\tkvStore.mutex.Lock()\n\t\tfor key := range s {\n\t\t\tif strings.HasPrefix(key, keyPrefix) {\n\t\t\t\tresponseChan <- key\n\t\t\t}\n\t\t}\n\t\tkvStore.mutex.Unlock()\n\n\t\tclose(responseChan)\n\t}()\n\treturn responseChan\n}\n\nfunc (kvStore *MemoryKVStore) getSchema(schema string) map[string][]byte {\n\tif s, ok := kvStore.data[schema]; ok {\n\t\treturn s\n\t}\n\ts := make(map[string][]byte)\n\tkvStore.data[schema] = s\n\treturn s\n}\n"
  },
  {
    "path": "server/kvstore/memory_test.go",
    "content": "package kvstore\n\nimport (\n\t\"testing\"\n)\n\nfunc TestMemoryPutGetDelete(t *testing.T) {\n\tmkvs := NewMemoryKVStore()\n\tCommonTestPutGetDelete(t, mkvs, mkvs)\n}\n\nfunc TestMemoryIterateKeys(t *testing.T) {\n\tmkvs := NewMemoryKVStore()\n\tCommonTestIterateKeys(t, mkvs, mkvs)\n}\n\nfunc TestMemoryIterate(t *testing.T) {\n\tmkvs := NewMemoryKVStore()\n\tCommonTestIterate(t, mkvs, mkvs)\n}\n\nfunc BenchmarkMemoryPutGet(b *testing.B) {\n\tCommonBenchmarkPutGet(b, NewMemoryKVStore())\n}\n"
  },
  {
    "path": "server/kvstore/postgres.go",
    "content": "package kvstore\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"github.com/jinzhu/gorm\"\n\n\t// use gorm's postgres dialect\n\t_ \"github.com/jinzhu/gorm/dialects/postgres\"\n)\n\nconst postgresGormLogMode = false\n\n// PostgresKVStore extends a gorm-based kvStore with a Postgresql-specific configuration.\ntype PostgresKVStore struct {\n\t*kvStore\n\tconfig PostgresConfig\n}\n\n// NewPostgresKVStore returns a new configured PostgresKVStore (not opened yet).\nfunc NewPostgresKVStore(postgresConfig PostgresConfig) *PostgresKVStore {\n\treturn &PostgresKVStore{\n\t\tkvStore: &kvStore{logger: log.WithFields(log.Fields{\"module\": \"kv-postgres\"})},\n\t\tconfig:  postgresConfig,\n\t}\n}\n\n// Open a connection to Postgresql database, or return an error.\nfunc (kvStore *PostgresKVStore) Open() error {\n\tlogger := kvStore.logger.WithField(\"config\", kvStore.config)\n\tlogger.Info(\"Opening database\")\n\n\tgormdb, err := gorm.Open(\"postgres\", kvStore.config.connectionString())\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"Error opening database\")\n\t\treturn err\n\t}\n\n\tif err := gormdb.DB().Ping(); err != nil {\n\t\tkvStore.logger.WithField(\"error\", err.Error()).Error(\"Error pinging database\")\n\t} else {\n\t\tkvStore.logger.Info(\"Ping reply from database\")\n\t}\n\n\tgormdb.LogMode(postgresGormLogMode)\n\tgormdb.SingularTable(true)\n\t//TODO MARIAN REMOVE THIS AFTER BUG\n\tgormdb.DB().SetMaxIdleConns(-1)\n\tgormdb.DB().SetMaxOpenConns(kvStore.config.MaxOpenConns)\n\n\t//TODO MARIAN maybe config\n\t//gormdb.DB().SetConnMaxLifetime(2 * time.Minute)\n\tif err := gormdb.AutoMigrate(&kvEntry{}).Error; err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"Error in schema migration\")\n\t\treturn err\n\t}\n\n\tlogger.Info(\"Ensured database schema\")\n\tkvStore.db = gormdb\n\treturn nil\n}\n"
  },
  {
    "path": "server/kvstore/postgres_config.go",
    "content": "package kvstore\n\nimport \"strings\"\n\n// PostgresConfig is a map-based configuration of a Postgresql connection (dbname, host etc.),\n// extended with gorm-specific parameters (e.g. number of open / idle connections).\ntype PostgresConfig struct {\n\tConnParams   map[string]string\n\tMaxIdleConns int\n\tMaxOpenConns int\n}\n\nfunc (pc PostgresConfig) connectionString() string {\n\tvar params []string\n\tfor key, value := range pc.ConnParams {\n\t\tparams = append(params, key+\"=\"+value)\n\t}\n\treturn strings.Join(params, \" \")\n}\n"
  },
  {
    "path": "server/kvstore/postgres_config_test.go",
    "content": "package kvstore\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestPostgresConfig_String(t *testing.T) {\n\ta := assert.New(t)\n\tpc0 := PostgresConfig{map[string]string{}, 1, 1}\n\ta.Equal(pc0.connectionString(), \"\")\n\n\tpc1 := PostgresConfig{map[string]string{\"key\": \"value\"}, 1, 1}\n\ta.Equal(pc1.connectionString(), \"key=value\")\n\n\tpc2 := PostgresConfig{map[string]string{\"key\": \"value\", \"password\": \"secret\"}, 1, 1}\n\ts := pc2.connectionString()\n\ta.True(s == \"key=value password=secret\" || s == \"password=secret key=value\")\n}\n"
  },
  {
    "path": "server/kvstore/postgres_test.go",
    "content": "package kvstore\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc BenchmarkPostgresKVStore_PutGet(b *testing.B) {\n\tkvs := NewPostgresKVStore(aPostgresConfig())\n\tkvs.Open()\n\tCommonBenchmarkPutGet(b, kvs)\n}\n\nfunc TestPostgresKVStore_PutGetDelete(t *testing.T) {\n\tkvs := NewPostgresKVStore(aPostgresConfig())\n\tkvs.Open()\n\tCommonTestPutGetDelete(t, kvs, kvs)\n}\n\nfunc TestPostgresKVStore_Iterate(t *testing.T) {\n\tkvs := NewPostgresKVStore(aPostgresConfig())\n\tkvs.Open()\n\tCommonTestIterate(t, kvs, kvs)\n}\n\nfunc TestPostgresKVStore_IterateKeys(t *testing.T) {\n\tkvs := NewPostgresKVStore(aPostgresConfig())\n\tkvs.Open()\n\tCommonTestIterateKeys(t, kvs, kvs)\n}\n\nfunc TestPostgresKVStore_Check(t *testing.T) {\n\ta := assert.New(t)\n\n\tkvs := NewPostgresKVStore(aPostgresConfig())\n\tkvs.Open()\n\n\terr := kvs.Check()\n\ta.NoError(err, \"Db ping should work\")\n\n\tkvs.Stop()\n\n\terr = kvs.Check()\n\ta.NotNil(err, \"Check should fail because db was already closed\")\n}\n\nfunc TestPostgresKVStore_Open(t *testing.T) {\n\tkvs := NewPostgresKVStore(invalidPostgresConfig())\n\terr := kvs.Open()\n\tassert.NotNil(t, err)\n}\n\n// This config assumes a postgresql running locally\nfunc aPostgresConfig() PostgresConfig {\n\treturn PostgresConfig{\n\t\tConnParams: map[string]string{\n\t\t\t\"host\":     \"localhost\",\n\t\t\t\"user\":     \"postgres\",\n\t\t\t\"password\": \"\",\n\t\t\t\"dbname\":   \"guble\",\n\t\t\t\"sslmode\":  \"disable\",\n\t\t},\n\t\tMaxIdleConns: 1,\n\t\tMaxOpenConns: 1,\n\t}\n}\n\nfunc invalidPostgresConfig() PostgresConfig {\n\treturn PostgresConfig{\n\t\tConnParams: map[string]string{\n\t\t\t\"host\":     \"localhost\",\n\t\t\t\"user\":     \"\",\n\t\t\t\"password\": \"\",\n\t\t\t\"dbname\":   \"\",\n\t\t\t\"sslmode\":  \"disable\",\n\t\t},\n\t\tMaxIdleConns: 1,\n\t\tMaxOpenConns: 1,\n\t}\n}\n"
  },
  {
    "path": "server/kvstore/sqlite.go",
    "content": "package kvstore\n\nimport (\n\t// use this as gorm's sqlite dialect / implementation\n\t_ \"github.com/mattn/go-sqlite3\"\n\n\t\"github.com/jinzhu/gorm\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n)\n\nconst (\n\tsqliteMaxIdleConns = 2\n\tsqliteMaxOpenConns = 5\n\tsqliteGormLogMode  = false\n)\n\nvar writeTestFilename = \"db_testfile\"\n\n// SqliteKVStore is a struct representing a sqlite database which embeds a kvStore.\ntype SqliteKVStore struct {\n\t*kvStore\n\tfilename    string\n\tsyncOnWrite bool\n}\n\n// NewSqliteKVStore returns a new configured SqliteKVStore (not opened yet).\nfunc NewSqliteKVStore(filename string, syncOnWrite bool) *SqliteKVStore {\n\treturn &SqliteKVStore{\n\t\tkvStore: &kvStore{logger: log.WithFields(log.Fields{\n\t\t\t\"module\":      \"kv-sqlite\",\n\t\t\t\"filename\":    filename,\n\t\t\t\"syncOnWrite\": syncOnWrite,\n\t\t})},\n\t\tfilename:    filename,\n\t\tsyncOnWrite: syncOnWrite,\n\t}\n}\n\n// Open opens the database file. 
If the directory does not exist, it will be created.\nfunc (kvStore *SqliteKVStore) Open() error {\n\tdirectoryPath := filepath.Dir(kvStore.filename)\n\tif err := ensureWriteableDirectory(directoryPath); err != nil {\n\t\tkvStore.logger.WithError(err).Error(\"DB Directory is not writeable\")\n\t\treturn err\n\t}\n\n\tkvStore.logger.Info(\"Opening database\")\n\n\tgormdb, err := gorm.Open(\"sqlite3\", kvStore.filename)\n\tif err != nil {\n\t\tkvStore.logger.WithError(err).Error(\"Error opening database\")\n\t\treturn err\n\t}\n\n\tif err := gormdb.DB().Ping(); err != nil {\n\t\tkvStore.logger.WithError(err).Error(\"Error pinging database\")\n\t\treturn err\n\t}\n\tkvStore.logger.Info(\"Ping reply from database\")\n\n\tgormdb.LogMode(sqliteGormLogMode)\n\tgormdb.SingularTable(true)\n\tgormdb.DB().SetMaxIdleConns(sqliteMaxIdleConns)\n\tgormdb.DB().SetMaxOpenConns(sqliteMaxOpenConns)\n\n\tif err := gormdb.AutoMigrate(&kvEntry{}).Error; err != nil {\n\t\tkvStore.logger.WithError(err).Error(\"Error in schema migration\")\n\t\treturn err\n\t}\n\tkvStore.logger.Info(\"Ensured database schema\")\n\n\tif !kvStore.syncOnWrite {\n\t\tkvStore.logger.Info(\"Setting db: PRAGMA synchronous = OFF\")\n\t\tif err := gormdb.Exec(\"PRAGMA synchronous = OFF\").Error; err != nil {\n\t\t\tkvStore.logger.WithError(err).Error(\"Error setting PRAGMA synchronous = OFF\")\n\t\t\treturn err\n\t\t}\n\t}\n\tkvStore.db = gormdb\n\treturn nil\n}\n\nfunc ensureWriteableDirectory(dir string) error {\n\tdirInfo, errStat := os.Stat(dir)\n\tif os.IsNotExist(errStat) {\n\t\tif errMkdir := os.MkdirAll(dir, 0755); errMkdir != nil {\n\t\t\treturn errMkdir\n\t\t}\n\t\tdirInfo, errStat = os.Stat(dir)\n\t}\n\tif errStat != nil || !dirInfo.IsDir() {\n\t\treturn fmt.Errorf(\"kv-sqlite: not a directory %v\", dir)\n\t}\n\twriteTest := path.Join(dir, writeTestFilename)\n\tif err := ioutil.WriteFile(writeTest, []byte(\"writeTest\"), 0644); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Remove(writeTest); err != 
nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "server/kvstore/sqlite_test.go",
    "content": "package kvstore\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc BenchmarkSqlitePutGet(b *testing.B) {\n\tf := tempFilename()\n\tdefer os.Remove(f)\n\n\tdb := NewSqliteKVStore(f, false)\n\tdb.Open()\n\tCommonBenchmarkPutGet(b, db)\n}\n\nfunc TestSqlitePutGetDelete(t *testing.T) {\n\tf := tempFilename()\n\tdefer os.Remove(f)\n\n\tdb := NewSqliteKVStore(f, false)\n\tdb.Open()\n\tCommonTestPutGetDelete(t, db, db)\n}\n\nfunc TestSqliteIterate(t *testing.T) {\n\tf := tempFilename()\n\tdefer os.Remove(f)\n\n\tdb := NewSqliteKVStore(f, false)\n\tdb.Open()\n\n\tCommonTestIterate(t, db, db)\n}\n\nfunc TestSqliteIterateKeys(t *testing.T) {\n\tf := tempFilename()\n\tdefer os.Remove(f)\n\n\tdb := NewSqliteKVStore(f, false)\n\tdb.Open()\n\n\tCommonTestIterateKeys(t, db, db)\n}\n\nfunc TestCheck_SqlKVStore(t *testing.T) {\n\ta := assert.New(t)\n\tf := tempFilename()\n\tdefer os.Remove(f)\n\n\tkvs := NewSqliteKVStore(f, false)\n\tkvs.Open()\n\n\terr := kvs.Check()\n\ta.Nil(err, \"Db ping should work\")\n\n\tkvs.Stop()\n\n\terr = kvs.Check()\n\ta.NotNil(err, \"Check should fail because db was already closed\")\n}\n"
  },
  {
    "path": "server/logger.go",
    "content": "package server\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"server\",\n})\n"
  },
  {
    "path": "server/metrics/average.go",
    "content": "package metrics\n\nimport (\n\t\"fmt\"\n)\n\ntype average struct {\n\tvalue string\n}\n\nfunc newAverage(total, cases, scale int64, defaultAverageJSONValue string) average {\n\tif cases <= 0 || scale <= 0 {\n\t\treturn average{defaultAverageJSONValue}\n\t}\n\treturn average{fmt.Sprintf(\"%v\", float64(total)/float64(cases*scale))}\n}\n\nfunc (a average) String() string {\n\treturn a.value\n}\n"
  },
  {
    "path": "server/metrics/average_test.go",
    "content": "package metrics\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"testing\"\n)\n\nfunc TestAverage_String(t *testing.T) {\n\tassert.Equal(t, \"x\", newAverage(0, 0, 0, \"x\").String())\n\tassert.Equal(t, \"x\", newAverage(0, 1, 0, \"x\").String())\n\tassert.Equal(t, \"0\", newAverage(0, 1, 1, \"\").String())\n\tassert.Equal(t, \"1\", newAverage(1, 1, 1, \"\").String())\n\tassert.Equal(t, \"2\", newAverage(40, 2, 10, \"\").String())\n}\n"
  },
  {
    "path": "server/metrics/disabled.go",
    "content": "// +build disablemetrics\n\npackage metrics\n\nimport (\n\t\"expvar\"\n\t\"time\"\n)\n\ntype dummyInt struct{}\n\n// Dummy functions on dummyInt\nfunc (v *dummyInt) Add(delta int64) {}\nfunc (v *dummyInt) Set(value int64) {}\n\n// NewInt returns a dummyInt, depending on the build tag declared at the beginning of this file.\nfunc NewInt(name string) Int {\n\treturn &dummyInt{}\n}\n\ntype dummyMap struct{}\n\n// Dummy functions on dummyMap\nfunc (v *dummyMap) Init() *expvar.Map             { return nil }\nfunc (v *dummyMap) Get(key string) expvar.Var     { return nil }\nfunc (v *dummyMap) Set(key string, av expvar.Var) {}\nfunc (v *dummyMap) Add(key string, delta int64)   {}\n\n// NewMap returns a dummyMap, depending on the build tag declared at the beginning of this file.\nfunc NewMap(name string) Map {\n\treturn &dummyMap{}\n}\n\nfunc RegisterInterval(m Map, td time.Duration, reset func(Map, time.Time), processAndReset func(Map, time.Duration, time.Time)) {\n}\n"
  },
  {
    "path": "server/metrics/enabled.go",
    "content": "// +build !disablemetrics\n\npackage metrics\n\nimport (\n\t\"context\"\n\t\"expvar\"\n\t\"time\"\n)\n\n// NewInt returns an expvar Int, depending on the absence of build tag declared at the beginning of this file\nfunc NewInt(name string) Int {\n\treturn expvar.NewInt(name)\n}\n\nfunc NewMap(name string) Map {\n\treturn expvar.NewMap(name)\n}\n\nfunc RegisterInterval(ctx context.Context, m Map, td time.Duration, reset func(Map, time.Time), processAndReset func(Map, time.Duration, time.Time)) {\n\treset(m, time.Now())\n\tgo func(m Map, td time.Duration, processAndReset func(Map, time.Duration, time.Time)) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase t := <-time.Tick(td):\n\t\t\t\tprocessAndReset(m, td, t)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(m, td, processAndReset)\n}\n"
  },
  {
    "path": "server/metrics/enabled_test.go",
    "content": "package metrics\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"expvar\"\n\t\"testing\"\n)\n\nfunc TestNewInt(t *testing.T) {\n\t_, ok := NewInt(\"a_name\").(expvar.Var)\n\tassert.True(t, ok)\n}\n"
  },
  {
    "path": "server/metrics/int.go",
    "content": "package metrics\n\n// Int is an interface for some of the operations defined on expvar.Int\ntype Int interface {\n\tAdd(int64)\n\tSet(int64)\n}\n"
  },
  {
    "path": "server/metrics/map.go",
    "content": "package metrics\n\nimport (\n\t\"expvar\"\n\t\"strconv\"\n\t\"time\"\n)\n\n// Map is an interface for some of the operations defined on expvar.Map\ntype Map interface {\n\tInit() *expvar.Map\n\tGet(key string) expvar.Var\n\tSet(key string, av expvar.Var)\n\tAdd(key string, delta int64)\n}\n\nfunc SetRate(m Map, key string, value expvar.Var, timeframe, unit time.Duration) {\n\tif value != nil {\n\t\tv, err := strconv.ParseInt(value.String(), 10, 64)\n\t\tif err != nil {\n\t\t\tm.Set(key, zeroValue)\n\t\t}\n\t\tm.Set(key, newRate(v, timeframe, unit))\n\t} else {\n\t\tm.Set(key, zeroValue)\n\t}\n}\n\nfunc SetAverage(m Map, key string, totalVar, casesVar expvar.Var, scale int64, defaultValue string) {\n\tif totalVar != nil && casesVar != nil {\n\t\ttotal, err1 := strconv.ParseInt(totalVar.String(), 10, 64)\n\t\tcases, err2 := strconv.ParseInt(casesVar.String(), 10, 64)\n\t\tif err1 != nil || err2 != nil {\n\t\t\tm.Set(key, zeroValue)\n\t\t}\n\t\tm.Set(key, newAverage(total, cases, scale, defaultValue))\n\t} else {\n\t\tm.Set(key, zeroValue)\n\t}\n}\n\nfunc AddToMaps(key string, value int64, maps ...Map) {\n\tfor _, m := range maps {\n\t\tm.Add(key, value)\n\t}\n}\n"
  },
  {
    "path": "server/metrics/metrics.go",
    "content": "// Package metrics implements simple general counter-metrics.\n// Metrics are enabled by default. If you want to disable metrics, build with:\n// go build -tags disablemetrics\npackage metrics\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"runtime\"\n)\n\nvar (\n\tlogger        = log.WithField(\"module\", \"metrics\")\n\tnumGoroutines = expvar.NewInt(\"num_goroutines\")\n)\n\nconst (\n\tDefaultAverageLatencyJSONValue = \"\\\"\\\"\"\n\tMilliPerNano                   = 1000000\n)\n\n// HttpHandler is a HTTP handler writing the current metrics to the http.ResponseWriter\nfunc HttpHandler(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\twriteMetrics(rw)\n}\n\nfunc writeMetrics(w io.Writer) {\n\tnumGoroutines.Set(int64(runtime.NumGoroutine()))\n\tfmt.Fprint(w, \"{\\n\")\n\tfirst := true\n\texpvar.Do(func(kv expvar.KeyValue) {\n\t\tif !first {\n\t\t\tfmt.Fprint(w, \",\\n\")\n\t\t}\n\t\tfirst = false\n\t\tfmt.Fprintf(w, \"%q: %s\", kv.Key, kv.Value)\n\t})\n\tfmt.Fprint(w, \"\\n}\\n\")\n}\n\n// LogOnDebugLevel logs all the current metrics, if logging is on Debug level.\nfunc LogOnDebugLevel() {\n\tif log.GetLevel() == log.DebugLevel {\n\t\tfields := log.Fields{}\n\t\texpvar.Do(func(kv expvar.KeyValue) {\n\t\t\tfields[kv.Key] = kv.Value\n\t\t})\n\t\tlogger.WithFields(fields).Debug(\"current values of metrics\")\n\t}\n}\n"
  },
  {
    "path": "server/metrics/metrics_test.go",
    "content": "package metrics\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n)\n\nfunc TestHttpHandler_MetricsNotEnabled(t *testing.T) {\n\ta := assert.New(t)\n\treq, _ := http.NewRequest(\"GET\", \"\", nil)\n\tw := httptest.NewRecorder()\n\tHttpHandler(w, req)\n\ta.Equal(http.StatusOK, w.Code)\n\tb, err := ioutil.ReadAll(w.Body)\n\ta.NoError(err)\n\ta.True(len(b) > 0)\n\tlog.Debugf(\"%s\", b)\n}\n\nfunc TestLogOnDebugLevel_Debug(t *testing.T) {\n\ta := assert.New(t)\n\tbufferDebug := bytes.NewBuffer([]byte{})\n\tlog.SetOutput(bufferDebug)\n\n\tlog.SetLevel(log.DebugLevel)\n\n\tLogOnDebugLevel()\n\n\tlogContent, err := ioutil.ReadAll(bufferDebug)\n\tlog.Debugf(\"%s\", logContent)\n\ta.NoError(err)\n\n\ta.Contains(string(logContent), \"cmdline\")\n\ta.Contains(string(logContent), \"memstats\")\n}\n\nfunc TestLogOnDebugLevel_Info(t *testing.T) {\n\ta := assert.New(t)\n\tbufferInfo := bytes.NewBuffer([]byte{})\n\tlog.SetOutput(bufferInfo)\n\n\tlog.SetLevel(log.InfoLevel)\n\n\tlogContent, err := ioutil.ReadAll(bufferInfo)\n\ta.NoError(err)\n\n\tLogOnDebugLevel()\n\n\ta.True(len(logContent) == 0)\n}\n"
  },
  {
    "path": "server/metrics/ns.go",
    "content": "package metrics\n\nconst sep = \".\"\n\n//NS is a namespace\ntype NS string\n\nfunc (ns NS) NewInt(key string) Int {\n\treturn NewInt(string(ns) + sep + key)\n}\n\nfunc (ns NS) NewMap(key string) Map {\n\treturn NewMap(string(ns) + sep + key)\n}\n\nfunc (ns NS) NewNS(childKey string) NS {\n\treturn NS(string(ns) + sep + childKey)\n}\n"
  },
  {
    "path": "server/metrics/rate.go",
    "content": "package metrics\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype rate struct {\n\tvalue string\n}\n\nfunc newRate(value int64, timeframe, scale time.Duration) rate {\n\tif value <= 0 || timeframe <= 0 || scale <= 0 {\n\t\treturn rate{\"0\"}\n\t}\n\treturn rate{fmt.Sprintf(\"%v\", float64(value*scale.Nanoseconds())/float64(timeframe.Nanoseconds()))}\n}\n\nfunc (r rate) String() string {\n\treturn r.value\n}\n"
  },
  {
    "path": "server/metrics/rate_test.go",
    "content": "package metrics\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"testing\"\n)\n\nfunc TestRate_String(t *testing.T) {\n\tassert.Equal(t, \"0\", newRate(0, 0, 0).String())\n\tassert.Equal(t, \"0\", newRate(1, 0, 0).String())\n\tassert.Equal(t, \"1\", newRate(1, 1, 1).String())\n\tassert.Equal(t, \"1.5\", newRate(90, 60000, 1000).String())\n\tassert.Equal(t, \"1.6666666666666667\", newRate(100, 60000, 1000).String())\n}\n"
  },
  {
    "path": "server/metrics/time.go",
    "content": "package metrics\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype Time struct {\n\ttimeValue time.Time\n}\n\nfunc NewTime(timeValue time.Time) Time {\n\treturn Time{timeValue: timeValue}\n}\n\nfunc (t Time) String() string {\n\treturn fmt.Sprintf(\"\\\"%v\\\"\", t.timeValue)\n}\n"
  },
  {
    "path": "server/metrics/zero.go",
    "content": "package metrics\n\ntype zeroVar struct {\n}\n\nfunc (z zeroVar) String() string {\n\treturn \"0\"\n}\n\nvar zeroValue zeroVar\n"
  },
  {
    "path": "server/mocks_apns_pusher_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/apns (interfaces: Pusher)\n\npackage server\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tapns2 \"github.com/sideshow/apns2\"\n)\n\n// Mock of Pusher interface\ntype MockPusher struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockPusherRecorder\n}\n\n// Recorder for MockPusher (not exported)\ntype _MockPusherRecorder struct {\n\tmock *MockPusher\n}\n\nfunc NewMockPusher(ctrl *gomock.Controller) *MockPusher {\n\tmock := &MockPusher{ctrl: ctrl}\n\tmock.recorder = &_MockPusherRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockPusher) EXPECT() *_MockPusherRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockPusher) Push(_param0 *apns2.Notification) (*apns2.Response, error) {\n\tret := _m.ctrl.Call(_m, \"Push\", _param0)\n\tret0, _ := ret[0].(*apns2.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockPusherRecorder) Push(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Push\", arg0)\n}\n"
  },
  {
    "path": "server/mocks_auth_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/auth (interfaces: AccessManager)\n\npackage server\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tauth \"github.com/smancke/guble/server/auth\"\n)\n\n// Mock of AccessManager interface\ntype MockAccessManager struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockAccessManagerRecorder\n}\n\n// Recorder for MockAccessManager (not exported)\ntype _MockAccessManagerRecorder struct {\n\tmock *MockAccessManager\n}\n\nfunc NewMockAccessManager(ctrl *gomock.Controller) *MockAccessManager {\n\tmock := &MockAccessManager{ctrl: ctrl}\n\tmock.recorder = &_MockAccessManagerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockAccessManager) EXPECT() *_MockAccessManagerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockAccessManager) IsAllowed(_param0 auth.AccessType, _param1 string, _param2 protocol.Path) bool {\n\tret := _m.ctrl.Call(_m, \"IsAllowed\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockAccessManagerRecorder) IsAllowed(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IsAllowed\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage server\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/mocks_store_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/store (interfaces: MessageStore)\n\npackage server\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tstore \"github.com/smancke/guble/server/store\"\n)\n\n// Mock of MessageStore interface\ntype MockMessageStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockMessageStoreRecorder\n}\n\n// Recorder for MockMessageStore (not exported)\ntype _MockMessageStoreRecorder struct {\n\tmock *MockMessageStore\n}\n\nfunc NewMockMessageStore(ctrl *gomock.Controller) *MockMessageStore {\n\tmock := &MockMessageStore{ctrl: ctrl}\n\tmock.recorder = &_MockMessageStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockMessageStore) EXPECT() *_MockMessageStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockMessageStore) DoInTx(_param0 string, _param1 func(uint64) error) error {\n\tret := _m.ctrl.Call(_m, \"DoInTx\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) DoInTx(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"DoInTx\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) Fetch(_param0 *store.FetchRequest) {\n\t_m.ctrl.Call(_m, \"Fetch\", _param0)\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockMessageStore) GenerateNextMsgID(_param0 string, _param1 byte) (uint64, int64, error) {\n\tret := _m.ctrl.Call(_m, \"GenerateNextMsgID\", _param0, _param1)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(int64)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockMessageStoreRecorder) GenerateNextMsgID(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GenerateNextMsgID\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) MaxMessageID(_param0 
string) (uint64, error) {\n\tret := _m.ctrl.Call(_m, \"MaxMessageID\", _param0)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) MaxMessageID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MaxMessageID\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partition(_param0 string) (store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partition\", _param0)\n\tret0, _ := ret[0].(store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partition(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partition\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partitions() ([]store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partitions\")\n\tret0, _ := ret[0].([]store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partitions() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partitions\")\n}\n\nfunc (_m *MockMessageStore) Store(_param0 string, _param1 uint64, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Store\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Store(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Store\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockMessageStore) StoreMessage(_param0 *protocol.Message, _param1 byte) (int, error) {\n\tret := _m.ctrl.Call(_m, \"StoreMessage\", _param0, _param1)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) StoreMessage(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"StoreMessage\", arg0, arg1)\n}\n"
  },
  {
    "path": "server/redundancy_test.go",
    "content": "package server\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/server/fcm\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc Test_Subscribe_on_random_node(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8080\",\n\t\tNodeID:     1,\n\t\tNodePort:   20000,\n\t\tRemotes:    \"localhost:20000\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8081\",\n\t\tNodeID:     2,\n\t\tNodePort:   20001,\n\t\tRemotes:    \"localhost:20000\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tnode1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\tnode2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\n\t// subscribe on first node\n\tnode1.Subscribe(testTopic, \"1\")\n\n\t// connect a client and send a message\n\tclient1, err := node1.client(\"user1\", 1000, true)\n\ta.NoError(err)\n\n\terr = client1.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err)\n\n\t// only one message should be received but only on the first node.\n\t// Every message should be delivered only once.\n\tnode1.FCM.checkReceived(1)\n\tnode2.FCM.checkReceived(0)\n}\n\nfunc Test_Subscribe_working_After_Node_Restart(t *testing.T) {\n\t// defer testutil.EnableDebugForMethod()()\n\ttestutil.SkipIfDisabled(t)\n\ttestutil.SkipIfShort(t)\n\ta := assert.New(t)\n\n\tnodeConfig1 := testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8082\",\n\t\tNodeID:     1,\n\t\tNodePort:   20002,\n\t\tRemotes:    \"localhost:20002\",\n\t}\n\tnode1 := newTestClusterNode(t, nodeConfig1)\n\ta.NotNil(node1)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8083\",\n\t\tNodeID:     2,\n\t\tNodePort:   20003,\n\t\tRemotes:    
\"localhost:20002\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tnode1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\tnode2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\n\t// subscribe on first node\n\tnode1.Subscribe(testTopic, \"1\")\n\n\t// connect a clinet and send a message\n\tclient1, err := node1.client(\"user1\", 1000, true)\n\ta.NoError(err)\n\terr = client1.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err)\n\n\t// one message should be received but only on the first node.\n\t// Every message should be delivered only once.\n\tnode1.FCM.checkReceived(1)\n\tnode2.FCM.checkReceived(0)\n\n\t// stop a node, cleanup without removing directories\n\tnode1.cleanup(false)\n\ttime.Sleep(time.Millisecond * 150)\n\n\t// restart the service\n\trestartedNode1 := newTestClusterNode(t, nodeConfig1)\n\ta.NotNil(restartedNode1)\n\tdefer restartedNode1.cleanup(true)\n\n\trestartedNode1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\n\t// send a message to the former subscription.\n\tclient1, err = restartedNode1.client(\"user1\", 1000, true)\n\ta.NoError(err)\n\ttime.Sleep(time.Second)\n\n\terr = client1.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err, \"Subscription should work even after node restart\")\n\n\t// only one message should be received but only on the first node.\n\t// Every message should be delivered only once.\n\trestartedNode1.FCM.checkReceived(1)\n\tnode2.FCM.checkReceived(0)\n}\n\nfunc Test_Independent_Receiving(t *testing.T) {\n\ttestutil.SkipIfDisabled(t)\n\ttestutil.SkipIfShort(t)\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8084\",\n\t\tNodeID:     1,\n\t\tNodePort:   20004,\n\t\tRemotes:    \"localhost:20004\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: 
\"localhost:8085\",\n\t\tNodeID:     2,\n\t\tNodePort:   20005,\n\t\tRemotes:    \"localhost:20004\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tnode1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\tnode2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\n\t// subscribe on first node\n\tnode1.Subscribe(testTopic, \"1\")\n\n\t// connect a client and send a message\n\tclient1, err := node1.client(\"user1\", 1000, true)\n\terr = client1.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err)\n\n\t// only one message should be received but only on the first node.\n\t// Every message should be delivered only once.\n\tnode1.FCM.checkReceived(1)\n\tnode2.FCM.checkReceived(0)\n\n\t// reset the counter\n\tnode1.FCM.reset()\n\n\t// NOW connect to second node\n\tclient2, err := node2.client(\"user2\", 1000, true)\n\ta.NoError(err)\n\terr = client2.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err)\n\n\t// only one message should be received but only on the second node.\n\t// Every message should be delivered only once.\n\tnode1.FCM.checkReceived(0)\n\tnode2.FCM.checkReceived(1)\n}\n\nfunc Test_NoReceiving_After_Unsubscribe(t *testing.T) {\n\ttestutil.SkipIfDisabled(t)\n\ttestutil.SkipIfShort(t)\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8086\",\n\t\tNodeID:     1,\n\t\tNodePort:   20006,\n\t\tRemotes:    \"localhost:20006\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8087\",\n\t\tNodeID:     2,\n\t\tNodePort:   20007,\n\t\tRemotes:    \"localhost:20006\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tnode1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\tnode2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)\n\n\t// subscribe on first node\n\tnode1.Subscribe(testTopic, 
\"1\")\n\ttime.Sleep(50 * time.Millisecond)\n\n\t// connect a client and send a message\n\tclient1, err := node1.client(\"user1\", 1000, true)\n\terr = client1.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err)\n\n\t// only one message should be received but only on the first node.\n\t// Every message should be delivered only once.\n\tnode1.FCM.checkReceived(1)\n\tnode2.FCM.checkReceived(0)\n\n\t// Unsubscribe\n\tnode2.Unsubscribe(testTopic, \"1\")\n\ttime.Sleep(50 * time.Millisecond)\n\n\t// reset the counter\n\tnode1.FCM.reset()\n\n\t// and send a message again. No one should receive it\n\terr = client1.Send(testTopic, \"body\", \"{jsonHeader:1}\")\n\ta.NoError(err)\n\n\t// only one message should be received but only on the second node.\n\t// Every message should be delivered only once.\n\tnode1.FCM.checkReceived(0)\n\tnode2.FCM.checkReceived(0)\n}\n"
  },
  {
    "path": "server/rest/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage rest\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/rest/rest_message_api.go",
    "content": "package rest\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/azer/snakecase\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\n\t\"github.com/rs/xid\"\n\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strings\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nconst (\n\txHeaderPrefix     = \"x-guble-\"\n\tfilterPrefix      = \"filter\"\n\tsubscribersPrefix = \"/subscribers\"\n)\n\nvar errNotFound = errors.New(\"Not Found.\")\n\n// RestMessageAPI is a struct representing a router's connector for a REST API.\ntype RestMessageAPI struct {\n\trouter router.Router\n\tprefix string\n}\n\n// NewRestMessageAPI returns a new RestMessageAPI.\nfunc NewRestMessageAPI(router router.Router, prefix string) *RestMessageAPI {\n\treturn &RestMessageAPI{router, prefix}\n}\n\n// GetPrefix returns the prefix.\n// It is a part of the service.endpoint implementation.\nfunc (api *RestMessageAPI) GetPrefix() string {\n\treturn api.prefix\n}\n\n// ServeHTTP is an http.Handler.\n// It is a part of the service.endpoint implementation.\nfunc (api *RestMessageAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == http.MethodHead {\n\t\treturn\n\t}\n\n\tif r.Method == http.MethodGet {\n\t\tlog.WithField(\"url\", r.URL.Path).Debug(\"GET\")\n\n\t\ttopic, err := api.extractTopic(r.URL.Path, subscribersPrefix)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"Extracting topic failed\")\n\t\t\tif err == errNotFound {\n\t\t\t\thttp.NotFound(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, \"Server error.\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err := api.router.GetSubscribers(topic)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\t_, err = w.Write(resp)\n\t\tif err != nil {\n\t\t\tlog.WithField(\"error\", err.Error()).Error(\"Writing to byte stream failed\")\n\t\t\thttp.Error(w, \"Server error.\", 
http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"Can not read body\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttopic, err := api.extractTopic(r.URL.Path, \"/message\")\n\tif err != nil {\n\t\tif err == errNotFound {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\thttp.Error(w, \"Server error.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tmsg := &protocol.Message{\n\t\tPath:          protocol.Path(topic),\n\t\tBody:          body,\n\t\tUserID:        q(r, \"userId\"),\n\t\tApplicationID: xid.New().String(),\n\t\tHeaderJSON:    headersToJSON(r.Header),\n\t}\n\n\t// add filters\n\tapi.setFilters(r, msg)\n\n\tapi.router.HandleMessage(msg)\n\tfmt.Fprintf(w, \"OK\")\n}\n\nfunc (api *RestMessageAPI) extractTopic(path string, requestTypeTopicPrefix string) (string, error) {\n\tp := removeTrailingSlash(api.prefix) + requestTypeTopicPrefix\n\tif !strings.HasPrefix(path, p) {\n\t\treturn \"\", errNotFound\n\t}\n\t// Remove \"`api.prefix` + /message\" and we remain with the topic\n\ttopic := strings.TrimPrefix(path, p)\n\tif topic == \"/\" || topic == \"\" {\n\t\treturn \"\", errNotFound\n\t}\n\treturn topic, nil\n}\n\n// setFilters sets a field found in the format `filterCamelCaseField` in the\n// query of the request to underscore format on the message filters\nfunc (api *RestMessageAPI) setFilters(r *http.Request, msg *protocol.Message) {\n\tfor name, values := range r.URL.Query() {\n\t\tif strings.HasPrefix(name, filterPrefix) && len(values) > 0 {\n\t\t\tmsg.SetFilter(filterName(name), values[0])\n\t\t}\n\t}\n}\n\n// returns a query parameter\nfunc q(r *http.Request, name string) string {\n\tparams := r.URL.Query()[name]\n\tif len(params) > 0 {\n\t\treturn params[0]\n\t}\n\treturn \"\"\n}\n\n// transform from filterCamelCase to 
camel_case\nfunc filterName(name string) string {\n\treturn snakecase.SnakeCase(strings.TrimPrefix(name, filterPrefix))\n}\n\nfunc headersToJSON(header http.Header) string {\n\tbuff := &bytes.Buffer{}\n\tbuff.WriteString(\"{\")\n\tcount := 0\n\tfor key, valueList := range header {\n\t\tif strings.HasPrefix(strings.ToLower(key), xHeaderPrefix) && len(valueList) > 0 {\n\t\t\tif count > 0 {\n\t\t\t\tbuff.WriteString(\",\")\n\t\t\t}\n\t\t\tbuff.WriteString(`\"`)\n\t\t\tbuff.WriteString(key[len(xHeaderPrefix):])\n\t\t\tbuff.WriteString(`\":`)\n\t\t\tbuff.WriteString(`\"`)\n\t\t\tbuff.WriteString(valueList[0])\n\t\t\tbuff.WriteString(`\"`)\n\t\t\tcount++\n\t\t}\n\t}\n\tbuff.WriteString(\"}\")\n\treturn string(buff.Bytes())\n}\n\nfunc removeTrailingSlash(path string) string {\n\tif len(path) > 1 && path[len(path)-1] == '/' {\n\t\treturn path[:len(path)-1]\n\t}\n\treturn path\n}\n"
  },
  {
    "path": "server/rest/rest_message_api_test.go",
    "content": "package rest\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar testBytes = []byte(\"test\")\n\nfunc TestServerHTTP(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\t// given:  a rest api with a message sink\n\trouterMock := NewMockRouter(ctrl)\n\tapi := NewRestMessageAPI(routerMock, \"/api\")\n\n\tu, _ := url.Parse(\"http://localhost/api/message/my/topic?userId=marvin&messageId=42\")\n\n\t// and a http context\n\treq := &http.Request{\n\t\tMethod: http.MethodPost,\n\t\tURL:    u,\n\t\tBody:   ioutil.NopCloser(bytes.NewReader(testBytes)),\n\t\tHeader: http.Header{},\n\t}\n\tw := &httptest.ResponseRecorder{}\n\n\t// then i expect\n\trouterMock.EXPECT().HandleMessage(gomock.Any()).Do(func(msg *protocol.Message) {\n\t\ta.Equal(testBytes, msg.Body)\n\t\ta.Equal(\"{}\", msg.HeaderJSON)\n\t\ta.Equal(\"/my/topic\", string(msg.Path))\n\t\ta.True(len(msg.ApplicationID) > 0)\n\t\ta.Nil(msg.Filters)\n\t\ta.Equal(\"marvin\", msg.UserID)\n\t})\n\n\t// when: I POST a message\n\tapi.ServeHTTP(w, req)\n}\n\n// Server should return an 405 Method Not Allowed in case method request is not POST\nfunc TestServeHTTP_GetError(t *testing.T) {\n\ta := assert.New(t)\n\tdefer testutil.EnableDebugForMethod()()\n\tapi := NewRestMessageAPI(nil, \"/api\")\n\n\tu, _ := url.Parse(\"http://localhost/api/message/my/topic?userId=marvin&messageId=42\")\n\t// and a http context\n\treq := &http.Request{\n\t\tMethod: http.MethodGet,\n\t\tURL:    u,\n\t\tBody:   ioutil.NopCloser(bytes.NewReader(testBytes)),\n\t\tHeader: http.Header{},\n\t}\n\tw := &httptest.ResponseRecorder{}\n\n\t// when: I POST a message\n\tapi.ServeHTTP(w, 
req)\n\n\t//then\n\ta.Equal(http.StatusNotFound, w.Code)\n}\n\n// Server should return an 405 Method Not Allowed in case method request is not POST\nfunc TestServeHTTP_GetSubscribers(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\t//defer testutil.EnableDebugForMethod()()\n\n\ta := assert.New(t)\n\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\tapi := NewRestMessageAPI(routerMock, \"/api\")\n\trouterMock.EXPECT().GetSubscribers(gomock.Any()).Return([]byte(\"{}\"), nil)\n\tu, _ := url.Parse(\"http://localhost/api/subscribers/mytopic\")\n\t// and a http context\n\treq := &http.Request{\n\t\tMethod: http.MethodGet,\n\t\tURL:    u,\n\t}\n\tw := &httptest.ResponseRecorder{}\n\n\t// when: I POST a message\n\tapi.ServeHTTP(w, req)\n\n\t//then\n\ta.Equal(http.StatusOK, w.Code)\n}\n\nfunc TestHeadersToJSON(t *testing.T) {\n\ta := assert.New(t)\n\n\t// empty header\n\ta.Equal(`{}`, headersToJSON(http.Header{}))\n\n\t// simple head\n\tjsonString := headersToJSON(http.Header{\n\t\txHeaderPrefix + \"a\": []string{\"b\"},\n\t\t\"foo\":               []string{\"b\"},\n\t\txHeaderPrefix + \"x\": []string{\"y\"},\n\t\t\"bar\":               []string{\"b\"},\n\t})\n\n\theader := make(map[string]string)\n\terr := json.Unmarshal([]byte(jsonString), &header)\n\ta.NoError(err)\n\n\ta.Equal(2, len(header))\n\ta.Equal(\"b\", header[\"a\"])\n\ta.Equal(\"y\", header[\"x\"])\n}\n\nfunc TestRemoveTrailingSlash(t *testing.T) {\n\tassert.Equal(t, \"/foo\", removeTrailingSlash(\"/foo/\"))\n\tassert.Equal(t, \"/foo\", removeTrailingSlash(\"/foo\"))\n\tassert.Equal(t, \"/\", removeTrailingSlash(\"/\"))\n}\n\nfunc TestExtractTopic(t *testing.T) {\n\ta := assert.New(t)\n\n\tapi := NewRestMessageAPI(nil, \"/api\")\n\n\tcases := []struct {\n\t\tpath, topic string\n\t\terr         error\n\t}{\n\t\t{\"/api/message/my/topic\", \"/my/topic\", nil},\n\t\t{\"/api/message/\", \"\", errNotFound},\n\t\t{\"/api/message\", \"\", errNotFound},\n\t\t{\"/api/invalid/request\", 
\"\", errNotFound},\n\t}\n\n\tfor _, c := range cases {\n\t\ttopic, err := api.extractTopic(c.path, \"/message\")\n\t\tm := \"Assertion failed for path: \" + c.path\n\n\t\tif c.err == nil {\n\t\t\ta.Equal(c.topic, topic, m)\n\t\t} else {\n\t\t\ta.NotNil(err, m)\n\t\t\ta.Equal(c.err, err, m)\n\t\t}\n\t}\n}\n\nfunc TestRestMessageAPI_setFilters(t *testing.T) {\n\ta := assert.New(t)\n\n\tbody := bytes.NewBufferString(\"\")\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\t\"http://localhost/api/message/topic?filterUserID=user01&filterDeviceID=ABC&filterDummyCamelCase=dummy_value\",\n\t\tbody)\n\ta.NoError(err)\n\n\tapi := &RestMessageAPI{}\n\tmsg := &protocol.Message{}\n\n\tapi.setFilters(req, msg)\n\n\ta.NotNil(msg.Filters)\n\tif a.Contains(msg.Filters, \"user_id\") {\n\t\ta.Equal(\"user01\", msg.Filters[\"user_id\"])\n\t}\n\tif a.Contains(msg.Filters, \"device_id\") {\n\t\ta.Equal(\"ABC\", msg.Filters[\"device_id\"])\n\t}\n\tif a.Contains(msg.Filters, \"dummy_camel_case\") {\n\t\ta.Equal(\"dummy_value\", msg.Filters[\"dummy_camel_case\"])\n\t}\n}\n\nfunc TestRestMessageAPI_SetFiltersWhenServing(t *testing.T) {\n\ttestutil.SkipIfDisabled(t)\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\tbody := bytes.NewBufferString(\"\")\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\t\"http://localhost/test/message/topic?filterUserID=user01&filterDeviceID=ABC&filterDummyCamelCase=dummy_value\",\n\t\tbody)\n\ta.NoError(err)\n\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\tapi := NewRestMessageAPI(routerMock, \"/test/\")\n\trecorder := httptest.NewRecorder()\n\n\trouterMock.EXPECT().HandleMessage(gomock.Any()).Do(func(msg *protocol.Message) error {\n\t\ta.NotNil(msg.Filters)\n\t\tif a.Contains(msg.Filters, \"user_id\") {\n\t\t\ta.Equal(\"user01\", msg.Filters[\"user_id\"])\n\t\t}\n\t\tif a.Contains(msg.Filters, \"device_id\") {\n\t\t\ta.Equal(\"ABC\", msg.Filters[\"device_id\"])\n\t\t}\n\t\tif a.Contains(msg.Filters, 
\"dummy_camel_case\") {\n\t\t\ta.Equal(\"dummy_value\", msg.Filters[\"dummy_camel_case\"])\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tapi.ServeHTTP(recorder, req)\n\n\ttime.Sleep(10 * time.Millisecond)\n}\n"
  },
  {
    "path": "server/router/errors.go",
    "content": "package router\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\n\t\"errors\"\n\t\"fmt\"\n)\n\nvar (\n\t// ErrServiceNotProvided is returned when the service required is not set.\n\tErrServiceNotProvided = errors.New(\"Service not provided.\")\n\n\t// ErrInvalidRoute is returned by the `Deliver` method of a `Route` when it has been closed\n\t// due to slow processing\n\tErrInvalidRoute = errors.New(\"Route is invalid. Channel is closed.\")\n\n\t// ErrChannelFull is returned when trying to `Deliver` a message with a queue size of zero\n\t// and the channel is full\n\tErrChannelFull = errors.New(\"Route channel is full. Route is closed.\")\n\n\t// ErrQueueFull is returned when trying to `Deliver` a message in a full queued route\n\tErrQueueFull = errors.New(\"Route queue is full. Route is closed.\")\n)\n\n// PermissionDeniedError is returned when AccessManager denies a user request for a topic\ntype PermissionDeniedError struct {\n\n\t// userId of request\n\tUserID string\n\n\t// accessType  requested(READ/WRITE)\n\tAccessType auth.AccessType\n\n\t// requested topic\n\tPath protocol.Path\n}\n\nfunc (e *PermissionDeniedError) Error() string {\n\treturn fmt.Sprintf(\"Access Denied for user=[%s] on path=[%s] for Operation=[%s]\", e.UserID, e.Path, e.AccessType)\n}\n\n// ModuleStoppingError is returned when the module is stopping\ntype ModuleStoppingError struct {\n\tName string\n}\n\nfunc (m *ModuleStoppingError) Error() string {\n\treturn fmt.Sprintf(\"Service %s is stopping\", m.Name)\n}\n"
  },
  {
    "path": "server/router/logger.go",
    "content": "package router\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithField(\"module\", \"router\")\n"
  },
  {
    "path": "server/router/message_queue.go",
    "content": "package router\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\n\t\"sync\"\n)\n\nconst (\n\tdefaultQueueCap = 50\n)\n\ntype queue struct {\n\tmu    sync.Mutex\n\tqueue []*protocol.Message\n}\n\n// newQueue creates a *queue that will have the capacity specified by size.\n// If `size` is negative use the defaultQueueCap.\nfunc newQueue(size int) *queue {\n\tif size < 0 {\n\t\tsize = defaultQueueCap\n\t}\n\treturn &queue{\n\t\tqueue: make([]*protocol.Message, 0, size),\n\t}\n}\n\nfunc (q *queue) push(m *protocol.Message) {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tq.queue = append(q.queue, m)\n}\n\n// remove the first item from the queue if exists\nfunc (q *queue) remove() {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tif len(q.queue) == 0 {\n\t\treturn\n\t}\n\tq.queue = q.queue[1:]\n}\n\n// poll returns the first item from the queue without removing it\nfunc (q *queue) poll() (*protocol.Message, error) {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tif len(q.queue) == 0 {\n\t\treturn nil, errEmptyQueue\n\t}\n\n\treturn q.queue[0], nil\n}\n\nfunc (q *queue) size() int {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\treturn len(q.queue)\n}\n"
  },
  {
    "path": "server/router/mocks_auth_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/auth (interfaces: AccessManager)\n\npackage router\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tauth \"github.com/smancke/guble/server/auth\"\n)\n\n// Mock of AccessManager interface\ntype MockAccessManager struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockAccessManagerRecorder\n}\n\n// Recorder for MockAccessManager (not exported)\ntype _MockAccessManagerRecorder struct {\n\tmock *MockAccessManager\n}\n\nfunc NewMockAccessManager(ctrl *gomock.Controller) *MockAccessManager {\n\tmock := &MockAccessManager{ctrl: ctrl}\n\tmock.recorder = &_MockAccessManagerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockAccessManager) EXPECT() *_MockAccessManagerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockAccessManager) IsAllowed(_param0 auth.AccessType, _param1 string, _param2 protocol.Path) bool {\n\tret := _m.ctrl.Call(_m, \"IsAllowed\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockAccessManagerRecorder) IsAllowed(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IsAllowed\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/router/mocks_checker_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/docker/distribution/health (interfaces: Checker)\n\npackage router\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of Checker interface\ntype MockChecker struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockCheckerRecorder\n}\n\n// Recorder for MockChecker (not exported)\ntype _MockCheckerRecorder struct {\n\tmock *MockChecker\n}\n\nfunc NewMockChecker(ctrl *gomock.Controller) *MockChecker {\n\tmock := &MockChecker{ctrl: ctrl}\n\tmock.recorder = &_MockCheckerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockChecker) EXPECT() *_MockCheckerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockChecker) Check() error {\n\tret := _m.ctrl.Call(_m, \"Check\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockCheckerRecorder) Check() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Check\")\n}\n"
  },
  {
    "path": "server/router/mocks_kvstore_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/kvstore (interfaces: KVStore)\n\npackage router\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of KVStore interface\ntype MockKVStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockKVStoreRecorder\n}\n\n// Recorder for MockKVStore (not exported)\ntype _MockKVStoreRecorder struct {\n\tmock *MockKVStore\n}\n\nfunc NewMockKVStore(ctrl *gomock.Controller) *MockKVStore {\n\tmock := &MockKVStore{ctrl: ctrl}\n\tmock.recorder = &_MockKVStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockKVStore) EXPECT() *_MockKVStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockKVStore) Delete(_param0 string, _param1 string) error {\n\tret := _m.ctrl.Call(_m, \"Delete\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Delete\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Get(_param0 string, _param1 string) ([]byte, bool, error) {\n\tret := _m.ctrl.Call(_m, \"Get\", _param0, _param1)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(bool)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockKVStoreRecorder) Get(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Get\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Iterate(_param0 string, _param1 string) chan [2]string {\n\tret := _m.ctrl.Call(_m, \"Iterate\", _param0, _param1)\n\tret0, _ := ret[0].(chan [2]string)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Iterate(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Iterate\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) IterateKeys(_param0 string, _param1 string) chan string {\n\tret := _m.ctrl.Call(_m, \"IterateKeys\", _param0, _param1)\n\tret0, _ := ret[0].(chan string)\n\treturn ret0\n}\n\nfunc 
(_mr *_MockKVStoreRecorder) IterateKeys(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IterateKeys\", arg0, arg1)\n}\n\nfunc (_m *MockKVStore) Put(_param0 string, _param1 string, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Put\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockKVStoreRecorder) Put(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Put\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/router/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage router\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Fetch(arg0 interface{}) 
*gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *Route) (*Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn 
_mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/router/mocks_store_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/store (interfaces: MessageStore)\n\npackage router\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tstore \"github.com/smancke/guble/server/store\"\n)\n\n// Mock of MessageStore interface\ntype MockMessageStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockMessageStoreRecorder\n}\n\n// Recorder for MockMessageStore (not exported)\ntype _MockMessageStoreRecorder struct {\n\tmock *MockMessageStore\n}\n\nfunc NewMockMessageStore(ctrl *gomock.Controller) *MockMessageStore {\n\tmock := &MockMessageStore{ctrl: ctrl}\n\tmock.recorder = &_MockMessageStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockMessageStore) EXPECT() *_MockMessageStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockMessageStore) DoInTx(_param0 string, _param1 func(uint64) error) error {\n\tret := _m.ctrl.Call(_m, \"DoInTx\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) DoInTx(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"DoInTx\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) Fetch(_param0 *store.FetchRequest) {\n\t_m.ctrl.Call(_m, \"Fetch\", _param0)\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockMessageStore) GenerateNextMsgID(_param0 string, _param1 byte) (uint64, int64, error) {\n\tret := _m.ctrl.Call(_m, \"GenerateNextMsgID\", _param0, _param1)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(int64)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockMessageStoreRecorder) GenerateNextMsgID(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GenerateNextMsgID\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) MaxMessageID(_param0 
string) (uint64, error) {\n\tret := _m.ctrl.Call(_m, \"MaxMessageID\", _param0)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) MaxMessageID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MaxMessageID\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partition(_param0 string) (store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partition\", _param0)\n\tret0, _ := ret[0].(store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partition(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partition\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partitions() ([]store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partitions\")\n\tret0, _ := ret[0].([]store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partitions() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partitions\")\n}\n\nfunc (_m *MockMessageStore) Store(_param0 string, _param1 uint64, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Store\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Store(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Store\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockMessageStore) StoreMessage(_param0 *protocol.Message, _param1 byte) (int, error) {\n\tret := _m.ctrl.Call(_m, \"StoreMessage\", _param0, _param1)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) StoreMessage(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"StoreMessage\", arg0, arg1)\n}\n"
  },
  {
    "path": "server/router/route.go",
    "content": "package router\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\nvar (\n\terrEmptyQueue = errors.New(\"Empty queue\")\n\terrTimeout    = errors.New(\"Channel sending timeout\")\n\n\tErrMissingFetchRequest = errors.New(\"Missing FetchRequest configuration.\")\n)\n\n// Route represents a topic for subscription that has a channel to receive messages.\ntype Route struct {\n\tRouteConfig\n\n\tmessagesC chan *protocol.Message\n\n\t// queue that will store the messages in correct order.\n\t// The queue can have a settable size;\n\t// if it reaches the capacity the route is closed.\n\tqueue *queue\n\n\tcloseC chan struct{}\n\n\t// Indicates if the consumer go routine is running\n\tconsuming bool\n\tinvalid   bool\n\tmu        sync.RWMutex\n\n\tlogger *log.Entry\n}\n\n// NewRoute creates a new route pointer\nfunc NewRoute(config RouteConfig) *Route {\n\troute := &Route{\n\t\tRouteConfig: config,\n\n\t\tqueue:     newQueue(config.queueSize),\n\t\tmessagesC: make(chan *protocol.Message, config.ChannelSize),\n\t\tcloseC:    make(chan struct{}),\n\n\t\tlogger: logger.WithFields(log.Fields{\"path\": config.Path, \"params\": config.RouteParams}),\n\t}\n\n\treturn route\n}\n\n// Key returns a string that uniquely identifies the route\n// by concatenating the route Path and the route params\n// Example:\n//  /topic user_id:user1 application_id:app1\nfunc (r *Route) Key() string {\n\treturn strings.Join([]string{\n\t\tstring(r.Path),\n\t\tr.RouteParams.Key(),\n\t}, \" \")\n}\n\nfunc (r *Route) String() string {\n\treturn fmt.Sprintf(\"Path: %s , Params: %s\", r.Path, r.RouteParams)\n}\n\n// Deliver takes a messages and adds it to the queue to be delivered into the channel\n// isFromStore boolean specifies if the messages are being fetched or are from the router\n// In case they are fetched from 
the store the route won't close if it's full\nfunc (r *Route) Deliver(msg *protocol.Message, isFromStore bool) error {\n\tloggerMessage := r.logger.WithField(\"message\", msg)\n\n\tif r.isInvalid() {\n\t\tloggerMessage.Error(\"Cannot deliver because route is invalid\")\n\t\tmTotalDeliverMessageErrors.Add(1)\n\t\treturn ErrInvalidRoute\n\t}\n\n\tif !r.messageFilter(msg) {\n\t\tloggerMessage.Debug(\"Message filter didn't match route\")\n\t\tmTotalNotMatchedByFilters.Add(1)\n\t\treturn nil\n\t}\n\t// not an infinite queue\n\tif r.queueSize >= 0 {\n\t\t// if size is zero the sending is direct\n\t\tif r.queueSize == 0 {\n\t\t\treturn r.sendDirect(msg, isFromStore)\n\t\t} else if r.queue.size() >= r.queueSize {\n\t\t\tloggerMessage.Error(\"Closing route because queue is full\")\n\t\t\tr.Close()\n\t\t\tmTotalDeliverMessageErrors.Add(1)\n\t\t\treturn ErrQueueFull\n\t\t}\n\t}\n\n\tr.queue.push(msg)\n\tloggerMessage.WithField(\"queue_size\", r.queue.size()).Debug(\"Deliver\")\n\n\tr.consume()\n\treturn nil\n}\n\n// MessagesChannel returns the route channel to send or receive messages.\nfunc (r *Route) MessagesChannel() <-chan *protocol.Message {\n\treturn r.messagesC\n}\n\n// Provide accepts a router to use for fetching/subscribing and a boolean\n// indicating if it should close the route after fetching without subscribing\n// The method is blocking until fetch is finished or route is subscribed\nfunc (r *Route) Provide(router Router, subscribe bool) error {\n\tif r.FetchRequest != nil {\n\t\terr := r.handleFetch(router)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if !subscribe {\n\t\treturn ErrMissingFetchRequest\n\t}\n\tif !subscribe {\n\t\treturn nil\n\t}\n\treturn r.handleSubscribe(router)\n}\n\nfunc (r *Route) handleFetch(router Router) error {\n\tif r.isInvalid() {\n\t\treturn ErrInvalidRoute\n\t}\n\n\tr.FetchRequest.Partition = r.Path.Partition()\n\tms, err := router.MessageStore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar (\n\t\tlastID   
uint64\n\t\treceived int\n\t)\n\nREFETCH:\n\t// check if we need to continue fetching\n\tmaxID, err := ms.MaxMessageID(r.FetchRequest.Partition)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r.FetchRequest.StartID > maxID && r.FetchRequest.Direction == store.DirectionForward {\n\t\treturn nil\n\t}\n\n\tif received >= r.FetchRequest.Count || lastID >= maxID ||\n\t\t(r.FetchRequest.EndID > 0 && r.FetchRequest.EndID <= lastID) {\n\t\treturn nil\n\t}\n\tr.FetchRequest.Init()\n\n\tif err := router.Fetch(r.FetchRequest); err != nil {\n\t\treturn err\n\t}\n\tcount := r.FetchRequest.Ready()\n\tr.logger.WithField(\"count\", count).Debug(\"Receiving messages\")\n\n\tfor {\n\t\tselect {\n\t\tcase fetchedMessage, open := <-r.FetchRequest.Messages():\n\t\t\tif !open {\n\t\t\t\tr.logger.Debug(\"Fetch channel closed.\")\n\t\t\t\tgoto REFETCH\n\t\t\t}\n\n\t\t\tr.logger.WithField(\"fetchedMessageID\", fetchedMessage.ID).Debug(\"Fetched message\")\n\t\t\tmessage, err := protocol.ParseMessage(fetchedMessage.Message)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tr.logger.WithField(\"messageID\", message.ID).Debug(\"Sending fetched message in channel\")\n\t\t\tif err := r.Deliver(message, true); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlastID = message.ID\n\t\t\treceived++\n\t\tcase err := <-r.FetchRequest.Errors():\n\t\t\treturn err\n\t\tcase <-router.Done():\n\t\t\tr.logger.Debug(\"Stopping fetch because the router is shutting down\")\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (r *Route) handleSubscribe(router Router) error {\n\t_, err := router.Subscribe(r)\n\treturn err\n}\n\n// Close closes the route channel.\nfunc (r *Route) Close() error {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.logger.Debug(\"Closing route\")\n\n\t// route already closed\n\tif r.invalid {\n\t\treturn ErrInvalidRoute\n\t}\n\n\tr.invalid = true\n\tclose(r.messagesC)\n\tclose(r.closeC)\n\n\treturn ErrInvalidRoute\n}\n\n// Equal will check if the route path is matched and all the 
parameters or just a\n// subset of specific parameters between the routes\nfunc (r *Route) Equal(other *Route, keys ...string) bool {\n\treturn r.RouteConfig.Equal(other.RouteConfig, keys...)\n}\n\n// IsInvalid returns true if the route is invalid, has been closed previously\nfunc (r *Route) isInvalid() bool {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\treturn r.invalid\n}\n\nfunc (r *Route) setInvalid(invalid bool) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.invalid = invalid\n}\n\nfunc (r *Route) isConsuming() bool {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\treturn r.consuming\n}\n\nfunc (r *Route) setConsuming(consuming bool) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.consuming = consuming\n}\n\n// consume starts a goroutine to consume the queue and pass the messages to route\n// channel. Stops if there are no items in the queue.\nfunc (r *Route) consume() {\n\tif r.isConsuming() {\n\t\treturn\n\t}\n\tr.setConsuming(true)\n\n\tr.logger.Debug(\"Consuming route queue\")\n\tgo func() {\n\t\tdefer r.setConsuming(false)\n\n\t\tvar (\n\t\t\tmsg *protocol.Message\n\t\t\terr error\n\t\t)\n\n\t\tfor {\n\t\t\tif r.isInvalid() {\n\t\t\t\tr.logger.Debug(\"Stopping to consume because route is invalid.\")\n\t\t\t\tmTotalDeliverMessageErrors.Add(1)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmsg, err = r.queue.poll()\n\n\t\t\tif err != nil {\n\t\t\t\tif err == errEmptyQueue {\n\t\t\t\t\tr.logger.Debug(\"Empty queue\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tr.logger.WithField(\"error\", err).Error(\"Error fetching a message from queue\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err = r.send(msg); err != nil {\n\t\t\t\tr.logger.WithField(\"message\", msg).Error(\"Error sending message through route\")\n\t\t\t\tif err == errTimeout || err == ErrInvalidRoute {\n\t\t\t\t\t// channel been closed, ending the consumer\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t// remove the first item from the queue\n\t\t\tr.queue.remove()\n\t\t}\n\t}()\n\truntime.Gosched()\n}\n\n// send message through the 
channel\nfunc (r *Route) send(msg *protocol.Message) error {\n\tdefer r.invalidRecover()\n\n\tr.logger.WithField(\"message\", msg).Debug(\"Sending message through route channel\")\n\n\t// no timeout, means we don't close the channel\n\tif r.timeout == -1 {\n\t\tr.messagesC <- msg\n\t\tr.logger.WithField(\"size\", len(r.messagesC)).Debug(\"Channel size\")\n\t\treturn nil\n\t}\n\n\tselect {\n\tcase r.messagesC <- msg:\n\t\treturn nil\n\tcase <-r.closeC:\n\t\treturn ErrInvalidRoute\n\tcase <-time.After(r.timeout):\n\t\tr.logger.Debug(\"Closing route because of timeout\")\n\t\tr.Close()\n\t\treturn errTimeout\n\t}\n}\n\n// invalidRecover is used to recover in case we end up sending on a closed channel\nfunc (r *Route) invalidRecover() error {\n\tif rc := recover(); rc != nil && r.isInvalid() {\n\t\tr.logger.WithField(\"error\", rc).Debug(\"Recovered closed route\")\n\t\treturn ErrInvalidRoute\n\t}\n\treturn nil\n}\n\n// sendDirect sends the message directly in the channel\nfunc (r *Route) sendDirect(msg *protocol.Message, store bool) error {\n\tif store {\n\t\tr.messagesC <- msg\n\t\treturn nil\n\t}\n\n\tselect {\n\tcase r.messagesC <- msg:\n\t\treturn nil\n\tdefault:\n\t\tr.logger.Debug(\"Closing route because of full channel\")\n\t\tr.Close()\n\t\treturn ErrChannelFull\n\t}\n}\n"
  },
  {
    "path": "server/router/route_config.go",
    "content": "package router\n\nimport (\n\t\"time\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Matcher is a func type that receives two route configurations pointers as parameters and\n// returns true if the routes are matching\ntype Matcher func(RouteConfig, RouteConfig, ...string) bool\n\ntype RouteConfig struct {\n\tRouteParams\n\n\tPath protocol.Path\n\n\tChannelSize int\n\n\t// queueSize specifies the size of the internal queue slice\n\t// (how many items to hold before the channel is closed).\n\t// If set to `0` then the queue will have no capacity and the messages\n\t// are directly sent, without buffering.\n\tqueueSize int\n\n\t// timeout defines how long to wait for the message to be read on the channel.\n\t// If timeout is reached the route is closed.\n\ttimeout time.Duration\n\n\t// Matcher if set will be used to check equality of the routes\n\tMatcher Matcher `json:\"-\"`\n\n\t// FetchRequest to fetch messages before subscribing\n\t// The Partition field of the FetchRequest is overrided with the Partition of the Route topic\n\tFetchRequest *store.FetchRequest `json:\"-\"`\n}\n\nfunc (rc *RouteConfig) Equal(other RouteConfig, keys ...string) bool {\n\tif rc.Matcher != nil {\n\t\treturn rc.Matcher(*rc, other, keys...)\n\t}\n\treturn rc.Path == other.Path && rc.RouteParams.Equal(other.RouteParams, keys...)\n}\n\n// messageFilter returns true if the route matches message filters\nfunc (rc *RouteConfig) messageFilter(m *protocol.Message) bool {\n\tif m.Filters == nil {\n\t\treturn true\n\t}\n\n\treturn rc.Filter(m.Filters)\n}\n\n// Filter returns true if all filters are matched on the route\nfunc (rc *RouteConfig) Filter(filters map[string]string) bool {\n\tfor key, value := range filters {\n\t\tif rc.Get(key) != value {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n"
  },
  {
    "path": "server/router/route_config_test.go",
    "content": "package router\n\nimport (\n\t\"testing\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\ntype routeConfig struct {\n\tpath   string\n\tfields map[string]string\n}\n\nfunc TestRouteConfig_Equal(t *testing.T) {\n\ta := assert.New(t)\n\n\ttestcases := map[string]struct {\n\t\t// first route definition\n\t\tfirst routeConfig\n\n\t\t// second route definition\n\t\tsecond routeConfig\n\n\t\tMatcher Matcher\n\n\t\t// keys to pass on matching\n\t\tkeys []string\n\n\t\t// expected result\n\t\tresult bool\n\t}{\n\t\t\"full equal\": {\n\t\t\tfirst: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsecond: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\n\t\t\"full equal with matcher\": {\n\t\t\tfirst: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsecond: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tMatcher: func(config RouteConfig, other RouteConfig, keys ...string) bool {\n\t\t\t\treturn config.Path == other.Path\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\n\t\t\"make sure matcher is called\": {\n\t\t\tfirst: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsecond: routeConfig{\n\t\t\t\tpath: \"/incorrect-path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tMatcher: func(config 
RouteConfig, other RouteConfig, keys ...string) bool {\n\t\t\t\treturn true\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\n\t\t\"partial match\": {\n\t\t\tfirst: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsecond: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field3\": \"value3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tkeys:   []string{\"field1\"},\n\t\t\tresult: true,\n\t\t},\n\n\t\t\"unequal path with keys\": {\n\t\t\tfirst: routeConfig{\n\t\t\t\tpath: \"/path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field2\": \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsecond: routeConfig{\n\t\t\t\tpath: \"/different-path\",\n\t\t\t\tfields: map[string]string{\n\t\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\t\"field3\": \"value3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tkeys:   []string{\"field1\"},\n\t\t\tresult: false,\n\t\t},\n\t}\n\n\tfor name, c := range testcases {\n\t\tfirst := RouteConfig{\n\t\t\tPath:        protocol.Path(c.first.path),\n\t\t\tRouteParams: RouteParams(c.first.fields),\n\t\t\tMatcher:     c.Matcher,\n\t\t}\n\t\tsecond := RouteConfig{\n\t\t\tPath:        protocol.Path(c.second.path),\n\t\t\tRouteParams: RouteParams(c.second.fields),\n\t\t\tMatcher:     c.Matcher,\n\t\t}\n\t\ta.Equal(c.result, first.Equal(second, c.keys...), \"Failed forward check for case: \"+name)\n\t\ta.Equal(c.result, second.Equal(first, c.keys...), \"Failed backwards check for case: \"+name)\n\t}\n}\n\nfunc TestRouteConfig_messageFilter(t *testing.T) {\n\ta := assert.New(t)\n\n\trouteConfig := RouteConfig{\n\t\tRouteParams: RouteParams{\n\t\t\t\"field1\": \"value1\",\n\t\t\t\"field2\": \"value2\",\n\t\t},\n\t}\n\n\ttestcases := map[string]struct {\n\t\t// filters on the message\n\t\tfilters map[string]string\n\n\t\t// expected result\n\t\tresult bool\n\t}{\n\t\t\"no 
filter\": {\n\t\t\tfilters: nil,\n\t\t\tresult:  true,\n\t\t},\n\t\t\"partial filter\": {\n\t\t\tfilters: map[string]string{\n\t\t\t\t\"field1\": \"value1\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"full filter\": {\n\t\t\tfilters: map[string]string{\n\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\"field2\": \"value2\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"one invalid filter\": {\n\t\t\tfilters: map[string]string{\n\t\t\t\t\"field1\": \"value1\",\n\t\t\t\t\"field2\": \"value3\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"both invalid\": {\n\t\t\tfilters: map[string]string{\n\t\t\t\t\"field1\": \"value3\",\n\t\t\t\t\"field2\": \"value4\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"partial invalid\": {\n\t\t\tfilters: map[string]string{\n\t\t\t\t\"field2\": \"value4\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t}\n\n\tfor name, c := range testcases {\n\t\tm := &protocol.Message{Filters: c.filters}\n\t\ta.Equal(c.result, routeConfig.messageFilter(m), \"Failed filter: \"+name)\n\t}\n}\n"
  },
  {
    "path": "server/router/route_params.go",
    "content": "package router\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\ntype RouteParams map[string]string\n\nfunc (rp *RouteParams) String() string {\n\ts := make([]string, 0, len(*rp))\n\tfor k, v := range *rp {\n\t\ts = append(s, fmt.Sprintf(\"%s:%s\", k, v))\n\t}\n\treturn strings.Join(s, \" \")\n}\n\nfunc (rp *RouteParams) Key() string {\n\t// The generated key must be the same always\n\ts := make([]string, 0, len(*rp))\n\tfor _, k := range rp.orderedKeys() {\n\t\ts = append(s, fmt.Sprintf(\"%s:%s\", k, (*rp)[k]))\n\t}\n\treturn strings.Join(s, \" \")\n}\n\n// orderedKeys returns a slice of ordered\nfunc (rp *RouteParams) orderedKeys() []string {\n\tkeys := make([]string, len(*rp))\n\ti := 0\n\tfor k := range *rp {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}\n\n// Equal verifies if the `receiver` params are the same as `other` params.\n// The `keys` param specifies which keys to check in case the match has to be\n// done only on a separate set of keys and not on all keys.\nfunc (rp *RouteParams) Equal(other RouteParams, keys ...string) bool {\n\tif len(keys) > 0 {\n\t\treturn rp.partialEqual(other, keys)\n\t}\n\tif len(*rp) != len(other) {\n\t\treturn false\n\t}\n\tfor k, v := range *rp {\n\t\tif v2, ok := other[k]; !ok {\n\t\t\treturn false\n\t\t} else if v != v2 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (rp *RouteParams) partialEqual(other RouteParams, fields []string) bool {\n\tfor _, key := range fields {\n\t\tif v, ok := other[key]; !ok {\n\t\t\treturn false\n\t\t} else if v != (*rp)[key] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (rp *RouteParams) Get(key string) string {\n\treturn (*rp)[key]\n}\n\nfunc (rp *RouteParams) Set(key, value string) {\n\t(*rp)[key] = value\n}\n\nfunc (rp *RouteParams) Copy() RouteParams {\n\tnrp := make(RouteParams, len(*rp))\n\tfor k, v := range *rp {\n\t\tnrp[k] = v\n\t}\n\treturn nrp\n}\n"
  },
  {
    "path": "server/router/route_test.go",
    "content": "package router\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar (\n\tdummyPath          = protocol.Path(\"/dummy\")\n\tdummyMessageWithID = &protocol.Message{ID: 1, Path: dummyPath, Body: []byte(\"dummy body\")}\n\tdummyMessageBytes  = `/dummy,MESSAGE_ID,user01,phone01,{},1420110000,1\n{\"Content-Type\": \"text/plain\", \"Correlation-Id\": \"7sdks723ksgqn\"}\nHello World`\n\tchanSize  = 10\n\tqueueSize = 5\n)\n\n// Send messages in a zero queued route and expect the route to be closed\n// Same test exists for the router\n// see router_test.go:TestRoute_IsRemovedIfChannelIsFull\nfunc TestRouteDeliver_sendDirect(t *testing.T) {\n\ta := assert.New(t)\n\tr := testRoute()\n\n\tfor i := 0; i < chanSize; i++ {\n\t\terr := r.Deliver(dummyMessageWithID, false)\n\t\ta.NoError(err)\n\t}\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tr.Deliver(dummyMessageWithID, false)\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(10 * time.Millisecond):\n\t\ta.Fail(\"Message not getting sent!\")\n\t}\n\n\tfor i := 0; i < chanSize; i++ {\n\t\tselect {\n\t\tcase _, open := <-r.MessagesChannel():\n\t\t\ta.True(open)\n\t\tcase <-time.After(time.Millisecond * 10):\n\t\t\ta.Fail(\"error not enough messages in channel\")\n\t\t}\n\t}\n\n\t// and the channel is closed\n\tselect {\n\tcase _, open := <-r.MessagesChannel():\n\t\ta.False(open)\n\tdefault:\n\t\tlogger.Debug(\"len(r.C): %v\", len(r.MessagesChannel()))\n\t\ta.Fail(\"channel was not closed\")\n\t}\n\n\ta.True(r.invalid)\n\ta.False(r.consuming)\n\ta.Equal(0, r.queue.size())\n}\n\nfunc TestRouteDeliver_Invalid(t *testing.T) {\n\ta := assert.New(t)\n\tr := 
testRoute()\n\tr.invalid = true\n\n\terr := r.Deliver(dummyMessageWithID, true)\n\ta.Equal(ErrInvalidRoute, err)\n}\n\nfunc TestRouteDeliver_QueueSize(t *testing.T) {\n\ta := assert.New(t)\n\t// create a route with a queue size\n\tr := testRoute()\n\tr.queueSize = queueSize\n\n\t// fill the channel buffer and the queue\n\tfor i := 0; i < chanSize+queueSize; i++ {\n\t\tr.Deliver(dummyMessageWithID, true)\n\t}\n\n\t// and the route should close itself if the queue is overflowed\n\tdone := make(chan bool)\n\tgo func() {\n\t\terr := r.Deliver(dummyMessageWithID, true)\n\t\ta.NotNil(err)\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(40 * time.Millisecond):\n\t\ta.Fail(\"Message not delivering.\")\n\t}\n\ttime.Sleep(10 * time.Millisecond)\n\ta.True(r.isInvalid())\n\ta.False(r.isConsuming())\n}\n\nfunc TestRouteDeliver_WithTimeout(t *testing.T) {\n\ta := assert.New(t)\n\n\t// create a route with timeout and infinite queue size\n\tr := testRoute()\n\tr.queueSize = -1 // infinite queue size\n\tr.timeout = 10 * time.Millisecond\n\n\t// fill the channel buffer\n\tfor i := 0; i < chanSize; i++ {\n\t\tr.Deliver(dummyMessageWithID, true)\n\t}\n\n\t// delivering one more message should result in a closed route\n\tdone := make(chan bool)\n\tgo func() {\n\t\terr := r.Deliver(dummyMessageWithID, true)\n\t\ta.NoError(err)\n\t\tdone <- true\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(40 * time.Millisecond):\n\t\ta.Fail(\"Message not delivering.\")\n\t}\n\n\ttime.Sleep(30 * time.Millisecond)\n\terr := r.Deliver(dummyMessageWithID, true)\n\ta.Equal(ErrInvalidRoute, err)\n\ta.True(r.invalid)\n\ta.False(r.consuming)\n}\n\nfunc TestRoute_CloseTwice(t *testing.T) {\n\ta := assert.New(t)\n\n\tr := testRoute()\n\terr := r.Close()\n\ta.Equal(ErrInvalidRoute, err)\n\n\terr = r.Close()\n\ta.Equal(ErrInvalidRoute, err)\n}\n\nfunc TestQueue_ShiftEmpty(t *testing.T) {\n\tq := newQueue(5)\n\tq.remove()\n\tassert.Equal(t, 0, q.size())\n}\n\nfunc testRoute() 
*Route {\n\toptions := RouteConfig{\n\t\tRouteParams: RouteParams{\n\t\t\t\"application_id\": \"appID\",\n\t\t\t\"user_id\":        \"userID\",\n\t\t},\n\t\tPath:        protocol.Path(dummyPath),\n\t\tChannelSize: chanSize,\n\t}\n\treturn NewRoute(options)\n}\n\nfunc TestRoute_messageFilter(t *testing.T) {\n\ta := assert.New(t)\n\n\troute := NewRoute(RouteConfig{\n\t\tPath:        \"/topic\",\n\t\tChannelSize: 1,\n\t\tRouteParams: RouteParams{\n\t\t\t\"field1\": \"value1\",\n\t\t\t\"field2\": \"value2\",\n\t\t},\n\t})\n\n\tmsg := &protocol.Message{\n\t\tID:   1,\n\t\tPath: \"/topic\",\n\t}\n\troute.Deliver(msg, false)\n\n\t// test message is received on the channel\n\ta.True(isMessageReceived(route, msg))\n\n\tmsg = &protocol.Message{\n\t\tID:   1,\n\t\tPath: \"/topic\",\n\t}\n\tmsg.SetFilter(\"field1\", \"value1\")\n\troute.Deliver(msg, true)\n\ta.True(isMessageReceived(route, msg))\n\n\tmsg = &protocol.Message{\n\t\tID:   1,\n\t\tPath: \"/topic\",\n\t}\n\tmsg.SetFilter(\"field1\", \"value1\")\n\tmsg.SetFilter(\"field2\", \"value2\")\n\troute.Deliver(msg, true)\n\ta.True(isMessageReceived(route, msg))\n\n\tmsg = &protocol.Message{\n\t\tID:   1,\n\t\tPath: \"/topic\",\n\t}\n\tmsg.SetFilter(\"field1\", \"value1\")\n\tmsg.SetFilter(\"field2\", \"value2\")\n\tmsg.SetFilter(\"field3\", \"value3\")\n\troute.Deliver(msg, true)\n\ta.False(isMessageReceived(route, msg))\n\n\tmsg = &protocol.Message{\n\t\tID:   1,\n\t\tPath: \"/topic\",\n\t}\n\tmsg.SetFilter(\"field3\", \"value3\")\n\troute.Deliver(msg, true)\n\ta.False(isMessageReceived(route, msg))\n}\n\nfunc isMessageReceived(route *Route, msg *protocol.Message) bool {\n\tselect {\n\tcase m, opened := <-route.MessagesChannel():\n\t\tif !opened {\n\t\t\treturn false\n\t\t}\n\n\t\treturn m == msg\n\tcase <-time.After(20 * time.Millisecond):\n\t}\n\n\treturn false\n}\n\nfunc TestRoute_Provide_ErrMissingFetchRequest(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := 
assert.New(t)\n\n\trouterMock := NewMockRouter(ctrl)\n\troute := NewRoute(RouteConfig{\n\t\tPath: \"/fetch_request\",\n\t})\n\terr := route.Provide(routerMock, false)\n\ta.Error(err)\n\ta.Equal(ErrMissingFetchRequest, err)\n}\n\nfunc TestRoute_Provide_Fetch(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tmsMock := NewMockMessageStore(ctrl)\n\trouterMock := NewMockRouter(ctrl)\n\n\trouterMock.EXPECT().MessageStore().Return(msMock, nil)\n\n\troute := NewRoute(RouteConfig{\n\t\tPath:         protocol.Path(\"/fetch_request\"),\n\t\tChannelSize:  5,\n\t\tFetchRequest: store.NewFetchRequest(\"\", 0, 0, store.DirectionForward, -1),\n\t})\n\n\tmsMock.EXPECT().MaxMessageID(\"fetch_request\").Return(uint64(2), nil).Times(2)\n\n\trouterMock.EXPECT().Done().Return(make(chan bool)).AnyTimes()\n\trouterMock.EXPECT().Fetch(gomock.Any()).Do(func(req *store.FetchRequest) {\n\t\ta.Equal(req.Partition, \"fetch_request\")\n\t\ta.Equal(uint64(0), req.StartID)\n\t\ta.Equal(uint64(0), req.EndID)\n\t\ta.Equal(store.DirectionForward, req.Direction)\n\t\tgo func() {\n\t\t\treq.StartC <- 2\n\n\t\t\t// send to messages\n\t\t\treq.Push(1, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(1), 1)))\n\t\t\treq.Push(2, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(2), 1)))\n\t\t\treq.Done()\n\t\t}()\n\t})\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\treceivedMessages := 0\n\t\tfor i := 1; i <= 2; i++ {\n\t\t\tselect {\n\t\t\tcase m, opened := <-route.MessagesChannel():\n\t\t\t\tif opened {\n\t\t\t\t\treceivedMessages++\n\t\t\t\t\ta.Equal(uint64(i), m.ID)\n\t\t\t\t}\n\t\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t\ta.Fail(\"Message not received\")\n\t\t\t}\n\t\t}\n\t\ta.Equal(2, receivedMessages)\n\t\tclose(done)\n\t}()\n\n\terr := route.Provide(routerMock, false)\n\ta.NoError(err)\n\t<-done\n}\n\nfunc TestRoute_Provide_WithSubscribe(t *testing.T) {\n\tctrl, finish := 
testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tmsMock := NewMockMessageStore(ctrl)\n\trouterMock := NewMockRouter(ctrl)\n\n\trouterMock.EXPECT().MessageStore().Return(msMock, nil)\n\n\troute := NewRoute(RouteConfig{\n\t\tPath:         protocol.Path(\"/fetch_request\"),\n\t\tChannelSize:  4,\n\t\tFetchRequest: store.NewFetchRequest(\"\", 0, 0, store.DirectionForward, -1),\n\t})\n\n\trouterMock.EXPECT().Done().Return(make(chan bool)).AnyTimes()\n\trouterMock.EXPECT().Fetch(gomock.Any()).Do(func(req *store.FetchRequest) {\n\t\ta.Equal(req.Partition, \"fetch_request\")\n\t\ta.Equal(uint64(0), req.StartID)\n\t\ta.Equal(uint64(0), req.EndID)\n\t\ta.Equal(store.DirectionForward, req.Direction)\n\n\t\tgo func() {\n\t\t\treq.StartC <- 2\n\n\t\t\t// send to messages\n\t\t\treq.Push(1, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(1), 1)))\n\t\t\treq.Push(2, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(2), 1)))\n\t\t\treq.Done()\n\t\t}()\n\t})\n\n\tmsMock.EXPECT().MaxMessageID(gomock.Eq(\"fetch_request\")).Return(uint64(2), nil).Times(2)\n\n\trouterMock.EXPECT().Subscribe(gomock.Any()).Do(func(r *Route) (*Route, error) {\n\t\ta.Equal(route, r)\n\n\t\tfor i := 3; i <= 4; i++ {\n\t\t\tr.Deliver(&protocol.Message{\n\t\t\t\tID:   uint64(i),\n\t\t\t\tPath: \"/fetch_request\",\n\t\t\t\tBody: []byte(\"dummy\"),\n\t\t\t}, true)\n\t\t}\n\n\t\treturn r, nil\n\t})\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\treceivedMessages := 0\n\t\tfor i := 1; i <= 4; i++ {\n\t\t\tselect {\n\t\t\tcase m, opened := <-route.MessagesChannel():\n\t\t\t\tif opened {\n\t\t\t\t\treceivedMessages++\n\t\t\t\t\ta.Equal(uint64(i), m.ID)\n\t\t\t\t}\n\t\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t\ta.Fail(\"Message not received\")\n\t\t\t}\n\t\t}\n\t\ta.Equal(4, receivedMessages)\n\t\tclose(done)\n\t}()\n\n\terr := route.Provide(routerMock, true)\n\ta.NoError(err)\n\t<-done\n}\n\ntype startable interface {\n\tStart() 
error\n}\n\ntype stopable interface {\n\tStop() error\n}\n\n// Test that the route will fetch in case new messages arrived that match the\n// fetch request\nfunc TestRoute_Provide_MultipleFetch(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\tmemoryKV := kvstore.NewMemoryKVStore()\n\n\tmsMock := NewMockMessageStore(ctrl)\n\trouter := New(auth.AllowAllAccessManager(true), msMock, memoryKV, nil)\n\n\tif startable, ok := router.(startable); ok {\n\t\tstartable.Start()\n\t\tif stopable, ok := router.(stopable); ok {\n\t\t\tdefer stopable.Stop()\n\t\t}\n\t}\n\n\tpath := protocol.Path(\"/fetch_request\")\n\n\troute := NewRoute(RouteConfig{\n\t\tPath:         path,\n\t\tChannelSize:  4,\n\t\tFetchRequest: store.NewFetchRequest(\"\", 0, 0, store.DirectionForward, -1),\n\t})\n\n\tblock := make(chan struct{})\n\tmaxIDExpect := msMock.EXPECT().MaxMessageID(gomock.Any()).\n\t\tReturn(uint64(2), nil)\n\tmsMock.EXPECT().Fetch(gomock.Any()).Do(func(req *store.FetchRequest) {\n\t\ta.Equal(\"fetch_request\", req.Partition)\n\n\t\t// block the fetch request until pushing some new messages in the router\n\t\tgo func() {\n\t\t\t<-block\n\t\t\treq.StartC <- 2\n\t\t\treq.Push(1, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(1), 1)))\n\t\t\treq.Push(2, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(2), 1)))\n\t\t\treq.Done()\n\t\t}()\n\t}).After(maxIDExpect)\n\n\tmsMock.EXPECT().MaxMessageID(gomock.Any()).\n\t\tReturn(uint64(4), nil).Times(2)\n\tmsMock.EXPECT().Fetch(gomock.Any()).Do(func(req *store.FetchRequest) {\n\t\ta.Equal(\"fetch_request\", req.Partition)\n\t\tgo func() {\n\t\t\treq.StartC <- 2\n\n\t\t\t// block the fetch request until pushing some new messages in the router\n\t\t\treq.Push(3, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(3), 1)))\n\t\t\treq.Push(4, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(4), 
1)))\n\t\t\treq.Done()\n\t\t}()\n\t})\n\n\tmsMock.EXPECT().StoreMessage(gomock.Any(), gomock.Any()).AnyTimes()\n\n\trouter.HandleMessage(&protocol.Message{ID: 3, Path: path, Body: []byte(\"dummy body\")})\n\trouter.HandleMessage(&protocol.Message{ID: 4, Path: path, Body: []byte(\"dummy body\")})\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\treceivedMessages := 0\n\t\tfor i := 1; i <= 4; i++ {\n\t\t\tselect {\n\t\t\tcase m, opened := <-route.MessagesChannel():\n\t\t\t\tif opened {\n\t\t\t\t\treceivedMessages++\n\t\t\t\t\ta.Equal(uint64(i), m.ID)\n\t\t\t\t}\n\t\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t\ta.Fail(fmt.Sprintf(\"Message not received: %d\", i))\n\t\t\t}\n\t\t}\n\t\ta.Equal(4, receivedMessages)\n\t\tclose(done)\n\t}()\n\tclose(block)\n\n\terr := route.Provide(router, true)\n\ta.NoError(err)\n\t<-done\n}\n\nfunc TestRoute_Provide_EndIDSubscribe(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\tmsMock := NewMockMessageStore(ctrl)\n\trouterMock := NewMockRouter(ctrl)\n\n\trouterMock.EXPECT().MessageStore().Return(msMock, nil)\n\n\troute := NewRoute(RouteConfig{\n\t\tPath:         protocol.Path(\"/fetch_request\"),\n\t\tChannelSize:  5,\n\t\tFetchRequest: store.NewFetchRequest(\"\", 8, 10, store.DirectionForward, -1),\n\t})\n\n\tmsMock.EXPECT().MaxMessageID(\"fetch_request\").Return(uint64(12), nil).Times(2)\n\n\trouterMock.EXPECT().Done().Return(make(chan bool)).AnyTimes()\n\trouterMock.EXPECT().Fetch(gomock.Any()).Do(func(req *store.FetchRequest) {\n\t\ta.Equal(req.Partition, \"fetch_request\")\n\t\ta.Equal(uint64(8), req.StartID)\n\t\ta.Equal(uint64(10), req.EndID)\n\t\ta.Equal(store.DirectionForward, req.Direction)\n\n\t\tgo func() {\n\t\t\treq.StartC <- 3\n\n\t\t\t// send the messages\n\t\t\treq.Push(8, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(8), 1)))\n\t\t\treq.Push(9, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(9), 
1)))\n\t\t\treq.Push(10, []byte(strings.Replace(dummyMessageBytes, \"MESSAGE_ID\", strconv.Itoa(10), 1)))\n\t\t\treq.Done()\n\t\t}()\n\t})\n\n\trouterMock.EXPECT().Subscribe(gomock.Eq(route)).Return(route, nil)\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\treceivedMessages := 0\n\t\tfor i := 8; i <= 10; i++ {\n\t\t\tselect {\n\t\t\tcase m, opened := <-route.MessagesChannel():\n\t\t\t\tif opened {\n\t\t\t\t\treceivedMessages++\n\t\t\t\t\ta.Equal(uint64(i), m.ID)\n\t\t\t\t}\n\t\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t\ta.Fail(\"Message not received\")\n\t\t\t}\n\t\t}\n\t\ta.Equal(3, receivedMessages)\n\t\tclose(done)\n\t}()\n\n\terr := route.Provide(routerMock, true)\n\ta.NoError(err)\n\t<-done\n}\n"
  },
  {
    "path": "server/router/router.go",
    "content": "package router\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/docker/distribution/health\"\n\n\t\"encoding/json\"\n\n\t\"net/http\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\nconst (\n\toverloadedHandleChannelRatio = 0.9\n\thandleChannelCapacity        = 500\n\tsubscribeChannelCapacity     = 10\n\tunsubscribeChannelCapacity   = 10\n\tprefix                       = \"/admin/router\"\n)\n\n// Router interface provides a mechanism for PubSub messaging\ntype Router interface {\n\tSubscribe(r *Route) (*Route, error)\n\tUnsubscribe(r *Route)\n\tHandleMessage(message *protocol.Message) error\n\tFetch(*store.FetchRequest) error\n\tGetSubscribers(topic string) ([]byte, error)\n\n\tAccessManager() (auth.AccessManager, error)\n\tMessageStore() (store.MessageStore, error)\n\tKVStore() (kvstore.KVStore, error)\n\tCluster() *cluster.Cluster\n\n\tDone() <-chan bool\n}\n\n// Helper struct to pass `Route` to subscription channel and provide a notification channel.\ntype subRequest struct {\n\troute *Route\n\tdoneC chan bool\n}\n\ntype router struct {\n\troutes       map[protocol.Path][]*Route // mapping the path to the route slice\n\thandleC      chan *protocol.Message\n\tsubscribeC   chan subRequest\n\tunsubscribeC chan subRequest\n\tstopC        chan bool      // Channel that signals stop of the router\n\tstopping     bool           // Flag: the router is in stopping process and no incoming messages are accepted\n\twg           sync.WaitGroup // Add any operation that we need to wait upon here\n\n\taccessManager auth.AccessManager\n\tmessageStore  store.MessageStore\n\tkvStore       kvstore.KVStore\n\tcluster       *cluster.Cluster\n\n\tsync.RWMutex\n}\n\n// New returns a pointer to Router\nfunc 
New(accessManager auth.AccessManager, messageStore store.MessageStore, kvStore kvstore.KVStore, cluster *cluster.Cluster) Router {\n\treturn &router{\n\t\troutes: make(map[protocol.Path][]*Route),\n\n\t\thandleC:      make(chan *protocol.Message, handleChannelCapacity),\n\t\tsubscribeC:   make(chan subRequest, subscribeChannelCapacity),\n\t\tunsubscribeC: make(chan subRequest, unsubscribeChannelCapacity),\n\t\tstopC:        make(chan bool, 1),\n\n\t\taccessManager: accessManager,\n\t\tmessageStore:  messageStore,\n\t\tkvStore:       kvStore,\n\t\tcluster:       cluster,\n\t}\n}\n\nfunc (router *router) Start() error {\n\trouter.panicIfInternalDependenciesAreNil()\n\tlogger.Info(\"Starting router\")\n\tresetRouterMetrics()\n\n\trouter.wg.Add(1)\n\trouter.setStopping(false)\n\n\tgo func() {\n\t\tfor {\n\t\t\tif router.stopping && router.channelsAreEmpty() {\n\t\t\t\trouter.closeRoutes()\n\t\t\t\trouter.wg.Done()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfunc() {\n\t\t\t\tdefer protocol.PanicLogger()\n\n\t\t\t\tselect {\n\t\t\t\tcase message := <-router.handleC:\n\t\t\t\t\trouter.handleMessage(message)\n\t\t\t\t\truntime.Gosched()\n\t\t\t\tcase subscriber := <-router.subscribeC:\n\t\t\t\t\trouter.subscribe(subscriber.route)\n\t\t\t\t\tsubscriber.doneC <- true\n\t\t\t\tcase unsubscriber := <-router.unsubscribeC:\n\t\t\t\t\trouter.unsubscribe(unsubscriber.route)\n\t\t\t\t\tunsubscriber.doneC <- true\n\t\t\t\tcase <-router.Done():\n\t\t\t\t\trouter.setStopping(true)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n// Stop stops the router by closing the stop channel, and waiting on the WaitGroup\nfunc (router *router) Stop() error {\n\tlogger.Info(\"Stopping router\")\n\n\trouter.stopC <- true\n\trouter.wg.Wait()\n\treturn nil\n}\n\nfunc (router *router) Check() error {\n\tif router.accessManager == nil || router.messageStore == nil || router.kvStore == nil {\n\t\tlogger.WithError(ErrServiceNotProvided).Error(\"Some mandatory services are not provided\")\n\t\treturn 
ErrServiceNotProvided\n\t}\n\tif checkable, ok := router.messageStore.(health.Checker); ok {\n\t\terr := checkable.Check()\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"error\", err.Error()).Error(\"MessageStore check failed\")\n\t\t\treturn err\n\t\t}\n\t}\n\tif checkable, ok := router.kvStore.(health.Checker); ok {\n\t\terr := checkable.Check()\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"error\", err.Error()).Error(\"KVStore check failed\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// HandleMessage stores the message in the MessageStore(and gets a new ID for it if the message was created locally)\n// and then passes it to the internal channel, and asynchronously to the cluster (if available).\nfunc (router *router) HandleMessage(message *protocol.Message) error {\n\tlogger.WithFields(log.Fields{\n\t\t\"userID\": message.UserID,\n\t\t\"path\":   message.Path}).Debug(\"HandleMessage\")\n\n\tmTotalMessagesIncoming.Add(1)\n\tif err := router.isStopping(); err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Router is stopping\")\n\t\treturn err\n\t}\n\n\tif !router.accessManager.IsAllowed(auth.WRITE, message.UserID, message.Path) {\n\t\treturn &PermissionDeniedError{UserID: message.UserID, AccessType: auth.WRITE, Path: message.Path}\n\t}\n\n\tvar nodeID uint8\n\tif router.cluster != nil {\n\t\tnodeID = router.cluster.Config.ID\n\t}\n\n\tmTotalMessagesIncomingBytes.Add(int64(len(message.Bytes())))\n\tsize, err := router.messageStore.StoreMessage(message, nodeID)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error storing message\")\n\t\tmTotalMessageStoreErrors.Add(1)\n\t\treturn err\n\t}\n\tmTotalMessagesStoredBytes.Add(int64(size))\n\n\trouter.handleOverloadedChannel()\n\n\trouter.handleC <- message\n\n\tif router.cluster != nil && message.NodeID == router.cluster.Config.ID {\n\t\tgo router.cluster.BroadcastMessage(message)\n\t}\n\n\treturn nil\n}\n\nfunc (router *router) Subscribe(r *Route) (*Route, error) 
{\n\tlogger.WithFields(log.Fields{\n\t\t\"accessManager\": router.accessManager,\n\t\t\"route\":         r,\n\t}).Debug(\"Subscribe\")\n\n\tif err := router.isStopping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserID := r.Get(\"user_id\")\n\troutePath := r.Path\n\n\taccessAllowed := router.accessManager.IsAllowed(auth.READ, userID, routePath)\n\tif !accessAllowed {\n\t\treturn r, &PermissionDeniedError{UserID: userID, AccessType: auth.READ, Path: routePath}\n\t}\n\treq := subRequest{\n\t\troute: r,\n\t\tdoneC: make(chan bool),\n\t}\n\n\trouter.subscribeC <- req\n\t<-req.doneC\n\treturn r, nil\n}\n\n// Subscribe adds a route to the subscribers. If there is already a route with same Application Id and Path, it will be replaced.\nfunc (router *router) Unsubscribe(r *Route) {\n\tlogger.WithFields(log.Fields{\n\t\t\"accessManager\": router.accessManager,\n\t\t\"route\":         r,\n\t}).Debug(\"Unsubscribe\")\n\n\treq := subRequest{\n\t\troute: r,\n\t\tdoneC: make(chan bool),\n\t}\n\trouter.unsubscribeC <- req\n\t<-req.doneC\n}\n\nfunc (router *router) GetSubscribers(topicPath string) ([]byte, error) {\n\tsubscribers := make([]RouteParams, 0)\n\troutes, present := router.routes[protocol.Path(topicPath)]\n\tif present {\n\t\tfor index, currRoute := range routes {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"index\":       index,\n\t\t\t\t\"routeParams\": currRoute.RouteParams,\n\t\t\t}).Debug(\"Added route to slice\")\n\t\t\tsubscribers = append(subscribers, currRoute.RouteParams)\n\t\t}\n\t}\n\treturn json.Marshal(subscribers)\n}\n\nfunc (router *router) subscribe(r *Route) {\n\tlogger.WithField(\"route\", r).Debug(\"Internal subscribe\")\n\tmTotalSubscriptionAttempts.Add(1)\n\n\troutePath := r.Path\n\tslice, present := router.routes[routePath]\n\tvar removed bool\n\tif present {\n\t\t// Try to remove, to avoid double subscriptions of the same app\n\t\tslice, removed = removeIfMatching(slice, r)\n\t} else {\n\t\t// Path not present yet. 
Initialize the slice\n\t\tslice = make([]*Route, 0, 1)\n\t\trouter.routes[routePath] = slice\n\t\tmCurrentRoutes.Add(1)\n\t}\n\trouter.routes[routePath] = append(slice, r)\n\tif removed {\n\t\tmTotalDuplicateSubscriptionsAttempts.Add(1)\n\t} else {\n\t\tmTotalSubscriptions.Add(1)\n\t\tmCurrentSubscriptions.Add(1)\n\t}\n}\n\nfunc (router *router) unsubscribe(r *Route) {\n\tlogger.WithField(\"route\", r).Debug(\"Internal unsubscribe\")\n\tmTotalUnsubscriptionAttempts.Add(1)\n\n\troutePath := r.Path\n\tslice, present := router.routes[routePath]\n\tif !present {\n\t\tmTotalInvalidTopicOnUnsubscriptionAttempts.Add(1)\n\t\treturn\n\t}\n\tvar removed bool\n\trouter.routes[routePath], removed = removeIfMatching(slice, r)\n\tif removed {\n\t\tmTotalUnsubscriptions.Add(1)\n\t\tmCurrentSubscriptions.Add(-1)\n\t} else {\n\t\tmTotalInvalidUnsubscriptionAttempts.Add(1)\n\t}\n\tif len(router.routes[routePath]) == 0 {\n\t\tdelete(router.routes, routePath)\n\t\tmCurrentRoutes.Add(-1)\n\t}\n}\n\nfunc (router *router) panicIfInternalDependenciesAreNil() {\n\tif router.accessManager == nil || router.kvStore == nil || router.messageStore == nil {\n\t\tpanic(fmt.Sprintf(\"router: the internal dependencies marked with `true` are not set: AccessManager=%v, KVStore=%v, MessageStore=%v\",\n\t\t\trouter.accessManager == nil, router.kvStore == nil, router.messageStore == nil))\n\t}\n}\n\nfunc (router *router) channelsAreEmpty() bool {\n\treturn len(router.handleC) == 0 && len(router.subscribeC) == 0 && len(router.unsubscribeC) == 0\n}\n\nfunc (router *router) setStopping(v bool) {\n\trouter.Lock()\n\tdefer router.Unlock()\n\n\trouter.stopping = v\n}\n\nfunc (router *router) Done() <-chan bool {\n\treturn router.stopC\n}\n\nfunc (router *router) isStopping() error {\n\trouter.RLock()\n\tdefer router.RUnlock()\n\n\tif router.stopping {\n\t\treturn &ModuleStoppingError{\"Router\"}\n\t}\n\n\treturn nil\n}\n\nfunc (router *router) handleMessage(message *protocol.Message) {\n\tflog := 
logger.WithFields(log.Fields{\n\t\t\"topic\":    message.Path,\n\t\t\"metadata\": message.Metadata(),\n\t\t\"filters\":  message.Filters,\n\t})\n\tflog.Debug(\"Called routeMessage for data\")\n\tmTotalMessagesRouted.Add(1)\n\n\tmatched := false\n\tfor path, pathRoutes := range router.routes {\n\t\tif matchesTopic(message.Path, path) {\n\t\t\tmatched = true\n\t\t\tfor _, route := range pathRoutes {\n\t\t\t\tif err := route.Deliver(message, false); err == ErrInvalidRoute {\n\t\t\t\t\t// Unsubscribe invalid routes\n\t\t\t\t\trouter.unsubscribe(route)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !matched {\n\t\tflog.Debug(\"No route matched.\")\n\t\tmTotalMessagesNotMatchingTopic.Add(1)\n\t}\n}\n\nfunc (router *router) closeRoutes() {\n\tlogger.Debug(\"closeRoutes\")\n\n\tfor _, currentRouteList := range router.routes {\n\t\tfor _, route := range currentRouteList {\n\t\t\trouter.unsubscribe(route)\n\t\t\tlog.WithFields(log.Fields{\"module\": \"router\", \"route\": route.String()}).Debug(\"Closing route\")\n\t\t\troute.Close()\n\t\t}\n\t}\n}\n\nfunc (router *router) handleOverloadedChannel() {\n\tif float32(len(router.handleC))/float32(cap(router.handleC)) > overloadedHandleChannelRatio {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"currentLength\": len(router.handleC),\n\t\t\t\"maxCapacity\":   cap(router.handleC),\n\t\t}).Warn(\"handleC channel is almost full\")\n\t\tmTotalOverloadedHandleChannel.Add(1)\n\t}\n}\n\n// matchesTopic checks whether the supplied routePath matches the message topic\nfunc matchesTopic(messagePath, routePath protocol.Path) bool {\n\tmessagePathLen := len(string(messagePath))\n\troutePathLen := len(string(routePath))\n\treturn strings.HasPrefix(string(messagePath), string(routePath)) &&\n\t\t(messagePathLen == routePathLen ||\n\t\t\t(messagePathLen > routePathLen && string(messagePath)[routePathLen] == '/'))\n}\n\n// removeIfMatching removes a route from the supplied list, based on same ApplicationID id and same path (if existing)\n// returns: the 
(possibly updated) slide, and a boolean value (true if route was removed, false otherwise)\nfunc removeIfMatching(slice []*Route, route *Route) ([]*Route, bool) {\n\tposition := -1\n\tfor p, r := range slice {\n\t\tif r.Equal(route) {\n\t\t\tposition = p\n\t\t\tbreak\n\t\t}\n\t}\n\tif position == -1 {\n\t\treturn slice, false\n\t}\n\treturn append(slice[:position], slice[position+1:]...), true\n}\n\nfunc (router *router) Fetch(req *store.FetchRequest) error {\n\tlogger.Debug(\"Fetch\")\n\tif err := router.isStopping(); err != nil {\n\t\treturn err\n\t}\n\trouter.messageStore.Fetch(req)\n\treturn nil\n}\n\n// AccessManager returns the `accessManager` provided for the router\nfunc (router *router) AccessManager() (auth.AccessManager, error) {\n\tif router.accessManager == nil {\n\t\treturn nil, ErrServiceNotProvided\n\t}\n\treturn router.accessManager, nil\n}\n\n// MessageStore returns the `messageStore` provided for the router\nfunc (router *router) MessageStore() (store.MessageStore, error) {\n\tif router.messageStore == nil {\n\t\treturn nil, ErrServiceNotProvided\n\t}\n\treturn router.messageStore, nil\n}\n\n// KVStore returns the `kvStore` provided for the router\nfunc (router *router) KVStore() (kvstore.KVStore, error) {\n\tif router.kvStore == nil {\n\t\treturn nil, ErrServiceNotProvided\n\t}\n\treturn router.kvStore, nil\n}\n\n// Cluster returns the `cluster` provided for the router, or nil if no cluster was set-up\nfunc (router *router) Cluster() *cluster.Cluster {\n\treturn router.cluster\n}\n\nfunc (router *router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif req.Method != http.MethodGet {\n\t\thttp.Error(w, `{\"error\": Error method not allowed.Only HTTP GET is accepted}`, http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\terr := json.NewEncoder(w).Encode(router.routes)\n\tif err != nil {\n\t\thttp.Error(w, `{\"error\":Error encoding data.}`, 
http.StatusInternalServerError)\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error encoding data.\")\n\t\treturn\n\t}\n}\n\nfunc (router *router) GetPrefix() string {\n\treturn prefix\n}\n"
  },
  {
    "path": "server/router/router_metrics.go",
    "content": "package router\n\nimport (\n\t\"github.com/smancke/guble/server/metrics\"\n)\n\nvar (\n\tmTotalSubscriptionAttempts                 = metrics.NewInt(\"router.total_subscription_attempts\")\n\tmTotalDuplicateSubscriptionsAttempts       = metrics.NewInt(\"router.total_subscription_attempts_duplicate\")\n\tmTotalSubscriptions                        = metrics.NewInt(\"router.total_subscriptions\")\n\tmTotalUnsubscriptionAttempts               = metrics.NewInt(\"router.total_unsubscription_attempts\")\n\tmTotalInvalidTopicOnUnsubscriptionAttempts = metrics.NewInt(\"router.total_unsubscription_attempts_invalid_topic\")\n\tmTotalInvalidUnsubscriptionAttempts        = metrics.NewInt(\"router.total_unsubscription_attempts_invalid\")\n\tmTotalUnsubscriptions                      = metrics.NewInt(\"router.total_unsubscriptions\")\n\tmCurrentSubscriptions                      = metrics.NewInt(\"router.current_subscriptions\")\n\tmCurrentRoutes                             = metrics.NewInt(\"router.current_routes\")\n\tmTotalMessagesIncoming                     = metrics.NewInt(\"router.total_messages_incoming\")\n\tmTotalMessagesIncomingBytes                = metrics.NewInt(\"router.total_messages_bytes_incoming\")\n\tmTotalMessagesStoredBytes                  = metrics.NewInt(\"router.total_messages_bytes_stored\")\n\tmTotalMessagesRouted                       = metrics.NewInt(\"router.total_messages_routed\")\n\tmTotalOverloadedHandleChannel              = metrics.NewInt(\"router.total_overloaded_handle_channel\")\n\tmTotalMessagesNotMatchingTopic             = metrics.NewInt(\"router.total_messages_not_matching_topic\")\n\tmTotalMessageStoreErrors                   = metrics.NewInt(\"router.total_errors_message_store\")\n\tmTotalDeliverMessageErrors                 = metrics.NewInt(\"router.total_errors_deliver_message\")\n\tmTotalNotMatchedByFilters                  = metrics.NewInt(\"router.total_not_matched_by_filters\")\n)\n\nfunc resetRouterMetrics() 
{\n\tmTotalSubscriptionAttempts.Set(0)\n\tmTotalDuplicateSubscriptionsAttempts.Set(0)\n\tmTotalSubscriptions.Set(0)\n\tmTotalUnsubscriptionAttempts.Set(0)\n\tmTotalInvalidTopicOnUnsubscriptionAttempts.Set(0)\n\tmTotalUnsubscriptions.Set(0)\n\tmTotalInvalidUnsubscriptionAttempts.Set(0)\n\tmCurrentSubscriptions.Set(0)\n\tmCurrentRoutes.Set(0)\n\tmTotalMessagesIncoming.Set(0)\n\tmTotalMessagesRouted.Set(0)\n\tmTotalOverloadedHandleChannel.Set(0)\n\tmTotalMessagesNotMatchingTopic.Set(0)\n\tmTotalDeliverMessageErrors.Set(0)\n\tmTotalMessageStoreErrors.Set(0)\n\tmTotalMessagesIncomingBytes.Set(0)\n\tmTotalMessagesStoredBytes.Set(0)\n\tmTotalNotMatchedByFilters.Set(0)\n}\n"
  },
  {
    "path": "server/router/router_test.go",
    "content": "package router\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/server/store/dummystore\"\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar aTestByteMessage = []byte(\"Hello World!\")\n\ntype msChecker struct {\n\t*MockMessageStore\n\t*MockChecker\n}\n\nfunc newMSChecker() *msChecker {\n\treturn &msChecker{\n\t\tNewMockMessageStore(testutil.MockCtrl),\n\t\tNewMockChecker(testutil.MockCtrl),\n\t}\n}\n\ntype kvsChecker struct {\n\t*MockKVStore\n\t*MockChecker\n}\n\nfunc newKVSChecker() *kvsChecker {\n\treturn &kvsChecker{\n\t\tNewMockKVStore(testutil.MockCtrl),\n\t\tNewMockChecker(testutil.MockCtrl),\n\t}\n}\n\nfunc TestRouter_AddAndRemoveRoutes(t *testing.T) {\n\ta := assert.New(t)\n\n\t// Given a Router\n\trouter, _, _, _ := aStartedRouter()\n\n\t// when i add two routes in the same path\n\trouteBlah1, _ := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\trouteBlah2, _ := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid02\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\n\t// and one route in another path\n\trouteFoo, _ := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/foo\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\n\t// then\n\n\t// the routes are stored\n\ta.Equal(2, 
len(router.routes[protocol.Path(\"/blah\")]))\n\ta.True(routeBlah1.Equal(router.routes[protocol.Path(\"/blah\")][0]))\n\ta.True(routeBlah2.Equal(router.routes[protocol.Path(\"/blah\")][1]))\n\n\ta.Equal(1, len(router.routes[protocol.Path(\"/foo\")]))\n\ta.True(routeFoo.Equal(router.routes[protocol.Path(\"/foo\")][0]))\n\n\t// when i remove routes\n\trouter.Unsubscribe(routeBlah1)\n\trouter.Unsubscribe(routeFoo)\n\n\t// then they are gone\n\ta.Equal(1, len(router.routes[protocol.Path(\"/blah\")]))\n\ta.True(routeBlah2.Equal(router.routes[protocol.Path(\"/blah\")][0]))\n\n\ta.Nil(router.routes[protocol.Path(\"/foo\")])\n}\n\nfunc TestRouter_SubscribeNotAllowed(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\tam := NewMockAccessManager(ctrl)\n\tmsMock := NewMockMessageStore(ctrl)\n\tkvsMock := NewMockKVStore(ctrl)\n\n\tam.EXPECT().IsAllowed(auth.READ, \"user01\", protocol.Path(\"/blah\")).Return(false)\n\n\trouter := New(am, msMock, kvsMock, nil).(*router)\n\trouter.Start()\n\n\t_, e := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\n\t// default TestAccessManager denies all\n\ta.NotNil(e)\n\n\t// now add permissions\n\tam.EXPECT().IsAllowed(auth.READ, \"user01\", protocol.Path(\"/blah\")).Return(true)\n\n\t// and user shall be allowed to subscribe\n\t_, e = router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\n\ta.Nil(e)\n}\n\nfunc TestRouter_HandleMessageNotAllowed(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\tamMock := NewMockAccessManager(ctrl)\n\tmsMock := NewMockMessageStore(ctrl)\n\tkvsMock := 
NewMockKVStore(ctrl)\n\n\t// Given a Router with route\n\trouter, r := aRouterRoute(chanSize)\n\trouter.accessManager = amMock\n\trouter.messageStore = msMock\n\trouter.kvStore = kvsMock\n\n\tamMock.EXPECT().IsAllowed(auth.WRITE, r.Get(\"user_id\"), r.Path).Return(false)\n\n\t// when i send a message to the route\n\terr := router.HandleMessage(&protocol.Message{\n\t\tPath:   r.Path,\n\t\tBody:   aTestByteMessage,\n\t\tUserID: r.Get(\"user_id\"),\n\t})\n\n\t// an error shall be returned\n\ta.Error(err)\n\n\t// and when permission is granted\n\tid, ts := uint64(2), time.Now().Unix()\n\n\tamMock.EXPECT().IsAllowed(auth.WRITE, r.Get(\"user_id\"), r.Path).Return(true)\n\tmsMock.EXPECT().\n\t\tStoreMessage(gomock.Any(), gomock.Any()).\n\t\tDo(func(m *protocol.Message, nodeID uint8) (int, error) {\n\t\t\tm.ID = id\n\t\t\tm.Time = ts\n\t\t\tm.NodeID = nodeID\n\t\t\treturn len(m.Bytes()), nil\n\t\t})\n\n\t// sending message\n\terr = router.HandleMessage(&protocol.Message{\n\t\tPath:   r.Path,\n\t\tBody:   aTestByteMessage,\n\t\tUserID: r.Get(\"user_id\"),\n\t})\n\n\t// shall give no error\n\ta.NoError(err)\n}\n\nfunc TestRouter_ReplacingOfRoutesMatchingAppID(t *testing.T) {\n\ta := assert.New(t)\n\n\t// Given a Router with a route\n\trouter, _, _, _ := aStartedRouter()\n\n\tmatcherFunc := func(route, other RouteConfig, keys ...string) bool {\n\t\treturn route.Path == other.Path && route.Get(\"application_id\") == other.Get(\"application_id\")\n\t}\n\trouter.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tMatcher:     matcherFunc,\n\t\t},\n\t))\n\n\t// when: i add another route with the same Application Id and Same Path\n\trouter.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"newUserId\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tMatcher:     
matcherFunc,\n\t\t},\n\t))\n\n\t// then: the router only contains the new route\n\ta.Equal(1, len(router.routes))\n\ta.Equal(1, len(router.routes[\"/blah\"]))\n\ta.Equal(\"newUserId\", router.routes[\"/blah\"][0].Get(\"user_id\"))\n}\n\nfunc TestRouter_SimpleMessageSending(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// Given a Router with route\n\trouter, r := aRouterRoute(chanSize)\n\tmsMock := NewMockMessageStore(ctrl)\n\trouter.messageStore = msMock\n\n\tid, ts := uint64(2), time.Now().Unix()\n\tmsMock.EXPECT().\n\t\tStoreMessage(gomock.Any(), gomock.Any()).\n\t\tDo(func(m *protocol.Message, nodeID uint8) (int, error) {\n\t\t\tm.ID = id\n\t\t\tm.Time = ts\n\t\t\tm.NodeID = nodeID\n\t\t\treturn len(m.Bytes()), nil\n\t\t})\n\n\t// when i send a message to the route\n\trouter.HandleMessage(&protocol.Message{Path: r.Path, Body: aTestByteMessage})\n\n\t// then I can receive it a short time later\n\tassertChannelContainsMessage(a, r.MessagesChannel(), aTestByteMessage)\n}\n\nfunc TestRouter_RoutingWithSubTopics(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// Given a Router with route\n\trouter, _, _, _ := aStartedRouter()\n\n\tmsMock := NewMockMessageStore(ctrl)\n\trouter.messageStore = msMock\n\t// expect a message to `blah` partition first and `blahblub` second\n\tfirstStore := msMock.EXPECT().\n\t\tStoreMessage(gomock.Any(), gomock.Any()).\n\t\tDo(func(m *protocol.Message, nodeID uint8) (int, error) {\n\t\t\ta.Equal(\"/blah/blub\", string(m.Path))\n\t\t\treturn 0, nil\n\t\t})\n\n\tmsMock.EXPECT().\n\t\tStoreMessage(gomock.Any(), gomock.Any()).After(firstStore).\n\t\tDo(func(m *protocol.Message, nodeID uint8) (int, error) {\n\t\t\ta.Equal(\"/blahblub\", string(m.Path))\n\t\t\treturn 0, nil\n\t\t})\n\n\tr, _ := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": 
\"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\n\t// when i send a message to a subroute\n\trouter.HandleMessage(&protocol.Message{Path: \"/blah/blub\", Body: aTestByteMessage})\n\n\t// then I can receive the message\n\tassertChannelContainsMessage(a, r.MessagesChannel(), aTestByteMessage)\n\n\t// but, when i send a message to a resource, which is just a substring\n\trouter.HandleMessage(&protocol.Message{Path: \"/blahblub\", Body: aTestByteMessage})\n\n\t// then the message gets not delivered\n\ta.Equal(0, len(r.MessagesChannel()))\n}\n\nfunc TestMatchesTopic(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tmessagePath protocol.Path\n\t\troutePath   protocol.Path\n\t\tmatches     bool\n\t}{\n\t\t{\"/foo\", \"/foo\", true},\n\t\t{\"/foo/xyz\", \"/foo\", true},\n\t\t{\"/foo\", \"/bar\", false},\n\t\t{\"/fooxyz\", \"/foo\", false},\n\t\t{\"/foo\", \"/bar/xyz\", false},\n\t} {\n\t\tif !test.matches == matchesTopic(test.messagePath, test.routePath) {\n\t\t\tt.Errorf(\"error: expected %v, but: matchesTopic(%q, %q) = %v\",\n\t\t\t\ttest.matches, test.messagePath, test.routePath, matchesTopic(test.messagePath, test.routePath))\n\t\t}\n\t}\n}\n\nfunc TestRoute_IsRemovedIfChannelIsFull(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\t// Given a Router with route\n\trouter, r := aRouterRoute(chanSize)\n\tr.timeout = 5 * time.Millisecond\n\n\tmsMock := NewMockMessageStore(ctrl)\n\trouter.messageStore = msMock\n\n\tmsMock.EXPECT().\n\t\tStoreMessage(gomock.Any(), gomock.Any()).\n\t\tDo(func(m *protocol.Message, nodeID uint8) (int, error) {\n\t\t\ta.Equal(r.Path, m.Path)\n\t\t\treturn 0, nil\n\t\t}).MaxTimes(chanSize + 1)\n\n\t// where the channel is full of messages\n\tfor i := 0; i < chanSize; i++ {\n\t\trouter.HandleMessage(&protocol.Message{Path: r.Path, Body: aTestByteMessage})\n\t}\n\n\t// when I send one more message\n\tdone := make(chan bool)\n\tgo 
func() {\n\t\trouter.HandleMessage(&protocol.Message{Path: r.Path, Body: aTestByteMessage})\n\t\tdone <- true\n\t}()\n\n\t// then: it returns immediately\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Millisecond * 10):\n\t\ta.Fail(\"Not returning!\")\n\t}\n\n\ttime.Sleep(time.Millisecond)\n\n\t// fetch messages from the channel\n\tfor i := 0; i < chanSize; i++ {\n\t\tselect {\n\t\tcase _, open := <-r.MessagesChannel():\n\t\t\ta.True(open)\n\t\tcase <-time.After(time.Millisecond * 10):\n\t\t\ta.Fail(\"error not enough messages in channel\")\n\t\t}\n\t}\n\n\t// and the channel is closed\n\tselect {\n\tcase _, open := <-r.MessagesChannel():\n\t\ta.False(open)\n\tdefault:\n\t\tlogger.Debug(\"len(r.C): %v\", len(r.MessagesChannel()))\n\t\ta.Fail(\"channel was not closed\")\n\t}\n}\n\n// Router should handle the buffered messages also after the closing of the route\nfunc TestRouter_CleanShutdown(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\tvar ID uint64\n\n\tmsMock := NewMockMessageStore(ctrl)\n\tmsMock.EXPECT().Store(\"blah\", gomock.Any(), gomock.Any()).\n\t\tReturn(nil).\n\t\tDo(func(partition string, callback func(msgID uint64) []byte) error {\n\t\t\tID++\n\t\t\tcallback(ID)\n\t\t\treturn nil\n\t\t}).\n\t\tAnyTimes()\n\n\trouter, _, _, _ := aStartedRouter()\n\trouter.messageStore = msMock\n\n\troute, err := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: 3,\n\t\t},\n\t))\n\ta.Nil(err)\n\n\tdoneC := make(chan bool)\n\n\t// read the messages until done is closed\n\tgo func() {\n\t\tfor {\n\t\t\t_, ok := <-route.MessagesChannel()\n\t\t\tselect {\n\t\t\tcase <-doneC:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ta.True(ok)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Send messages in the router until error\n\tgo func() {\n\t\tfor {\n\t\t\terrHandle := 
router.HandleMessage(&protocol.Message{\n\t\t\t\tPath: protocol.Path(\"/blah\"),\n\t\t\t\tBody: aTestByteMessage,\n\t\t\t})\n\n\t\t\tif errHandle != nil {\n\t\t\t\tmse, ok := errHandle.(*ModuleStoppingError)\n\t\t\t\ta.True(ok)\n\t\t\t\ta.Equal(\"Router\", mse.Name)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// if doneC channel has been closed and no error then we must fail the test\n\t\t\tselect {\n\t\t\tcase _, ok := <-doneC:\n\t\t\t\tif !ok {\n\t\t\t\t\ta.Fail(\"Expected error from router handle message\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\n\tclose(doneC)\n\terr = router.Stop()\n\ta.Nil(err)\n\n\t// wait for above goroutine to finish\n\t<-time.After(100 * time.Millisecond)\n}\n\nfunc TestRouter_Check(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\tamMock := NewMockAccessManager(ctrl)\n\tmsMock := NewMockMessageStore(ctrl)\n\tkvsMock := NewMockKVStore(ctrl)\n\tmsCheckerMock := newMSChecker()\n\tkvsCheckerMock := newKVSChecker()\n\n\t// Given a Multiplexer with route\n\trouter, _, _, _ := aStartedRouter()\n\n\t// Test 0: Router is healthy by default\n\ta.Nil(router.Check())\n\n\t// Test 1a: Given accessManager is nil, then router's Check returns error\n\trouter.accessManager = nil\n\trouter.messageStore = msMock\n\trouter.kvStore = kvsMock\n\ta.NotNil(router.Check())\n\n\t// Test 1b: Given messageStore is nil, then router's Check returns error\n\trouter.accessManager = amMock\n\trouter.messageStore = nil\n\trouter.kvStore = kvsMock\n\ta.NotNil(router.Check())\n\n\t// Test 1c: Given kvStore is nil, then router's Check return error\n\trouter.accessManager = amMock\n\trouter.messageStore = msMock\n\trouter.kvStore = nil\n\ta.NotNil(router.Check())\n\n\t// Test 2: Given mocked store dependencies, both healthy\n\trouter.accessManager = amMock\n\trouter.messageStore = msCheckerMock\n\trouter.kvStore = 
kvsCheckerMock\n\n\tmsCheckerMock.MockChecker.EXPECT().Check().Return(nil)\n\tkvsCheckerMock.MockChecker.EXPECT().Check().Return(nil)\n\n\t// Then the aggregated router health check will return \"no error\" / nil\n\ta.Nil(router.Check())\n\n\t// Test 3: Given a mocked messageStore which returns error on Check(),\n\t// Then router's aggregated Check() should return error\n\tmsCheckerMock.MockChecker.EXPECT().Check().Return(errors.New(\"Storage is almost full\"))\n\ta.NotNil(router.Check())\n\n\t// Test 4: Given a mocked kvStore which returns an error on Check()\n\t// and a healthy messageStore,\n\t// Then router's aggregated Check should return error\n\tmsCheckerMock.MockChecker.EXPECT().Check().Return(nil)\n\tkvsCheckerMock.MockChecker.EXPECT().Check().Return(errors.New(\"DB closed\"))\n\ta.NotNil(router.Check())\n}\n\nfunc TestPanicOnInternalDependencies(t *testing.T) {\n\tdefer testutil.ExpectPanic(t)\n\trouter := New(nil, nil, nil, nil).(*router)\n\trouter.panicIfInternalDependenciesAreNil()\n}\n\nfunc aStartedRouter() (*router, auth.AccessManager, store.MessageStore, kvstore.KVStore) {\n\tam := auth.NewAllowAllAccessManager(true)\n\tkvs := kvstore.NewMemoryKVStore()\n\tms := dummystore.New(kvs)\n\trouter := New(am, ms, kvs, nil).(*router)\n\trouter.Start()\n\treturn router, am, ms, kvs\n}\n\nfunc aRouterRoute(unused int) (*router, *Route) {\n\trouter, _, _, _ := aStartedRouter()\n\troute, _ := router.Subscribe(NewRoute(\n\t\tRouteConfig{\n\t\t\tRouteParams: RouteParams{\"application_id\": \"appid01\", \"user_id\": \"user01\"},\n\t\t\tPath:        protocol.Path(\"/blah\"),\n\t\t\tChannelSize: chanSize,\n\t\t},\n\t))\n\treturn router, route\n}\n\nfunc assertChannelContainsMessage(a *assert.Assertions, c <-chan *protocol.Message, msg []byte) {\n\tselect {\n\tcase m := <-c:\n\t\ta.Equal(string(msg), string(m.Body))\n\tcase <-time.After(time.Millisecond * 5):\n\t\ta.Fail(\"No message received\")\n\t}\n}\n"
  },
  {
    "path": "server/service/logger.go",
    "content": "package service\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"service\",\n})\n"
  },
  {
    "path": "server/service/mocks_checker_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/docker/distribution/health (interfaces: Checker)\n\npackage service\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of Checker interface\ntype MockChecker struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockCheckerRecorder\n}\n\n// Recorder for MockChecker (not exported)\ntype _MockCheckerRecorder struct {\n\tmock *MockChecker\n}\n\nfunc NewMockChecker(ctrl *gomock.Controller) *MockChecker {\n\tmock := &MockChecker{ctrl: ctrl}\n\tmock.recorder = &_MockCheckerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockChecker) EXPECT() *_MockCheckerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockChecker) Check() error {\n\tret := _m.ctrl.Call(_m, \"Check\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockCheckerRecorder) Check() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Check\")\n}\n"
  },
  {
    "path": "server/service/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage service\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/service/module.go",
    "content": "package service\n\nimport (\n\t\"net/http\"\n\t\"sort\"\n)\n\n// Startable interface for modules which provide a start mechanism\ntype Startable interface {\n\tStart() error\n}\n\n// Stopable interface for modules which provide a stop mechanism\ntype Stopable interface {\n\tStop() error\n}\n\n// Endpoint adds a HTTP handler for the `GetPrefix()` to the webserver\ntype Endpoint interface {\n\thttp.Handler\n\tGetPrefix() string\n}\n\ntype module struct {\n\tiface      interface{}\n\tstartLevel int\n\tstopLevel  int\n}\n\ntype by func(m1, m2 *module) bool\n\ntype moduleSorter struct {\n\tmodules []module\n\tby      func(m1, m2 *module) bool\n}\n\nfunc (criteria by) sort(modules []module) {\n\tms := &moduleSorter{\n\t\tmodules: modules,\n\t\tby:      criteria,\n\t}\n\tsort.Sort(ms)\n}\n\n// functions implementing the sort.Interface\n\nfunc (s *moduleSorter) Len() int           { return len(s.modules) }\nfunc (s *moduleSorter) Swap(i, j int)      { s.modules[i], s.modules[j] = s.modules[j], s.modules[i] }\nfunc (s *moduleSorter) Less(i, j int) bool { return s.by(&s.modules[i], &s.modules[j]) }\n\nvar ascendingStartOrder = func(m1, m2 *module) bool {\n\treturn m1.startLevel < m2.startLevel\n}\n\nvar ascendingStopOrder = func(m1, m2 *module) bool {\n\treturn m1.stopLevel < m2.stopLevel\n}\n"
  },
  {
    "path": "server/service/service.go",
    "content": "package service\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/docker/distribution/health\"\n\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/webserver\"\n\n\t\"github.com/hashicorp/go-multierror\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"time\"\n)\n\nconst (\n\tdefaultHealthFrequency = time.Second * 60\n\tdefaultHealthThreshold = 1\n)\n\n// Service is the main struct for controlling a guble server\ntype Service struct {\n\twebserver       *webserver.WebServer\n\trouter          router.Router\n\tmodules         []module\n\thealthEndpoint  string\n\thealthFrequency time.Duration\n\thealthThreshold int\n\tmetricsEndpoint string\n}\n\n// New creates a new Service, using the given Router and WebServer.\n// If the router has already a configured Cluster, it is registered as a service module.\n// The Router and Webserver are then registered as modules.\nfunc New(router router.Router, webserver *webserver.WebServer) *Service {\n\ts := &Service{\n\t\twebserver:       webserver,\n\t\trouter:          router,\n\t\thealthFrequency: defaultHealthFrequency,\n\t\thealthThreshold: defaultHealthThreshold,\n\t}\n\tcluster := router.Cluster()\n\tif cluster != nil {\n\t\ts.RegisterModules(1, 5, cluster)\n\t\trouter.Cluster().Router = router\n\t}\n\ts.RegisterModules(2, 2, s.router)\n\ts.RegisterModules(3, 4, s.webserver)\n\treturn s\n}\n\n// RegisterModules adds more modules (which can be Startable, Stopable, Endpoint etc.) 
to the service,\n// with their start and stop ordering across all the service's modules.\nfunc (s *Service) RegisterModules(startOrder int, stopOrder int, ifaces ...interface{}) {\n\tlogger.WithFields(log.Fields{\n\t\t\"numberOfNewModules\":      len(ifaces),\n\t\t\"numberOfExistingModules\": len(s.modules),\n\t}).Info(\"RegisterModules\")\n\n\tfor _, i := range ifaces {\n\t\tm := module{\n\t\t\tiface:      i,\n\t\t\tstartLevel: startOrder,\n\t\t\tstopLevel:  stopOrder,\n\t\t}\n\t\ts.modules = append(s.modules, m)\n\t}\n}\n\n// HealthEndpoint sets the endpoint used for health. Parameter for disabling the endpoint is: \"\". Returns the updated service.\nfunc (s *Service) HealthEndpoint(endpointPrefix string) *Service {\n\ts.healthEndpoint = endpointPrefix\n\treturn s\n}\n\n// MetricsEndpoint sets the endpoint used for metrics. Parameter for disabling the endpoint is: \"\". Returns the updated service.\nfunc (s *Service) MetricsEndpoint(endpointPrefix string) *Service {\n\ts.metricsEndpoint = endpointPrefix\n\treturn s\n}\n\n// Start checks the modules for the following interfaces and registers and/or starts:\n//   Startable:\n//   health.Checker:\n//   Endpoint: Register the handler function of the Endpoint in the http service at prefix\nfunc (s *Service) Start() error {\n\tvar multierr *multierror.Error\n\tif s.healthEndpoint != \"\" {\n\t\tlogger.WithField(\"healthEndpoint\", s.healthEndpoint).Info(\"Health endpoint\")\n\t\ts.webserver.Handle(s.healthEndpoint, http.HandlerFunc(health.StatusHandler))\n\t} else {\n\t\tlogger.Info(\"Health endpoint disabled\")\n\t}\n\tif s.metricsEndpoint != \"\" {\n\t\tlogger.WithField(\"metricsEndpoint\", s.metricsEndpoint).Info(\"Metrics endpoint\")\n\t\ts.webserver.Handle(s.metricsEndpoint, http.HandlerFunc(metrics.HttpHandler))\n\t} else {\n\t\tlogger.Info(\"Metrics endpoint disabled\")\n\t}\n\tfor order, iface := range s.ModulesSortedByStartOrder() {\n\t\tname := reflect.TypeOf(iface).String()\n\t\tif s, ok := 
iface.(Startable); ok {\n\t\t\tlogger.WithFields(log.Fields{\"name\": name, \"order\": order}).Info(\"Starting module\")\n\t\t\tif err := s.Start(); err != nil {\n\t\t\t\tlogger.WithError(err).WithField(\"name\", name).Error(\"Error while starting module\")\n\t\t\t\tmultierr = multierror.Append(multierr, err)\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.WithFields(log.Fields{\"name\": name, \"order\": order}).Debug(\"Module is not startable\")\n\t\t}\n\t\tif c, ok := iface.(health.Checker); ok && s.healthEndpoint != \"\" {\n\t\t\tlogger.WithField(\"name\", name).Info(\"Registering module as Health-Checker\")\n\t\t\thealth.RegisterPeriodicThresholdFunc(name, s.healthFrequency, s.healthThreshold, health.CheckFunc(c.Check))\n\t\t}\n\t\tif e, ok := iface.(Endpoint); ok {\n\t\t\tprefix := e.GetPrefix()\n\t\t\tlogger.WithFields(log.Fields{\"name\": name, \"prefix\": prefix}).Info(\"Registering module as Endpoint\")\n\t\t\ts.webserver.Handle(prefix, e)\n\t\t}\n\t}\n\treturn multierr.ErrorOrNil()\n}\n\n// Stop stops the registered modules in their given order\nfunc (s *Service) Stop() error {\n\tvar multierr *multierror.Error\n\tfor order, iface := range s.modulesSortedBy(ascendingStopOrder) {\n\t\tname := reflect.TypeOf(iface).String()\n\t\tif s, ok := iface.(Stopable); ok {\n\t\t\tlogger.WithFields(log.Fields{\"name\": name, \"order\": order}).Info(\"Stopping module\")\n\t\t\tif err := s.Stop(); err != nil {\n\t\t\t\tmultierr = multierror.Append(multierr, err)\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.WithFields(log.Fields{\"name\": name, \"order\": order}).Debug(\"Module is not stoppable\")\n\t\t}\n\t}\n\treturn multierr.ErrorOrNil()\n}\n\n// WebServer returns the service *webserver.WebServer instance\nfunc (s *Service) WebServer() *webserver.WebServer {\n\treturn s.webserver\n}\n\n// ModulesSortedByStartOrder returns the registered modules sorted by their startOrder property\nfunc (s *Service) ModulesSortedByStartOrder() []interface{} {\n\treturn 
s.modulesSortedBy(ascendingStartOrder)\n}\n\n// modulesSortedBy returns the registered modules sorted using a `by` criteria.\nfunc (s *Service) modulesSortedBy(criteria by) []interface{} {\n\tvar sorted []interface{}\n\tby(criteria).sort(s.modules)\n\tfor _, m := range s.modules {\n\t\tsorted = append(sorted, m.iface)\n\t}\n\treturn sorted\n}\n"
  },
  {
    "path": "server/service/service_test.go",
    "content": "package service\n\nimport (\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/server/store/dummystore\"\n\t\"github.com/smancke/guble/server/webserver\"\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestStartingOfModules(t *testing.T) {\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\ta := assert.New(t)\n\tvar p interface{}\n\tfunc() {\n\t\tdefer func() {\n\t\t\tp = recover()\n\t\t}()\n\n\t\tservice, _, _, _ := aMockedServiceWithMockedRouterStandalone()\n\t\tservice.RegisterModules(0, 0, &testStartable{})\n\t\ta.Equal(3, len(service.ModulesSortedByStartOrder()))\n\t\tservice.Start()\n\t}()\n\ta.NotNil(p)\n}\n\nfunc TestStoppingOfModules(t *testing.T) {\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\n\tvar p interface{}\n\tfunc() {\n\t\tdefer func() {\n\t\t\tp = recover()\n\t\t}()\n\n\t\tservice, _, _, _ := aMockedServiceWithMockedRouterStandalone()\n\t\tservice.RegisterModules(0, 0, &testStopable{})\n\t\tservice.Stop()\n\t}()\n\tassert.NotNil(t, p)\n}\n\nfunc TestEndpointRegisterAndServing(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(t)\n\n\t// given:\n\tservice, _, _, _ := aMockedServiceWithMockedRouterStandalone()\n\n\t// when I register an endpoint at path /foo\n\tservice.RegisterModules(0, 0, &testEndpoint{})\n\ta.Equal(3, len(service.ModulesSortedByStartOrder()))\n\tservice.Start()\n\tdefer service.Stop()\n\ttime.Sleep(time.Millisecond * 10)\n\n\t// then I can call the handler\n\turl := fmt.Sprintf(\"http://%s/foo\", service.WebServer().GetAddr())\n\tresult, err := http.Get(url)\n\ta.NoError(err)\n\tbody := make([]byte, 3)\n\tresult.Body.Read(body)\n\ta.Equal(\"bar\", string(body))\n}\n\nfunc TestHealthUp(t *testing.T) {\n\t_, 
finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(t)\n\n\t// given:\n\tservice, _, _, _ := aMockedServiceWithMockedRouterStandalone()\n\tservice = service.HealthEndpoint(\"/health_url\")\n\ta.Equal(2, len(service.ModulesSortedByStartOrder()))\n\n\t// when starting the service\n\tdefer service.Stop()\n\tservice.Start()\n\ttime.Sleep(time.Millisecond * 10)\n\n\t// and when I call the health URL\n\turl := fmt.Sprintf(\"http://%s/health_url\", service.WebServer().GetAddr())\n\tresult, err := http.Get(url)\n\n\t// then I get status 200 and JSON: {}\n\ta.NoError(err)\n\tbody, err := ioutil.ReadAll(result.Body)\n\ta.NoError(err)\n\ta.Equal(\"{}\", string(body))\n}\n\nfunc TestHealthDown(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(t)\n\n\t// given:\n\tservice, _, _, _ := aMockedServiceWithMockedRouterStandalone()\n\tservice = service.HealthEndpoint(\"/health_url\")\n\tmockChecker := NewMockChecker(ctrl)\n\tmockChecker.EXPECT().Check().Return(errors.New(\"sick\")).AnyTimes()\n\n\t// when starting the service with a short frequency\n\tdefer service.Stop()\n\tservice.healthFrequency = time.Millisecond * 3\n\tservice.RegisterModules(0, 0, mockChecker)\n\ta.Equal(3, len(service.ModulesSortedByStartOrder()))\n\n\tservice.Start()\n\ttime.Sleep(time.Millisecond * 10)\n\n\t// and when I can call the health URL\n\turl := fmt.Sprintf(\"http://%s/health_url\", service.WebServer().GetAddr())\n\tresult, err := http.Get(url)\n\t// then I receive status 503 and a JSON error message\n\ta.NoError(err)\n\ta.Equal(503, result.StatusCode)\n\tbody, err := ioutil.ReadAll(result.Body)\n\ta.NoError(err)\n\ta.Equal(\"{\\\"*service.MockChecker\\\":\\\"sick\\\"}\", string(body))\n}\n\nfunc TestMetricsEnabled(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\tdefer 
testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(t)\n\n\t// given:\n\tservice, _, _, _ := aMockedServiceWithMockedRouterStandalone()\n\tservice = service.MetricsEndpoint(\"/metrics_url\")\n\ta.Equal(2, len(service.ModulesSortedByStartOrder()))\n\n\t// when starting the service\n\tdefer service.Stop()\n\tservice.Start()\n\ttime.Sleep(time.Millisecond * 10)\n\n\t// and when I call the health URL\n\turl := fmt.Sprintf(\"http://%s/metrics_url\", service.WebServer().GetAddr())\n\tresult, err := http.Get(url)\n\n\t// then I get status 200 and JSON: {}\n\ta.NoError(err)\n\tbody, err := ioutil.ReadAll(result.Body)\n\ta.NoError(err)\n\ta.True(len(body) > 0)\n}\n\nfunc aMockedServiceWithMockedRouterStandalone() (*Service, kvstore.KVStore, store.MessageStore, *MockRouter) {\n\tkvStore := kvstore.NewMemoryKVStore()\n\tmessageStore := dummystore.New(kvStore)\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\trouterMock.EXPECT().Cluster().Return(nil).MaxTimes(2)\n\tservice := New(routerMock, webserver.New(\"localhost:0\"))\n\treturn service, kvStore, messageStore, routerMock\n}\n\ntype testEndpoint struct {\n}\n\nfunc (*testEndpoint) GetPrefix() string {\n\treturn \"/foo\"\n}\n\nfunc (*testEndpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"bar\")\n\treturn\n}\n\ntype testStartable struct {\n}\n\nfunc (*testStartable) Start() error {\n\tpanic(fmt.Errorf(\"In a panic when I should start\"))\n}\n\ntype testStopable struct {\n}\n\nfunc (*testStopable) Stop() error {\n\tpanic(fmt.Errorf(\"In a panic when I should stop\"))\n}\n"
  },
  {
    "path": "server/sms/logger.go",
    "content": "package sms\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithField(\"module\", \"sms\")\n"
  },
  {
    "path": "server/sms/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage sms\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/sms/mocks_sender_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/sms (interfaces: Sender)\n\npackage sms\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n)\n\n// Mock of Sender interface\ntype MockSender struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockSenderRecorder\n}\n\n// Recorder for MockSender (not exported)\ntype _MockSenderRecorder struct {\n\tmock *MockSender\n}\n\nfunc NewMockSender(ctrl *gomock.Controller) *MockSender {\n\tmock := &MockSender{ctrl: ctrl}\n\tmock.recorder = &_MockSenderRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockSender) EXPECT() *_MockSenderRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockSender) Send(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"Send\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockSenderRecorder) Send(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Send\", arg0)\n}\n"
  },
  {
    "path": "server/sms/mocks_store_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/store (interfaces: MessageStore)\n\npackage sms\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tstore \"github.com/smancke/guble/server/store\"\n)\n\n// Mock of MessageStore interface\ntype MockMessageStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockMessageStoreRecorder\n}\n\n// Recorder for MockMessageStore (not exported)\ntype _MockMessageStoreRecorder struct {\n\tmock *MockMessageStore\n}\n\nfunc NewMockMessageStore(ctrl *gomock.Controller) *MockMessageStore {\n\tmock := &MockMessageStore{ctrl: ctrl}\n\tmock.recorder = &_MockMessageStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockMessageStore) EXPECT() *_MockMessageStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockMessageStore) DoInTx(_param0 string, _param1 func(uint64) error) error {\n\tret := _m.ctrl.Call(_m, \"DoInTx\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) DoInTx(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"DoInTx\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) Fetch(_param0 *store.FetchRequest) {\n\t_m.ctrl.Call(_m, \"Fetch\", _param0)\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockMessageStore) GenerateNextMsgID(_param0 string, _param1 byte) (uint64, int64, error) {\n\tret := _m.ctrl.Call(_m, \"GenerateNextMsgID\", _param0, _param1)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(int64)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockMessageStoreRecorder) GenerateNextMsgID(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GenerateNextMsgID\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) MaxMessageID(_param0 
string) (uint64, error) {\n\tret := _m.ctrl.Call(_m, \"MaxMessageID\", _param0)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) MaxMessageID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MaxMessageID\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partition(_param0 string) (store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partition\", _param0)\n\tret0, _ := ret[0].(store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partition(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partition\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partitions() ([]store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partitions\")\n\tret0, _ := ret[0].([]store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partitions() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partitions\")\n}\n\nfunc (_m *MockMessageStore) Store(_param0 string, _param1 uint64, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Store\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Store(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Store\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockMessageStore) StoreMessage(_param0 *protocol.Message, _param1 byte) (int, error) {\n\tret := _m.ctrl.Call(_m, \"StoreMessage\", _param0, _param1)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) StoreMessage(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"StoreMessage\", arg0, arg1)\n}\n"
  },
  {
    "path": "server/sms/nexmo_sms.go",
    "content": "package sms\n\nimport \"encoding/json\"\n\ntype NexmoSms struct {\n\tApiKey    string `json:\"api_key,omitempty\"`\n\tApiSecret string `json:\"api_secret,omitempty\"`\n\tTo        string `json:\"to\"`\n\tFrom      string `json:\"from\"`\n\tText      string `json:\"text\"`\n}\n\nfunc (sms *NexmoSms) EncodeNexmoSms(apiKey, apiSecret string) ([]byte, error) {\n\tsms.ApiKey = apiKey\n\tsms.ApiSecret = apiSecret\n\n\td, err := json.Marshal(&sms)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Could not encode sms as json\")\n\t\treturn nil, err\n\t}\n\treturn d, nil\n}\n"
  },
  {
    "path": "server/sms/nexmo_sms_sender.go",
    "content": "package sms\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/protocol\"\n)\n\nvar (\n\tURL                = \"https://rest.nexmo.com/sms/json?\"\n\tMaxIdleConnections = 100\n\tRequestTimeout     = 500 * time.Millisecond\n)\n\ntype ResponseCode int\n\nconst (\n\tResponseSuccess ResponseCode = iota\n\tResponseThrottled\n\tResponseMissingParams\n\tResponseInvalidParams\n\tResponseInvalidCredentials\n\tResponseInternalError\n\tResponseInvalidMessage\n\tResponseNumberBarred\n\tResponsePartnerAcctBarred\n\tResponsePartnerQuotaExceeded\n\tResponseUnused\n\tResponseRESTNotEnabled\n\tResponseMessageTooLong\n\tResponseCommunicationFailed\n\tResponseInvalidSignature\n\tResponseInvalidSenderAddress\n\tResponseInvalidTTL\n\tResponseFacilityNotAllowed\n\tResponseInvalidMessageClass\n)\n\nvar (\n\tErrNoSMSSent                 = errors.New(\"No sms was sent to Nexmo\")\n\tErrIncompleteSMSSent         = errors.New(\"Nexmo sms was only partial delivered.One or more part returned an error\")\n\tErrSMSResponseDecodingFailed = errors.New(\"Nexmo response decoding failed.\")\n\tErrNoRetry                   = errors.New(\"SMS failed. 
No retrying.\")\n)\n\nvar nexmoResponseCodeMap = map[ResponseCode]string{\n\tResponseSuccess:              \"Success\",\n\tResponseThrottled:            \"Throttled\",\n\tResponseMissingParams:        \"Missing params\",\n\tResponseInvalidParams:        \"Invalid params\",\n\tResponseInvalidCredentials:   \"Invalid credentials\",\n\tResponseInternalError:        \"Internal error\",\n\tResponseInvalidMessage:       \"Invalid message\",\n\tResponseNumberBarred:         \"Number barred\",\n\tResponsePartnerAcctBarred:    \"Partner account barred\",\n\tResponsePartnerQuotaExceeded: \"Partner quota exceeded\",\n\tResponseRESTNotEnabled:       \"Account not enabled for REST\",\n\tResponseMessageTooLong:       \"Message too long\",\n\tResponseCommunicationFailed:  \"Communication failed\",\n\tResponseInvalidSignature:     \"Invalid signature\",\n\tResponseInvalidSenderAddress: \"Invalid sender address\",\n\tResponseInvalidTTL:           \"Invalid TTL\",\n\tResponseFacilityNotAllowed:   \"Facility not allowed\",\n\tResponseInvalidMessageClass:  \"Invalid message class\",\n}\n\nfunc (c ResponseCode) String() string {\n\treturn nexmoResponseCodeMap[c]\n}\n\n// NexmoMessageReport is the \"status report\" for a single SMS sent via the Nexmo API\ntype NexmoMessageReport struct {\n\tStatus           ResponseCode `json:\"status,string\"`\n\tMessageID        string       `json:\"message-id\"`\n\tTo               string       `json:\"to\"`\n\tClientReference  string       `json:\"client-ref\"`\n\tRemainingBalance string       `json:\"remaining-balance\"`\n\tMessagePrice     string       `json:\"message-price\"`\n\tNetwork          string       `json:\"network\"`\n\tErrorText        string       `json:\"error-text\"`\n}\n\ntype NexmoMessageResponse struct {\n\tMessageCount int                  `json:\"message-count,string\"`\n\tMessages     []NexmoMessageReport `json:\"messages\"`\n}\n\nfunc (nm NexmoMessageResponse) Check() error {\n\tif nm.MessageCount == 0 {\n\t\treturn 
ErrNoSMSSent\n\t}\n\tfor i := 0; i < nm.MessageCount; i++ {\n\t\tif nm.Messages[i].Status != ResponseSuccess {\n\t\t\tlogger.WithField(\"status\", nm.Messages[i].Status).\n\t\t\t\tWithField(\"error\", nm.Messages[i].ErrorText).\n\t\t\t\tError(\"Error received from Nexmo\")\n\n\t\t\tif nm.Messages[i].Status == ResponseInvalidSenderAddress {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn ErrIncompleteSMSSent\n\t\t}\n\t}\n\treturn nil\n}\n\ntype NexmoSender struct {\n\tlogger    *log.Entry\n\tApiKey    string\n\tApiSecret string\n\n\thttpClient *http.Client\n}\n\nfunc NewNexmoSender(apiKey, apiSecret string) (*NexmoSender, error) {\n\tns := &NexmoSender{\n\t\tlogger:    logger.WithField(\"name\", \"nexmoSender\"),\n\t\tApiKey:    apiKey,\n\t\tApiSecret: apiSecret,\n\t}\n\tns.createHttpClient()\n\treturn ns, nil\n}\n\nfunc (ns *NexmoSender) Send(msg *protocol.Message) error {\n\tnexmoSMS := new(NexmoSms)\n\terr := json.Unmarshal(msg.Body, nexmoSMS)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Could not decode message body to send to nexmo\")\n\t\treturn err\n\t}\n\tnexmoSMSResponse, err := ns.sendSms(nexmoSMS)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Could not decode nexmo response message body\")\n\t\treturn err\n\t}\n\tlogger.WithField(\"response\", nexmoSMSResponse).Info(\"Decoded nexmo response\")\n\n\treturn nexmoSMSResponse.Check()\n}\n\nfunc (ns *NexmoSender) sendSms(sms *NexmoSms) (*NexmoMessageResponse, error) {\n\t// log before encoding\n\tlogger.WithField(\"sms_details\", sms).Info(\"sendSms\")\n\n\tsmsEncoded, err := sms.EncodeNexmoSms(ns.ApiKey, ns.ApiSecret)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error encoding sms\")\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, URL, bytes.NewBuffer(smsEncoded))\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(smsEncoded)))\n\n\tresp, err 
:= (&http.Client{}).Do(req)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error doing the request to nexmo endpoint\")\n\t\tns.createHttpClient()\n\t\tmTotalSendErrors.Add(1)\n\t\treturn nil, ErrNoSMSSent\n\t}\n\tdefer resp.Body.Close()\n\n\tvar messageResponse *NexmoMessageResponse\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error reading the nexmo body response\")\n\t\tmTotalResponseInternalErrors.Add(1)\n\t\treturn nil, ErrSMSResponseDecodingFailed\n\t}\n\n\terr = json.Unmarshal(respBody, &messageResponse)\n\tif err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"Error decoding the response from nexmo endpoint\")\n\t\tmTotalResponseInternalErrors.Add(1)\n\t\treturn nil, ErrSMSResponseDecodingFailed\n\t}\n\tlogger.WithField(\"messageResponse\", messageResponse).Info(\"Actual nexmo response\")\n\n\treturn messageResponse, nil\n}\n\nfunc (ns *NexmoSender) createHttpClient() {\n\tlogger.Info(\"Recreating HTTP client for nexmo sender\")\n\tns.httpClient = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: MaxIdleConnections,\n\t\t},\n\t\tTimeout: RequestTimeout,\n\t}\n}\n"
  },
  {
    "path": "server/sms/nexmo_sms_sender_test.go",
    "content": "package sms\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nconst (\n\tKEY    = \"ce40b46d\"\n\tSECRET = \"153d2b2c72985370\"\n)\n\nfunc TestNexmoSender_Send(t *testing.T) {\n\ta := assert.New(t)\n\ttestutil.SkipIfDisabled(t)\n\tsender, err := NewNexmoSender(KEY, SECRET)\n\ta.NoError(err)\n\n\tsms := new(NexmoSms)\n\tsms.To = \"+40746278186\"\n\tsms.From = \"REWE Lieferservice\"\n\tsms.Text = \"Lieber Kunde! Ihre Lieferung kommt heute zwischen 12.04 und 12.34 Uhr. Vielen Dank für Ihre Bestellung! Ihr REWE Lieferservice\"\n\n\tresponse, err := sender.sendSms(sms)\n\ta.Equal(1, response.MessageCount)\n\ta.Equal(ResponseSuccess, response.Messages[0].Status)\n\ta.NoError(err)\n}\n\nfunc TestNexmoSender_SendWithError(t *testing.T) {\n\tRequestTimeout = time.Second\n\ta := assert.New(t)\n\tsender, err := NewNexmoSender(KEY, SECRET)\n\ta.NoError(err)\n\n\tsms := NexmoSms{\n\t\tTo:   \"toNumber\",\n\t\tFrom: \"FromNUmber\",\n\t\tText: \"body\",\n\t}\n\td, err := json.Marshal(&sms)\n\ta.NoError(err)\n\n\tmsg := protocol.Message{\n\t\tPath:          protocol.Path(SMSDefaultTopic),\n\t\tUserID:        \"samsa\",\n\t\tApplicationID: \"sms\",\n\t\tID:            uint64(4),\n\t\tBody:          d,\n\t}\n\n\terr = sender.Send(&msg)\n\ta.Error(err)\n\ta.Equal(ErrIncompleteSMSSent, err)\n}\n"
  },
  {
    "path": "server/sms/sms_gateway.go",
    "content": "package sms\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\n\t\"github.com/smancke/guble/server/connector\"\n\n\t\"time\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\nconst (\n\tSMSSchema       = \"sms_notifications\"\n\tSMSDefaultTopic = \"/sms\"\n)\n\nvar (\n\tErrRetryFailed = errors.New(\"Failed retrying to send message.\")\n)\n\ntype Sender interface {\n\tSend(*protocol.Message) error\n}\n\ntype Config struct {\n\tEnabled         *bool\n\tAPIKey          *string\n\tAPISecret       *string\n\tWorkers         *int\n\tSMSTopic        *string\n\tIntervalMetrics *bool\n\n\tName   string\n\tSchema string\n}\n\ntype gateway struct {\n\tconfig *Config\n\n\tsender Sender\n\trouter router.Router\n\troute  *router.Route\n\n\tLastIDSent uint64\n\n\tctx        context.Context\n\tcancelFunc context.CancelFunc\n\n\tlogger *log.Entry\n}\n\nfunc New(router router.Router, sender Sender, config Config) (*gateway, error) {\n\tif *config.Workers <= 0 {\n\t\t*config.Workers = connector.DefaultWorkers\n\t}\n\tconfig.Schema = SMSSchema\n\tconfig.Name = SMSDefaultTopic\n\treturn &gateway{\n\t\tconfig: &config,\n\t\trouter: router,\n\t\tsender: sender,\n\t\tlogger: logger.WithField(\"name\", config.Name),\n\t}, nil\n}\n\nfunc (g *gateway) Start() error {\n\tg.logger.Debug(\"Starting gateway\")\n\n\terr := g.ReadLastID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg.ctx, g.cancelFunc = context.WithCancel(context.Background())\n\tg.initRoute()\n\n\tgo g.Run()\n\n\tg.startMetrics()\n\n\tg.logger.Debug(\"Started gateway\")\n\treturn nil\n}\n\nfunc (g *gateway) initRoute() {\n\tg.route = router.NewRoute(router.RouteConfig{\n\t\tPath:         protocol.Path(*g.config.SMSTopic),\n\t\tChannelSize:  5000,\n\t\tFetchRequest: g.fetchRequest(),\n\t})\n}\n\nfunc (g *gateway) fetchRequest() 
(fr *store.FetchRequest) {\n\tif g.LastIDSent > 0 {\n\t\tfr = store.NewFetchRequest(\n\t\t\tprotocol.Path(*g.config.SMSTopic).Partition(),\n\t\t\tg.LastIDSent+1,\n\t\t\t0,\n\t\t\tstore.DirectionForward, -1)\n\t}\n\treturn\n}\n\nfunc (g *gateway) Run() {\n\tg.logger.Debug(\"Run gateway\")\n\tvar provideErr error\n\tgo func() {\n\t\terr := g.route.Provide(g.router, true)\n\t\tif err != nil {\n\t\t\t// cancel subscription loop if there is an error on the provider\n\t\t\tlogger.WithField(\"error\", err.Error()).Error(\"Provide returned error\")\n\t\t\tprovideErr = err\n\t\t\tg.Cancel()\n\t\t}\n\t}()\n\n\tcurrentMsg, err := g.proxyLoop()\n\tif err != nil && provideErr == nil {\n\t\tg.logger.WithFields(log.Fields{\n\t\t\t\"error\":             err.Error(),\n\t\t\t\"is_incomplete_sms\": err == ErrIncompleteSMSSent,\n\t\t}).Error(\"Error returned by gateway proxy loop\")\n\n\t\tif err == ErrIncompleteSMSSent {\n\t\t\terr2 := g.retry(currentMsg)\n\t\t\tif err2 != nil {\n\t\t\t\tg.logger.WithField(\"error\", err2.Error()).Error(\"Error returned by retry.\")\n\t\t\t\tif err3 := g.SetLastSentID(currentMsg.ID); err3 != nil {\n\t\t\t\t\tg.logger.WithField(\"error\", err3.Error()).Error(\"Error setting last ID\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If Route channel closed, try restarting\n\t\tif isRestartableErr(err) {\n\t\t\tg.Restart()\n\t\t\treturn\n\t\t}\n\n\t}\n\n\tif provideErr != nil {\n\t\t// TODO Bogdan Treat errors where a subscription provide fails\n\t\tg.logger.WithField(\"error\", provideErr.Error()).Error(\"Route provide error\")\n\n\t\t// Router closed the route, try restart\n\t\tif provideErr == router.ErrInvalidRoute {\n\t\t\tg.Restart()\n\t\t\treturn\n\t\t}\n\t\t// Router module is stopping, exit the process\n\t\tif _, ok := provideErr.(*router.ModuleStoppingError); ok {\n\t\t\treturn\n\t\t}\n\t}\n}\nfunc isRestartableErr(err error) bool {\n\treturn err == connector.ErrRouteChannelClosed ||\n\t\terr == ErrNoSMSSent ||\n\t\terr == ErrIncompleteSMSSent 
||\n\t\terr == ErrSMSResponseDecodingFailed\n}\n\n// proxyLoop returns the current processed message alongside the error that\n// occured during sending of the message\nfunc (g *gateway) proxyLoop() (*protocol.Message, error) {\n\tvar (\n\t\topened      bool = true\n\t\treceivedMsg *protocol.Message\n\t)\n\tdefer func() { g.cancelFunc = nil }()\n\n\tfor opened {\n\t\tselect {\n\t\tcase receivedMsg, opened = <-g.route.MessagesChannel():\n\t\t\tif !opened {\n\t\t\t\tlogger.WithField(\"receivedMsg\", receivedMsg).Info(\"not open\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\terr := g.send(receivedMsg)\n\t\t\tif err != nil {\n\t\t\t\treturn receivedMsg, err\n\t\t\t}\n\t\tcase <-g.ctx.Done():\n\t\t\t// If the parent context is still running then only this subscriber context\n\t\t\t// has been cancelled\n\t\t\tif g.ctx.Err() == nil {\n\t\t\t\treturn nil, g.ctx.Err()\n\t\t\t}\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\t//TODO Cosmin Bogdan returning this error can mean 2 things: overflow of route's channel, or intentional stopping of router / gubled.\n\treturn nil, connector.ErrRouteChannelClosed\n}\n\nfunc (g *gateway) retry(msg *protocol.Message) error {\n\tl := logger.WithField(\"message\", msg)\n\tl.Info(\"Retrying to send message\")\n\tfor i := 0; i < 3; i++ {\n\t\tl.WithField(\"retry\", i+1).Info(\"Sending message\")\n\t\terr := g.send(msg)\n\t\tif err != nil {\n\t\t\tl.WithFields(log.Fields{\n\t\t\t\t\"retry\": i + 1,\n\t\t\t\t\"err\":   err.Error(),\n\t\t\t}).Error(\"Retry failed\")\n\t\t} else {\n\t\t\tl.WithField(\"retry\", i+1).Info(\"Retry success\")\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn ErrRetryFailed\n}\n\nfunc (g *gateway) send(receivedMsg *protocol.Message) error {\n\terr := g.sender.Send(receivedMsg)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"Sending of message failed\")\n\t\tmTotalResponseErrors.Add(1)\n\t\treturn err\n\t}\n\tmTotalSentMessages.Add(1)\n\tg.SetLastSentID(receivedMsg.ID)\n\treturn 
nil\n}\n\nfunc (g *gateway) Restart() error {\n\tg.logger.WithField(\"LastIDSent\", g.LastIDSent).Debug(\"Restart in progress\")\n\n\tg.Cancel()\n\tg.cancelFunc = nil\n\n\terr := g.ReadLastID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg.initRoute()\n\n\tgo g.Run()\n\n\tg.logger.WithField(\"LastIDSent\", g.LastIDSent).Debug(\"Restart finished\")\n\treturn nil\n}\n\nfunc (g *gateway) Stop() error {\n\tg.logger.Debug(\"Stopping gateway\")\n\tg.cancelFunc()\n\tg.logger.Debug(\"Stopped gateway\")\n\treturn nil\n}\n\nfunc (g *gateway) SetLastSentID(ID uint64) error {\n\tg.logger.WithField(\"LastIDSent\", ID).WithField(\"path\", *g.config.SMSTopic).Debug(\"Seting LastIDSent\")\n\n\tkvStore, err := g.router.KVStore()\n\tif err != nil {\n\t\tg.logger.WithField(\"error\", err.Error()).Error(\"KVStore could not be accesed from gateway\")\n\t\treturn err\n\t}\n\n\tdata, err := json.Marshal(struct{ ID uint64 }{ID: ID})\n\tif err != nil {\n\t\tg.logger.WithField(\"error\", err.Error()).Error(\"Error encoding last ID\")\n\t\treturn err\n\t}\n\terr = kvStore.Put(g.config.Schema, *g.config.SMSTopic, data)\n\tif err != nil {\n\t\tg.logger.WithField(\"error\", err.Error()).WithField(\"path\", *g.config.SMSTopic).Error(\"KVStore could not set value for LastIDSent for topic\")\n\t\treturn err\n\t}\n\tg.LastIDSent = ID\n\treturn nil\n}\n\nfunc (g *gateway) ReadLastID() error {\n\tkvStore, err := g.router.KVStore()\n\tif err != nil {\n\t\tg.logger.WithField(\"error\", err.Error()).Error(\"KVStore could not be accesed from sms gateway\")\n\t\treturn err\n\t}\n\tdata, exist, err := kvStore.Get(g.config.Schema, *g.config.SMSTopic)\n\tif err != nil {\n\t\tg.logger.WithField(\"error\", err.Error()).WithField(\"path\", *g.config.SMSTopic).Error(\"KvStore could not get value for LastIDSent for topic\")\n\t\treturn err\n\t}\n\tif !exist {\n\t\tg.LastIDSent = 0\n\t\treturn nil\n\t}\n\n\tv := &struct{ ID uint64 }{}\n\terr = json.Unmarshal(data, v)\n\tif err != nil 
{\n\t\tg.logger.WithField(\"error\", err.Error()).Error(\"Could not parse as uint64 the LastIDSent value stored in db\")\n\t\treturn err\n\t}\n\tg.LastIDSent = v.ID\n\n\tg.logger.WithField(\"LastIDSent\", g.LastIDSent).WithField(\"path\", *g.config.SMSTopic).Debug(\"ReadLastID\")\n\treturn nil\n}\n\nfunc (g *gateway) Cancel() {\n\tif g.cancelFunc != nil {\n\t\tg.cancelFunc()\n\t}\n}\n\nfunc (g *gateway) startMetrics() {\n\tmTotalSentMessages.Set(0)\n\tmTotalSendErrors.Set(0)\n\tmTotalResponseErrors.Set(0)\n\tmTotalResponseInternalErrors.Set(0)\n\n\tif *g.config.IntervalMetrics {\n\t\tg.startIntervalMetric(mMinute, time.Minute)\n\t\tg.startIntervalMetric(mHour, time.Hour)\n\t\tg.startIntervalMetric(mDay, time.Hour*24)\n\t}\n}\n\nfunc (g *gateway) startIntervalMetric(m metrics.Map, td time.Duration) {\n\tmetrics.RegisterInterval(g.ctx, m, td, resetIntervalMetrics, processAndResetIntervalMetrics)\n}\n"
  },
  {
    "path": "server/sms/sms_gateway_test.go",
    "content": "package sms\n\nimport (\n\t\"testing\"\n\n\t\"encoding/json\"\n\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"strings\"\n\t\"time\"\n\n\t\"expvar\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store/dummystore\"\n)\n\nfunc Test_StartStop(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\ta := assert.New(t)\n\n\tmockSmsSender := NewMockSender(ctrl)\n\tkvStore := kvstore.NewMemoryKVStore()\n\n\ta.NotNil(kvStore)\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\trouterMock.EXPECT().KVStore().AnyTimes().Return(kvStore, nil)\n\n\tmsgStore := dummystore.New(kvStore)\n\trouterMock.EXPECT().MessageStore().AnyTimes().Return(msgStore, nil)\n\ttopic := \"sms\"\n\tworker := 1\n\tintervalMetrics := true\n\tconfig := Config{\n\t\tWorkers:         &worker,\n\t\tSMSTopic:        &topic,\n\t\tName:            \"test_gateway\",\n\t\tSchema:          SMSSchema,\n\t\tIntervalMetrics: &intervalMetrics,\n\t}\n\n\trouterMock.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\ta.Equal(topic, r.Path.Partition())\n\t\treturn r, nil\n\t})\n\n\tgw, err := New(routerMock, mockSmsSender, config)\n\ta.NoError(err)\n\n\terr = gw.Start()\n\ta.NoError(err)\n\n\terr = gw.Stop()\n\ta.NoError(err)\n\n\ttime.Sleep(100 * time.Millisecond)\n}\n\nfunc Test_SendOneSms(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\tdefer testutil.EnableDebugForMethod()()\n\ta := assert.New(t)\n\n\tmockSmsSender := NewMockSender(ctrl)\n\tkvStore := kvstore.NewMemoryKVStore()\n\n\ta.NotNil(kvStore)\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\trouterMock.EXPECT().KVStore().AnyTimes().Return(kvStore, nil)\n\tmsgStore := 
dummystore.New(kvStore)\n\trouterMock.EXPECT().MessageStore().AnyTimes().Return(msgStore, nil)\n\n\ttopic := \"/sms\"\n\tworker := 1\n\tintervalMetrics := true\n\tconfig := Config{\n\t\tWorkers:         &worker,\n\t\tSMSTopic:        &topic,\n\t\tName:            \"test_gateway\",\n\t\tSchema:          SMSSchema,\n\t\tIntervalMetrics: &intervalMetrics,\n\t}\n\n\trouterMock.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\ta.Equal(topic, string(r.Path))\n\t\treturn r, nil\n\t})\n\n\tgw, err := New(routerMock, mockSmsSender, config)\n\ta.NoError(err)\n\n\terr = gw.Start()\n\ta.NoError(err)\n\n\tsms := NexmoSms{\n\t\tTo:   \"toNumber\",\n\t\tFrom: \"FromNUmber\",\n\t\tText: \"body\",\n\t}\n\td, err := json.Marshal(&sms)\n\ta.NoError(err)\n\n\tmsg := protocol.Message{\n\t\tPath: protocol.Path(topic),\n\t\tID:   uint64(4),\n\t\tBody: d,\n\t}\n\n\tmockSmsSender.EXPECT().Send(gomock.Eq(&msg)).Return(nil)\n\ta.NotNil(gw.route)\n\tgw.route.Deliver(&msg, true)\n\ttime.Sleep(100 * time.Millisecond)\n\n\terr = gw.Stop()\n\ta.NoError(err)\n\n\terr = gw.ReadLastID()\n\ta.NoError(err)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\ttotalSentCount := expvar.NewInt(\"total_sent_messages\")\n\ttotalSentCount.Add(1)\n\ta.Equal(totalSentCount, mTotalSentMessages)\n}\n\nfunc Test_Restart(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\tdefer testutil.EnableDebugForMethod()()\n\ta := assert.New(t)\n\n\tmockSmsSender := NewMockSender(ctrl)\n\tkvStore := kvstore.NewMemoryKVStore()\n\n\ta.NotNil(kvStore)\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\trouterMock.EXPECT().KVStore().AnyTimes().Return(kvStore, nil)\n\tmsgStore := NewMockMessageStore(ctrl)\n\trouterMock.EXPECT().MessageStore().AnyTimes().Return(msgStore, nil)\n\n\ttopic := \"/sms\"\n\tworker := 1\n\tintervalMetrics := true\n\tconfig := Config{\n\t\tWorkers:  &worker,\n\t\tSMSTopic: &topic,\n\t\tName:     \"test_gateway\",\n\t\tSchema:   
SMSSchema,\n\n\t\tIntervalMetrics: &intervalMetrics,\n\t}\n\n\trouterMock.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) (*router.Route, error) {\n\t\ta.Equal(strings.Split(topic, \"/\")[1], r.Path.Partition())\n\t\treturn r, nil\n\t}).Times(2)\n\n\tgw, err := New(routerMock, mockSmsSender, config)\n\ta.NoError(err)\n\n\terr = gw.Start()\n\ta.NoError(err)\n\n\tsms := NexmoSms{\n\t\tTo:   \"toNumber\",\n\t\tFrom: \"FromNUmber\",\n\t\tText: \"body\",\n\t}\n\td, err := json.Marshal(&sms)\n\ta.NoError(err)\n\n\tmsg := protocol.Message{\n\t\tPath:          protocol.Path(topic),\n\t\tUserID:        \"samsa\",\n\t\tApplicationID: \"sms\",\n\t\tID:            uint64(4),\n\t\tBody:          d,\n\t}\n\n\tmockSmsSender.EXPECT().Send(gomock.Eq(&msg)).Times(1).Return(ErrNoSMSSent)\n\n\tdoneC := make(chan bool)\n\trouterMock.EXPECT().Done().AnyTimes().Return(doneC)\n\t//\n\t//mockSmsSender.EXPECT().Send(gomock.Eq(&msg)).Return(nil)\n\n\ta.NotNil(gw.route)\n\tgw.route.Deliver(&msg, true)\n\ttime.Sleep(100 * time.Millisecond)\n}\n\nfunc TestReadLastID(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\tdefer testutil.EnableDebugForMethod()()\n\ta := assert.New(t)\n\n\tmockSmsSender := NewMockSender(ctrl)\n\tkvStore := kvstore.NewMemoryKVStore()\n\n\ta.NotNil(kvStore)\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\trouterMock.EXPECT().KVStore().AnyTimes().Return(kvStore, nil)\n\tmsgStore := dummystore.New(kvStore)\n\trouterMock.EXPECT().MessageStore().AnyTimes().Return(msgStore, nil)\n\n\ttopic := \"/sms\"\n\tworker := 1\n\tconfig := Config{\n\t\tWorkers:  &worker,\n\t\tSMSTopic: &topic,\n\t\tName:     \"test_gateway\",\n\t\tSchema:   SMSSchema,\n\t}\n\n\tgw, err := New(routerMock, mockSmsSender, config)\n\ta.NoError(err)\n\n\tgw.SetLastSentID(uint64(10))\n\n\tgw.ReadLastID()\n\n\ta.Equal(uint64(10), gw.LastIDSent)\n}\n"
  },
  {
    "path": "server/sms/sms_metrics.go",
    "content": "package sms\n\nimport (\n\t\"github.com/smancke/guble/server/metrics\"\n\t\"time\"\n)\n\nvar (\n\tns                           = metrics.NS(\"sms\")\n\tmTotalSentMessages           = ns.NewInt(\"total_sent_messages\")\n\tmTotalSendErrors             = ns.NewInt(\"total_sent_message_errors\")\n\tmTotalResponseErrors         = ns.NewInt(\"total_response_errors\")\n\tmTotalResponseInternalErrors = ns.NewInt(\"total_response_internal_errors\")\n\tmMinute                      = ns.NewMap(\"minute\")\n\tmHour                        = ns.NewMap(\"hour\")\n\tmDay                         = ns.NewMap(\"day\")\n)\n\nconst (\n\tcurrentTotalMessagesLatenciesKey = \"current_messages_total_latencies_nanos\"\n\tcurrentTotalMessagesKey          = \"current_messages_count\"\n\tcurrentTotalErrorsLatenciesKey   = \"current_errors_total_latencies_nanos\"\n\tcurrentTotalErrorsKey            = \"current_errors_count\"\n)\n\nfunc processAndResetIntervalMetrics(m metrics.Map, td time.Duration, t time.Time) {\n\tmsgLatenciesValue := m.Get(currentTotalMessagesLatenciesKey)\n\tmsgNumberValue := m.Get(currentTotalMessagesKey)\n\terrLatenciesValue := m.Get(currentTotalErrorsLatenciesKey)\n\terrNumberValue := m.Get(currentTotalErrorsKey)\n\n\tm.Init()\n\tresetIntervalMetrics(m, t)\n\tmetrics.SetRate(m, \"last_messages_rate_sec\", msgNumberValue, td, time.Second)\n\tmetrics.SetRate(m, \"last_errors_rate_sec\", errNumberValue, td, time.Second)\n\tmetrics.SetAverage(m, \"last_messages_average_latency_msec\",\n\t\tmsgLatenciesValue, msgNumberValue, metrics.MilliPerNano, metrics.DefaultAverageLatencyJSONValue)\n\tmetrics.SetAverage(m, \"last_errors_average_latency_msec\",\n\t\terrLatenciesValue, errNumberValue, metrics.MilliPerNano, metrics.DefaultAverageLatencyJSONValue)\n}\n\nfunc resetIntervalMetrics(m metrics.Map, t time.Time) {\n\tm.Set(\"current_interval_start\", metrics.NewTime(t))\n\tmetrics.AddToMaps(currentTotalMessagesLatenciesKey, 0, 
m)\n\tmetrics.AddToMaps(currentTotalMessagesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalErrorsLatenciesKey, 0, m)\n\tmetrics.AddToMaps(currentTotalErrorsKey, 0, m)\n}"
  },
  {
    "path": "server/store/dummystore/dummy_message_store.go",
    "content": "package dummystore\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\nconst topicSchema = \"topic_sequence\"\n\n// DummyMessageStore is a minimal implementation of the MessageStore interface.\n// Everything it does is storing the message ids in the key value store to\n// ensure a monotonic incremented id.\n// It is intended for testing and demo purpose, as well as dummy for services without persistence.\n// TODO: implement a simple logic to preserve the last N messages\ntype DummyMessageStore struct {\n\ttopicSequences     map[string]uint64\n\ttopicSequencesLock sync.RWMutex\n\tkvStore            kvstore.KVStore\n\tisSyncStarted      bool\n\n\tstopC    chan bool // used to send the stop request to the syc goroutine\n\tstoppedC chan bool // answer from the syc goroutine, when it is stopped\n\n\tidSyncDuration time.Duration\n}\n\n// New returns a new DummyMessageStore.\nfunc New(kvStore kvstore.KVStore) *DummyMessageStore {\n\treturn &DummyMessageStore{\n\t\ttopicSequences: make(map[string]uint64),\n\t\tkvStore:        kvStore,\n\t\tidSyncDuration: time.Millisecond * 100,\n\t\tstopC:          make(chan bool, 1),\n\t\tstoppedC:       make(chan bool, 1),\n\t}\n}\n\n// Start the DummyMessageStore.\nfunc (dms *DummyMessageStore) Start() error {\n\tgo dms.startSequenceSync()\n\tdms.isSyncStarted = true\n\treturn nil\n}\n\n// Stop the DummyMessageStore.\nfunc (dms *DummyMessageStore) Stop() error {\n\tif !dms.isSyncStarted {\n\t\treturn nil\n\t}\n\tdms.stopC <- true\n\t<-dms.stoppedC\n\treturn nil\n}\n\n// StoreMessage is a part of the `store.MessageStore` implementation.\nfunc (dms *DummyMessageStore) StoreMessage(message *protocol.Message, nodeID uint8) (int, error) {\n\tpartitionName := message.Path.Partition()\n\tnextID, ts, err := dms.GenerateNextMsgID(partitionName, 0)\n\tif err != nil {\n\t\treturn 0, 
err\n\t}\n\tmessage.ID = nextID\n\tmessage.Time = ts\n\tmessage.NodeID = nodeID\n\tdata := message.Bytes()\n\tif err := dms.Store(partitionName, nextID, data); err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(data), nil\n}\n\n// Store is a part of the `store.MessageStore` implementation.\nfunc (dms *DummyMessageStore) Store(partition string, msgID uint64, msg []byte) error {\n\tdms.topicSequencesLock.Lock()\n\tdefer dms.topicSequencesLock.Unlock()\n\treturn dms.store(partition, msgID, msg)\n}\n\nfunc (dms *DummyMessageStore) store(partition string, msgId uint64, msg []byte) error {\n\tmaxID, err := dms.maxMessageID(partition)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msgId > 1+maxID {\n\t\treturn fmt.Errorf(\"DummyMessageStore: Invalid message id for partition %v. Next id should be %v, but was %q\",\n\t\t\tpartition, 1+maxID, msgId)\n\t}\n\tdms.setID(partition, msgId)\n\treturn nil\n}\n\n// Fetch does nothing in this dummy implementation.\n// It is a part of the `store.MessageStore` implementation.\nfunc (dms *DummyMessageStore) Fetch(req *store.FetchRequest) {\n}\n\n// MaxMessageID is a part of the `store.MessageStore` implementation.\nfunc (dms *DummyMessageStore) MaxMessageID(partition string) (uint64, error) {\n\tdms.topicSequencesLock.Lock()\n\tdefer dms.topicSequencesLock.Unlock()\n\treturn dms.maxMessageID(partition)\n}\n\n// DoInTx is a part of the `store.MessageStore` implementation.\nfunc (dms *DummyMessageStore) DoInTx(partition string, fnToExecute func(maxMessageId uint64) error) error {\n\tdms.topicSequencesLock.Lock()\n\tdefer dms.topicSequencesLock.Unlock()\n\tmaxID, err := dms.maxMessageID(partition)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fnToExecute(maxID)\n}\n\n// GenerateNextMsgID is a part of the `store.MessageStore` implementation.\nfunc (dms *DummyMessageStore) GenerateNextMsgID(partitionName string, nodeID uint8) (uint64, int64, error) {\n\tdms.topicSequencesLock.Lock()\n\tdefer dms.topicSequencesLock.Unlock()\n\tts := 
time.Now().Unix()\n\tmax, err := dms.maxMessageID(partitionName)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\tnext := max + 1\n\tdms.setID(partitionName, next)\n\treturn next, ts, nil\n}\n\nfunc (dms *DummyMessageStore) maxMessageID(partition string) (uint64, error) {\n\tsequenceValue, exist := dms.topicSequences[partition]\n\tif !exist {\n\t\tval, existInKVStore, err := dms.kvStore.Get(topicSchema, partition)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif existInKVStore {\n\t\t\tsequenceValue, err = strconv.ParseUint(string(val), 10, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t} else {\n\t\t\tsequenceValue = uint64(0)\n\t\t}\n\t}\n\tdms.topicSequences[partition] = sequenceValue\n\treturn sequenceValue, nil\n}\n\n// the id to a new value\nfunc (dms *DummyMessageStore) setID(partition string, id uint64) {\n\tdms.topicSequences[partition] = id\n}\n\nfunc (dms *DummyMessageStore) startSequenceSync() {\n\tlastSyncValues := make(map[string]uint64)\n\ttopicsToUpdate := []string{}\n\n\tshouldStop := false\n\tfor !shouldStop {\n\t\tselect {\n\t\tcase <-time.After(dms.idSyncDuration):\n\t\tcase <-dms.stopC:\n\t\t\tshouldStop = true\n\t\t}\n\n\t\tdms.topicSequencesLock.Lock()\n\t\ttopicsToUpdate = topicsToUpdate[:0]\n\t\tfor topic, seq := range dms.topicSequences {\n\t\t\tif lastSyncValues[topic] != seq {\n\t\t\t\ttopicsToUpdate = append(topicsToUpdate, topic)\n\t\t\t}\n\t\t}\n\t\tdms.topicSequencesLock.Unlock()\n\n\t\tfor _, topic := range topicsToUpdate {\n\t\t\tdms.topicSequencesLock.Lock()\n\t\t\tlatestValue := dms.topicSequences[topic]\n\t\t\tdms.topicSequencesLock.Unlock()\n\n\t\t\tlastSyncValues[topic] = latestValue\n\t\t\tdms.kvStore.Put(topicSchema, topic, []byte(strconv.FormatUint(latestValue, 10)))\n\t\t}\n\t}\n\tdms.stoppedC <- true\n}\n\nfunc (dms *DummyMessageStore) Check() error {\n\treturn nil\n}\n\nfunc (dms *DummyMessageStore) Partition(name string) (store.MessagePartition, error) {\n\treturn nil, nil\n}\n\nfunc (dms 
*DummyMessageStore) Partitions() ([]store.MessagePartition, error) {\n\treturn nil, nil\n}\n"
  },
  {
    "path": "server/store/dummystore/dummy_message_store_test.go",
    "content": "package dummystore\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc Test_DummyMessageStore_IncreaseOnStore(t *testing.T) {\n\ta := assert.New(t)\n\n\tdms := New(kvstore.NewMemoryKVStore())\n\n\ta.Equal(uint64(0), fne(dms.MaxMessageID(\"partition\")))\n\ta.NoError(dms.Store(\"partition\", 1, []byte{}))\n\ta.NoError(dms.Store(\"partition\", 2, []byte{}))\n\ta.Equal(uint64(2), fne(dms.MaxMessageID(\"partition\")))\n}\n\nfunc Test_DummyMessageStore_ErrorOnWrongMessageId(t *testing.T) {\n\ta := assert.New(t)\n\n\tstore := New(kvstore.NewMemoryKVStore())\n\n\ta.Equal(uint64(0), fne(store.MaxMessageID(\"partition\")))\n\ta.Error(store.Store(\"partition\", 42, []byte{}))\n}\n\nfunc Test_DummyMessageStore_InitIdsFromKvStore(t *testing.T) {\n\ta := assert.New(t)\n\n\t// given: a kv-store with some values, and a dummy-message-store based on it\n\tkvStore := kvstore.NewMemoryKVStore()\n\tkvStore.Put(topicSchema, \"partition1\", []byte(\"42\"))\n\tkvStore.Put(topicSchema, \"partition2\", []byte(\"21\"))\n\tdms := New(kvStore)\n\n\t// then\n\ta.Equal(uint64(42), fne(dms.MaxMessageID(\"partition1\")))\n\ta.Equal(uint64(21), fne(dms.MaxMessageID(\"partition2\")))\n}\n\nfunc Test_DummyMessageStore_SyncIds(t *testing.T) {\n\ta := assert.New(t)\n\n\t// given: a store which syncs every 1ms\n\tkvStore := kvstore.NewMemoryKVStore()\n\tdms := New(kvStore)\n\tdms.idSyncDuration = time.Millisecond\n\n\ta.Equal(uint64(0), fne(dms.MaxMessageID(\"partition\")))\n\t_, exist, _ := kvStore.Get(topicSchema, \"partition\")\n\ta.False(exist)\n\n\t// and is started\n\tdms.Start()\n\tdefer dms.Stop()\n\n\t// when: we set an id and wait longer than 1ms\n\n\t// lock&unlock mutex here, because normal invocation of setId() in the code is done while already protected by mutex\n\tdms.topicSequencesLock.Lock()\n\tdms.setID(\"partition\", 
uint64(42))\n\tdms.topicSequencesLock.Unlock()\n\ttime.Sleep(time.Millisecond * 4)\n\n\t// the value is synced to the kv store\n\tvalue, exist, _ := kvStore.Get(topicSchema, \"partition\")\n\ta.True(exist)\n\ta.Equal([]byte(strconv.FormatUint(uint64(42), 10)), value)\n}\n\nfunc Test_DummyMessageStore_SyncIdsOnStop(t *testing.T) {\n\ta := assert.New(t)\n\n\t// given: a store which syncs nearly never\n\tkvStore := kvstore.NewMemoryKVStore()\n\tdms := New(kvStore)\n\tdms.idSyncDuration = time.Hour\n\n\t// and is started\n\tdms.Start()\n\n\t// when: we set an id\n\tdms.topicSequencesLock.Lock()\n\tdms.setID(\"partition\", uint64(42))\n\tdms.topicSequencesLock.Unlock()\n\n\t// then it is not synced after some wait\n\ttime.Sleep(time.Millisecond * 2)\n\t_, exist, _ := kvStore.Get(topicSchema, \"partition\")\n\ta.False(exist)\n\n\t// but\n\n\t// when: we stop the store\n\tdms.Stop()\n\n\t// then: the the value is synced to the kv store\n\tvalue, exist, _ := kvStore.Get(topicSchema, \"partition\")\n\ta.True(exist)\n\ta.Equal([]byte(strconv.FormatUint(uint64(42), 10)), value)\n}\n\nfunc fne(args ...interface{}) interface{} {\n\tif args[1] != nil {\n\t\tpanic(args[1])\n\t}\n\treturn args[0]\n}\n"
  },
  {
    "path": "server/store/fetch_request.go",
    "content": "package store\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"sync\"\n)\n\nvar ErrRequestDone = errors.New(\"Fetch request is done\")\n\nconst (\n\tDirectionOneMessage FetchDirection = 0\n\tDirectionForward    FetchDirection = 1\n\tDirectionBackwards  FetchDirection = -1\n\n\t// TODO Bogdan decide the channel size and if should be customizable\n\tFetchBufferSize = 10\n)\n\ntype FetchDirection int\n\n// FetchedMessage is a struct containing a pair: guble Message and its ID.\ntype FetchedMessage struct {\n\tID      uint64\n\tMessage []byte\n}\n\n// FetchRequest is used for fetching messages in a MessageStore.\ntype FetchRequest struct {\n\tsync.RWMutex\n\n\t// Partition is the Store name to search for messages\n\tPartition string\n\n\t// StartID is the message sequence id to start\n\tStartID uint64\n\n\t// EndID is the message sequence id to finish. If  will not be used.\n\tEndID uint64\n\n\t// Direction has 3 possible values:\n\t// Direction == 0: Only the Message with StartId\n\t// Direction == 1: Fetch also the next Count Messages with a higher MessageId\n\t// Direction == -1: Fetch also the next Count Messages with a lower MessageId\n\tDirection FetchDirection\n\n\t// Count is the maximum number of messages to return\n\tCount int\n\n\t// MessageC is the channel to send the message back to the receiver\n\tMessageC chan *FetchedMessage\n\n\t// ErrorC is a channel if an error occurs\n\tErrorC chan error\n\n\t// StartC Through this channel , the total number or result\n\t// is returned, before sending the first message.\n\t// The Fetch() methods blocks on putting the number to the start channel.\n\tStartC chan int\n\n\tdone bool\n}\n\n// NewFetchRequest creates a new FetchRequest pointer initialized with provided values\n// if `count` is negative will be set to MaxInt32\nfunc NewFetchRequest(partition string, start, end uint64, direction FetchDirection, count int) *FetchRequest {\n\tif count < 0 {\n\t\tcount = math.MaxInt32\n\t}\n\treturn 
&FetchRequest{\n\t\tPartition: partition,\n\t\tStartID:   start,\n\t\tEndID:     end,\n\t\tDirection: direction,\n\n\t\tCount: count,\n\t}\n}\n\nfunc (fr *FetchRequest) Init() {\n\tfr.Lock()\n\tdefer fr.Unlock()\n\tfr.done = false\n\n\tfr.StartC = make(chan int)\n\tfr.MessageC = make(chan *FetchedMessage, FetchBufferSize)\n\tfr.ErrorC = make(chan error)\n}\n\n// Ready returns the count of messages that will be returned meaning that\n// the fetch is starting. It reads the number from the StartC channel.\nfunc (fr *FetchRequest) Ready() int {\n\treturn <-fr.StartC\n}\n\nfunc (fr *FetchRequest) Messages() <-chan *FetchedMessage {\n\treturn fr.MessageC\n}\n\nfunc (fr *FetchRequest) Errors() <-chan error {\n\treturn fr.ErrorC\n}\n\nfunc (fr *FetchRequest) Error(err error) {\n\tfr.ErrorC <- err\n}\n\nfunc (fr *FetchRequest) Push(id uint64, message []byte) {\n\tfr.PushFetchMessage(&FetchedMessage{id, message})\n}\n\nfunc (fr *FetchRequest) PushFetchMessage(fm *FetchedMessage) {\n\tfr.MessageC <- fm\n}\n\nfunc (fr *FetchRequest) PushError(err error) {\n\tfr.ErrorC <- err\n}\n\nfunc (fr *FetchRequest) IsDone() bool {\n\tfr.RLock()\n\tdefer fr.RUnlock()\n\treturn fr.done\n}\n\nfunc (fr *FetchRequest) Done() {\n\tfr.Lock()\n\tdefer fr.Unlock()\n\tfr.done = true\n\n\tclose(fr.MessageC)\n}\n"
  },
  {
    "path": "server/store/filestore/cache.go",
    "content": "package filestore\n\nimport (\n\t\"sync\"\n\n\t\"github.com/smancke/guble/server/store\"\n)\n\ntype cache struct {\n\tentries []*cacheEntry\n\tsync.RWMutex\n}\n\nfunc newCache() *cache {\n\tc := &cache{\n\t\tentries: make([]*cacheEntry, 0),\n\t}\n\treturn c\n}\n\nfunc (c *cache) length() int {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\treturn len(c.entries)\n}\n\nfunc (c *cache) add(entry *cacheEntry) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.entries = append(c.entries, entry)\n}\n\ntype cacheEntry struct {\n\tmin, max uint64\n}\n\n// Contains returns true if the req.StartID is between the min and max\n// There is a chance the request messages to be found in this range\nfunc (entry *cacheEntry) Contains(req *store.FetchRequest) bool {\n\tif req.StartID == 0 {\n\t\treq.Direction = 1\n\t\treturn true\n\t}\n\tif req.Direction >= 0 {\n\t\treturn req.StartID >= entry.min && req.StartID <= entry.max\n\t}\n\treturn req.StartID >= entry.min\n}\n"
  },
  {
    "path": "server/store/filestore/index_list.go",
    "content": "package filestore\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// IndexList a sorted list of fetch entries\ntype indexList struct {\n\titems []*index\n\n\tsync.RWMutex\n}\n\nfunc newIndexList(size int) *indexList {\n\treturn &indexList{items: make([]*index, 0, size)}\n}\n\nfunc (l *indexList) len() int {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\treturn len(l.items)\n}\n\nfunc (l *indexList) insertList(other *indexList) {\n\tl.insert(other.toSliceArray()...)\n}\n\n//Insert  adds in the sorted list a new element\nfunc (l *indexList) insert(items ...*index) {\n\tfor _, elem := range items {\n\t\tl.insertElem(elem)\n\t}\n}\n\nfunc (l *indexList) insertElem(elem *index) {\n\tl.Lock()\n\tdefer l.Unlock()\n\n\t// first element on list just append at the end\n\tif len(l.items) == 0 {\n\t\tl.items = append(l.items, elem)\n\t\treturn\n\t}\n\n\t// if the first element in list have a bigger id...insert new element on the start of list\n\tif l.items[0].id >= elem.id {\n\t\tl.items = append([]*index{elem}, l.items...)\n\t\treturn\n\t}\n\n\tif l.items[len(l.items)-1].id <= elem.id {\n\t\tl.items = append(l.items, elem)\n\t\treturn\n\t}\n\n\t//found the correct position to make an insertion sort\n\tfor i := 1; i <= len(l.items)-1; i++ {\n\t\tif l.items[i].id > elem.id {\n\t\t\tl.items = append(l.items[:i], append([]*index{elem}, l.items[i:]...)...)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Clear empties the current list\nfunc (l *indexList) clear() {\n\tl.items = make([]*index, 0)\n}\n\n// GetIndexEntryFromID performs a binarySearch retrieving the\n// true, the position and list and the actual entry if found\n// false , -1 ,nil if position is not found\n// search performs a binary search returning:\n// - `true` in case the item was found\n// - `position` position of the item\n// - `bestIndex` the closest index to the searched item if not found.\n// - `index` the index if found\nfunc (l 
*indexList) search(searchID uint64) (bool, int, int, *index) {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\tif len(l.items) == 0 {\n\t\treturn false, -1, -1, nil\n\t}\n\n\th := len(l.items) - 1\n\tf := 0\n\tbestIndex := f\n\n\tfor f <= h {\n\t\tmid := (h + f) / 2\n\t\tif l.items[mid].id == searchID {\n\t\t\treturn true, mid, bestIndex, l.items[mid]\n\t\t} else if l.items[mid].id < searchID {\n\t\t\tf = mid + 1\n\t\t} else {\n\t\t\th = mid - 1\n\t\t}\n\n\t\tif abs(l.items[mid].id, searchID) <= abs(l.items[bestIndex].id, searchID) {\n\t\t\tbestIndex = mid\n\t\t}\n\t}\n\n\treturn false, -1, bestIndex, nil\n}\n\n//Back retrieves the element with the biggest id or nil if list is empty\nfunc (l *indexList) back() *index {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\tif len(l.items) == 0 {\n\t\treturn nil\n\t}\n\n\treturn l.items[len(l.items)-1]\n}\n\n//Front retrieves the element with the smallest id or nil if list is empty\nfunc (l *indexList) front() *index {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\tif len(l.items) == 0 {\n\t\treturn nil\n\t}\n\n\treturn l.items[0]\n}\n\nfunc (l *indexList) toSliceArray() []*index {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\treturn l.items\n}\n\n//Front retrieves the element at the given index or nil if position is incorrect or list is empty\nfunc (l *indexList) get(pos int) *index {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\tif len(l.items) == 0 || pos < 0 || pos >= len(l.items) {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"len\": len(l.items),\n\t\t\t\"pos\": pos,\n\t\t}).Info(\"Empty list or invalid index\")\n\t\treturn nil\n\t}\n\n\treturn l.items[pos]\n}\n\nfunc (l *indexList) mapWithPredicate(predicate func(elem *index, i int) error) error {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\tfor i, elem := range l.items {\n\t\tif err := predicate(elem, i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l *indexList) String() string {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\ts := \"\"\n\tfor i, elem := range l.items {\n\t\ts += 
fmt.Sprintf(\"[%d:%d %d] \", i, elem.id, elem.fileID)\n\t}\n\treturn s\n}\n\n// Contains returns true if given ID is between first and last item in the list\nfunc (l *indexList) contains(id uint64) bool {\n\tl.RLock()\n\tdefer l.RUnlock()\n\n\tif len(l.items) == 0 {\n\t\treturn false\n\t}\n\n\tif id == 0 {\n\t\treturn true\n\t}\n\n\treturn l.items[0].id <= id && id <= l.items[len(l.items)-1].id\n}\n\n// Extract will return a new list containing items requested by the FetchRequest from this list\nfunc (l *indexList) extract(req *store.FetchRequest) *indexList {\n\tpotentialEntries := newIndexList(0)\n\tfound, pos, lastPos, _ := l.search(req.StartID)\n\tcurrentPos := lastPos\n\tif found {\n\t\tcurrentPos = pos\n\t}\n\n\tfor potentialEntries.len() < req.Count && currentPos >= 0 && currentPos < l.len() {\n\t\telem := l.get(currentPos)\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"elem\":       *elem,\n\t\t\t\"currentPos\": currentPos,\n\t\t\t\"req\":        *req,\n\t\t}).Debug(\"Elem in retrieve\")\n\n\t\tif elem == nil {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"pos\":     currentPos,\n\t\t\t\t\"l.Len\":   l.len(),\n\t\t\t\t\"len\":     potentialEntries.len(),\n\t\t\t\t\"startID\": req.StartID,\n\t\t\t\t\"count\":   req.Count,\n\t\t\t}).Error(\"Error in retrieving from list.Got nil entry\")\n\t\t\tbreak\n\t\t}\n\n\t\tpotentialEntries.insert(elem)\n\t\tcurrentPos += int(req.Direction)\n\n\t\t// // if we reach req.EndID than we break\n\t\tif req.EndID > 0 && elem.id >= req.EndID {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn potentialEntries\n}\n\nfunc abs(m1, m2 uint64) uint64 {\n\tif m1 > m2 {\n\t\treturn m1 - m2\n\t}\n\n\treturn m2 - m1\n}\n"
  },
  {
    "path": "server/store/filestore/index_list_test.go",
    "content": "package filestore\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n\n\t\"github.com/Sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc Test_SortedListSanity(t *testing.T) {\n\n\ta := assert.New(t)\n\tlist := newIndexList(1000)\n\n\tgeneratedIds := make([]uint64, 0, 11)\n\n\tfor i := 0; i < 11; i++ {\n\t\tmsgID := uint64(rand.Intn(50))\n\t\tgeneratedIds = append(generatedIds, msgID)\n\n\t\tentry := &index{\n\t\t\tsize:   3,\n\t\t\tid:     uint64(msgID),\n\t\t\toffset: 128,\n\t\t}\n\n\t\tlist.insert(entry)\n\t}\n\tmin := uint64(200)\n\tmax := uint64(0)\n\n\tfor _, id := range generatedIds {\n\t\tif max < id {\n\t\t\tmax = id\n\t\t}\n\t\tif min > id {\n\t\t\tmin = id\n\t\t}\n\t\tfound, pos, _, foundEntry := list.search(id)\n\t\ta.True(found)\n\t\ta.Equal(foundEntry.id, id)\n\t\ta.True(pos >= 0 && pos <= len(generatedIds))\n\t}\n\n\tlogrus.WithField(\"generatedIds\", generatedIds).Info(\"IdS\")\n\n\ta.Equal(min, list.front().id)\n\ta.Equal(max, list.back().id)\n\n\tfound, pos, bestIndex, foundEntry := list.search(uint64(46))\n\ta.False(found, \"Element should not be found since is a number greater than the random generated upper limit\")\n\ta.Equal(pos, -1)\n\ta.Nil(foundEntry)\n\tlogrus.WithField(\"bestIndex\", bestIndex).Info(\"Searching for closest position\")\n\n\ta.Equal(list.front().id, list.get(0).id, \"First element should contain the smallest element\")\n\ta.Nil(list.get(-1), \"Trying to get an invalid index will return nil\")\n\n\tlist.clear()\n\ta.Nil(list.front())\n\ta.Nil(list.back())\n\n}\n"
  },
  {
    "path": "server/store/filestore/logger.go",
    "content": "package filestore\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithField(\"module\", \"filestore\")\n"
  },
  {
    "path": "server/store/filestore/message_partition.go",
    "content": "package filestore\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/server/store\"\n\n\t\"io\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar (\n\tmagicNumber       = []byte{42, 249, 180, 108, 82, 75, 222, 182}\n\tfileFormatVersion = []byte{1}\n\tmessagesPerFile   = uint64(10000)\n\tindexEntrySize    = 20\n)\n\nconst (\n\tgubleNodeIdBits    = 3\n\tsequenceBits       = 12\n\tgubleNodeIdShift   = sequenceBits\n\ttimestampLeftShift = sequenceBits + gubleNodeIdBits\n\tgubleEpoch         = 1467714505012\n)\n\ntype index struct {\n\tid     uint64\n\toffset uint64\n\tsize   uint32\n\tfileID int\n}\n\ntype messagePartition struct {\n\tbasedir               string\n\tname                  string\n\tappendFile            *os.File\n\tindexFile             *os.File\n\tappendFilePosition    uint64\n\tmaxMessageID          uint64\n\tsequenceNumber        uint64\n\ttotalNumberOfMessages uint64\n\tentriesCount          uint64\n\tlist                  *indexList\n\tfileCache             *cache\n\n\tsync.RWMutex\n}\n\nfunc newMessagePartition(basedir string, storeName string) (*messagePartition, error) {\n\tp := &messagePartition{\n\t\tbasedir:   basedir,\n\t\tname:      storeName,\n\t\tlist:      newIndexList(int(messagesPerFile)),\n\t\tfileCache: newCache(),\n\t}\n\treturn p, p.initialize()\n}\n\nfunc (p *messagePartition) Name() string {\n\treturn p.name\n}\n\nfunc (p *messagePartition) MaxMessageID() uint64 {\n\tp.RLock()\n\tdefer p.RUnlock()\n\n\treturn p.maxMessageID\n}\n\nfunc (p *messagePartition) Count() uint64 {\n\tp.RLock()\n\tdefer p.RUnlock()\n\n\treturn p.totalNumberOfMessages\n}\n\nfunc (p *messagePartition) initialize() error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\t// reset the cache entries\n\tp.fileCache = newCache()\n\terr := p.readIdxFiles()\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"MessagePartition error on 
scanFiles\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Returns the start messages ids for all available message files\n// in a sorted list\nfunc (p *messagePartition) readIdxFiles() error {\n\tallFiles, err := ioutil.ReadDir(p.basedir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar indexFilenames []string\n\tfor _, fileInfo := range allFiles {\n\t\tif strings.HasPrefix(fileInfo.Name(), p.name+\"-\") && strings.HasSuffix(fileInfo.Name(), \".idx\") {\n\t\t\tfileIDString := filepath.Join(p.basedir, fileInfo.Name())\n\t\t\tlogger.WithField(\"name\", fileIDString).Info(\"Index name\")\n\t\t\tindexFilenames = append(indexFilenames, fileIDString)\n\t\t}\n\t}\n\n\t// if no .idx file are found.. there is nothing to load\n\tif len(indexFilenames) == 0 {\n\t\tlogger.Info(\"No .idx files found\")\n\t\treturn nil\n\t}\n\n\t//load the filecache from all the files\n\tlogger.WithFields(log.Fields{\n\t\t\"filenames\":  indexFilenames,\n\t\t\"totalFiles\": len(indexFilenames),\n\t}).Info(\"Found files\")\n\n\tfor i := 0; i < len(indexFilenames)-1; i++ {\n\t\tcEntry, err := readCacheEntryFromIdxFile(indexFilenames[i])\n\t\tif err != nil {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"idxFilename\": indexFilenames[i],\n\t\t\t\t\"err\":         err,\n\t\t\t}).Error(\"Error loading existing .idxFile\")\n\t\t\treturn err\n\t\t}\n\t\t//add to total number of messages per partition\n\t\tp.totalNumberOfMessages += messagesPerFile\n\n\t\t// put entry in file cache\n\t\tp.fileCache.add(cEntry)\n\t\tlogger.\n\t\t\tWithField(\"entries\", p.fileCache.entries).\n\t\t\tWithField(\"filename\", indexFilenames[i]).\n\t\t\tError(\"Entries\")\n\n\t\t// check the message id's for max value\n\t\tif cEntry.max >= p.maxMessageID {\n\t\t\tp.maxMessageID = cEntry.max\n\t\t}\n\t}\n\n\t// read the  idx file with   biggest id and load in the sorted cache\n\tif err := p.loadLastIndexList(indexFilenames[len(indexFilenames)-1]); err != nil {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"idxFilename\": 
indexFilenames[(len(indexFilenames) - 1)],\n\t\t\t\"err\":         err,\n\t\t}).Error(\"Error loading last .idx file\")\n\t\treturn err\n\t}\n\t//add the last part\n\tp.totalNumberOfMessages += uint64(p.list.len())\n\tback := p.list.back()\n\n\tif back != nil && back.id >= p.maxMessageID {\n\t\tp.maxMessageID = back.id\n\t}\n\n\treturn nil\n}\n\nfunc (p *messagePartition) closeAppendFiles() error {\n\tif p.appendFile != nil {\n\t\tif err := p.appendFile.Close(); err != nil {\n\t\t\tif p.indexFile != nil {\n\t\t\t\tdefer p.indexFile.Close()\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tp.appendFile = nil\n\t}\n\n\tif p.indexFile != nil {\n\t\terr := p.indexFile.Close()\n\t\tp.indexFile = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// readCacheEntryFromIdxFile  reads the first and last entry from a idx file which should be sorted\nfunc readCacheEntryFromIdxFile(filename string) (entry *cacheEntry, err error) {\n\tentriesInIndex, err := calculateNoEntries(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tmin, _, _, err := readIndexEntry(file, 0)\n\tif err != nil {\n\t\treturn\n\t}\n\tmax, _, _, err := readIndexEntry(file, int64((entriesInIndex-1)*uint64(indexEntrySize)))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tentry = &cacheEntry{min, max}\n\treturn\n}\n\nfunc (p *messagePartition) createNextAppendFiles() error {\n\tfilename := p.composeMsgFilenameForPosition(uint64(p.fileCache.length()))\n\tlogger.WithField(\"filename\", filename).Info(\"Creating next append files\")\n\n\tappendfile, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// write file header on new files\n\tif stat, _ := appendfile.Stat(); stat.Size() == 0 {\n\t\tp.appendFilePosition = uint64(stat.Size())\n\n\t\t_, err = appendfile.Write(magicNumber)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = 
appendfile.Write(fileFormatVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tindexfile, errIndex := os.OpenFile(p.composeIdxFilenameForPosition(uint64(p.fileCache.length())), os.O_RDWR|os.O_CREATE, 0666)\n\tif errIndex != nil {\n\t\tdefer appendfile.Close()\n\t\tdefer os.Remove(appendfile.Name())\n\t\treturn err\n\t}\n\n\tp.appendFile = appendfile\n\tp.indexFile = indexfile\n\tstat, err := appendfile.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.appendFilePosition = uint64(stat.Size())\n\n\treturn nil\n}\n\nfunc (p *messagePartition) generateNextMsgID(nodeID uint8) (uint64, int64, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\t//Get the local Timestamp\n\tcurrTime := time.Now()\n\t// timestamp in Seconds will be return to client\n\ttimestamp := currTime.Unix()\n\n\t//Use the unixNanoTimestamp for generating id\n\tnanoTimestamp := currTime.UnixNano()\n\n\tif nanoTimestamp < gubleEpoch {\n\t\terr := fmt.Errorf(\"Clock is moving backwards. Rejecting requests until %d.\", timestamp)\n\t\treturn 0, 0, err\n\t}\n\n\tid := (uint64(nanoTimestamp-gubleEpoch) << timestampLeftShift) |\n\t\t(uint64(nodeID) << gubleNodeIdShift) | p.sequenceNumber\n\n\tp.sequenceNumber++\n\n\tlogger.WithFields(log.Fields{\n\t\t\"id\":                  id,\n\t\t\"messagePartition\":    p.basedir,\n\t\t\"localSequenceNumber\": p.sequenceNumber,\n\t\t\"currentNode\":         nodeID,\n\t}).Debug(\"Generated id\")\n\n\treturn id, timestamp, nil\n}\n\nfunc (p *messagePartition) Close() error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\treturn p.closeAppendFiles()\n}\n\nfunc (p *messagePartition) DoInTx(fnToExecute func(maxMessageId uint64) error) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\treturn fnToExecute(p.maxMessageID)\n}\n\nfunc (p *messagePartition) Store(msgID uint64, msg []byte) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\treturn p.store(msgID, msg)\n}\n\nfunc (p *messagePartition) store(messageID uint64, data []byte) error {\n\tif p.entriesCount == messagesPerFile 
||\n\t\tp.appendFile == nil ||\n\t\tp.indexFile == nil {\n\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"msgId\":        messageID,\n\t\t\t\"entriesCount\": p.entriesCount,\n\t\t\t\"fileCache\":    p.fileCache,\n\t\t}).Debug(\"store\")\n\n\t\tif err := p.closeAppendFiles(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif p.entriesCount == messagesPerFile {\n\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"msgId\":        messageID,\n\t\t\t\t\"entriesCount\": p.entriesCount,\n\t\t\t}).Info(\"Dumping current file\")\n\n\t\t\t//sort the indexFile\n\t\t\terr := p.rewriteSortedIdxFile(p.composeIdxFilenameForPosition(uint64(p.fileCache.length())))\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"Error dumping file\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t//Add items in the filecache\n\t\t\tp.fileCache.add(&cacheEntry{\n\t\t\t\tmin: p.list.front().id,\n\t\t\t\tmax: p.list.back().id,\n\t\t\t})\n\n\t\t\t//clear the current sorted cache\n\t\t\tp.list.clear()\n\t\t\tp.entriesCount = 0\n\t\t}\n\n\t\tif err := p.createNextAppendFiles(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// write the message size and the message id: 32 bit and 64 bit, so 12 bytes\n\tsizeAndID := make([]byte, 12)\n\tbinary.LittleEndian.PutUint32(sizeAndID, uint32(len(data)))\n\tbinary.LittleEndian.PutUint64(sizeAndID[4:], messageID)\n\n\tif _, err := p.appendFile.Write(sizeAndID); err != nil {\n\t\treturn err\n\t}\n\n\t// write the message\n\tif _, err := p.appendFile.Write(data); err != nil {\n\t\treturn err\n\t}\n\n\t// write the index entry to the index file\n\tmessageOffset := p.appendFilePosition + uint64(len(sizeAndID))\n\terr := writeIndexEntry(p.indexFile, messageID, messageOffset, uint32(len(data)), p.entriesCount)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.entriesCount++\n\tp.totalNumberOfMessages++\n\n\tlogger.WithFields(log.Fields{\n\t\t\"p.noOfEntriesInIndexFile\": p.entriesCount,\n\t\t\"msgID\":                    messageID,\n\t\t\"msgSize\":                  
uint32(len(data)),\n\t\t\"msgOffset\":                messageOffset,\n\t\t\"filename\":                 p.indexFile.Name(),\n\t}).Debug(\"Wrote in indexFile\")\n\n\t//create entry for l\n\te := &index{\n\t\tid:     messageID,\n\t\toffset: messageOffset,\n\t\tsize:   uint32(len(data)),\n\t\tfileID: p.fileCache.length(),\n\t}\n\tp.list.insert(e)\n\n\tp.appendFilePosition += uint64(len(sizeAndID) + len(data))\n\n\tif messageID > p.maxMessageID {\n\t\tp.maxMessageID = messageID\n\t}\n\n\treturn nil\n}\n\n// Fetch fetches a set of messages\nfunc (p *messagePartition) Fetch(req *store.FetchRequest) {\n\tle := logger.WithFields(log.Fields{\n\t\t\"partition\": req.Partition,\n\t\t\"startID\":   req.StartID,\n\t\t\"endID\":     req.EndID,\n\t\t\"Count\":     req.Count,\n\t})\n\tle.Debug(\"Fetching\")\n\n\tgo func() {\n\t\tfetchList, err := p.calculateFetchList(req)\n\n\t\tif err != nil {\n\t\t\tlog.WithField(\"err\", err).Error(\"Error calculating list\")\n\t\t\treq.ErrorC <- err\n\t\t\treturn\n\t\t}\n\t\treq.StartC <- fetchList.len()\n\n\t\terr = p.fetchByFetchlist(fetchList, req)\n\n\t\tif err != nil {\n\t\t\tle.WithField(\"err\", err).Error(\"Error calculating list\")\n\t\t\treq.Error(err)\n\t\t\treturn\n\t\t}\n\t\treq.Done()\n\t}()\n}\n\n// fetchByFetchlist fetches the messages in the supplied fetchlist and sends them to the message-channel\nfunc (p *messagePartition) fetchByFetchlist(fetchList *indexList, req *store.FetchRequest) error {\n\treturn fetchList.mapWithPredicate(func(index *index, _ int) error {\n\t\tif req.IsDone() {\n\t\t\treturn store.ErrRequestDone\n\t\t}\n\n\t\tfilename := p.composeMsgFilenameForPosition(uint64(index.fileID))\n\t\tfile, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\tmsg := make([]byte, index.size, index.size)\n\t\t_, err = file.ReadAt(msg, int64(index.offset))\n\t\tif err != nil {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"err\":    err,\n\t\t\t\t\"offset\": 
index.offset,\n\t\t\t}).Error(\"Error ReadAt\")\n\t\t\treturn err\n\t\t}\n\n\t\treq.Push(index.id, msg)\n\t\treturn nil\n\t})\n}\n\n// calculateFetchList returns a list of fetchEntry records for all messages in the fetch request.\nfunc (p *messagePartition) calculateFetchList(req *store.FetchRequest) (*indexList, error) {\n\tif req.Direction == 0 {\n\t\treq.Direction = 1\n\t}\n\n\tpotentialEntries := newIndexList(0)\n\n\t// reading from IndexFiles\n\t// TODO: fix  prev when EndID logic will be done\n\t// prev specifies if we found anything in the previous list, in which case\n\t// it is possible the items to continue in the next list\n\tprev := false\n\n\tp.fileCache.RLock()\n\n\tfor i, fce := range p.fileCache.entries {\n\t\tif fce.Contains(req) || (prev && potentialEntries.len() < req.Count) {\n\t\t\tprev = true\n\n\t\t\tl, err := p.loadIndexList(i)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Info(\"Error loading idx file in memory\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpotentialEntries.insert(l.extract(req).toSliceArray()...)\n\t\t} else {\n\t\t\tprev = false\n\t\t}\n\t}\n\n\t// Read from current cached value (the idx file which size is smaller than MESSAGE_PER_FILE\n\tif p.list.contains(req.StartID) || (prev && potentialEntries.len() < req.Count) {\n\t\tpotentialEntries.insert(p.list.extract(req).toSliceArray()...)\n\t}\n\n\t// Currently potentialEntries contains a potentials IDs from any files and\n\t// from in memory. 
From this will select only Count.\n\tfetchList := potentialEntries.extract(req)\n\n\tp.fileCache.RUnlock()\n\n\treturn fetchList, nil\n}\n\nfunc (p *messagePartition) rewriteSortedIdxFile(filename string) error {\n\tlogger.WithFields(log.Fields{\n\t\t\"filename\": filename,\n\t}).Info(\"Dumping Sorted list\")\n\n\tfile, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tlastID := uint64(0)\n\tfor i := 0; i < p.list.len(); i++ {\n\t\titem := p.list.get(i)\n\n\t\tif lastID >= item.id {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"err\":      err,\n\t\t\t\t\"filename\": filename,\n\t\t\t}).Error(\"Sorted list is not sorted\")\n\n\t\t\treturn err\n\t\t}\n\t\tlastID = item.id\n\n\t\terr := writeIndexEntry(file, item.id, item.offset, item.size, uint64(i))\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"curMsgId\": item.id,\n\t\t\t\"err\":      err,\n\t\t\t\"pos\":      i,\n\t\t\t\"filename\": file.Name(),\n\t\t}).Debug(\"Wrote while dumpSortedIndexFile\")\n\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"err\", err).Error(\"Error writing indexfile in sorted way.\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// readIndexEntry reads from a .idx file from the given `position` the msgID msgOffset and msgSize\nfunc readIndexEntry(file *os.File, position int64) (uint64, uint64, uint32, error) {\n\toffsetBuffer := make([]byte, indexEntrySize)\n\tif _, err := file.ReadAt(offsetBuffer, position); err != nil {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"err\":      err,\n\t\t\t\"file\":     file.Name(),\n\t\t\t\"indexPos\": position,\n\t\t}).Error(\"Error reading index entry\")\n\t\treturn 0, 0, 0, err\n\t}\n\n\tid := binary.LittleEndian.Uint64(offsetBuffer)\n\toffset := binary.LittleEndian.Uint64(offsetBuffer[8:])\n\tsize := binary.LittleEndian.Uint32(offsetBuffer[16:])\n\treturn id, offset, size, nil\n}\n\n// writeIndexEntry write in a .idx file to  the given `pos` the msgIDm msgOffset and 
msgSize\nfunc writeIndexEntry(w io.WriterAt, id uint64, offset uint64, size uint32, pos uint64) error {\n\tposition := int64(uint64(indexEntrySize) * pos)\n\toffsetBuffer := make([]byte, indexEntrySize)\n\n\tbinary.LittleEndian.PutUint64(offsetBuffer, id)\n\tbinary.LittleEndian.PutUint64(offsetBuffer[8:], offset)\n\tbinary.LittleEndian.PutUint32(offsetBuffer[16:], size)\n\n\tif _, err := w.WriteAt(offsetBuffer, position); err != nil {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"err\":      err,\n\t\t\t\"position\": position,\n\t\t\t\"id\":       id,\n\t\t}).Error(\"Error writing index entry\")\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// calculateNoEntries reads the idx file with name `filename` and will calculate how many entries are\nfunc calculateNoEntries(filename string) (uint64, error) {\n\tstat, err := os.Stat(filename)\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"Stat failed\")\n\t\treturn 0, err\n\t}\n\tentriesInIndex := uint64(stat.Size() / int64(indexEntrySize))\n\treturn entriesInIndex, nil\n}\n\n// loadLastIndexFile will construct the current Sorted List for fetch entries which corresponds to the idx file with the biggest name\nfunc (p *messagePartition) loadLastIndexList(filename string) error {\n\tlogger.WithField(\"filename\", filename).Info(\"Loading last index file\")\n\n\tl, err := p.loadIndexList(p.fileCache.length())\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error loading last index filename\")\n\t\treturn err\n\t}\n\n\tp.list = l\n\tp.entriesCount = uint64(l.len())\n\n\treturn nil\n}\n\n// loadIndexFile will read a file and will return a sorted list for fetchEntries\nfunc (p *messagePartition) loadIndexList(fileID int) (*indexList, error) {\n\tfilename := p.composeIdxFilenameForPosition(uint64(fileID))\n\tl := newIndexList(int(messagesPerFile))\n\tlogger.WithField(\"filename\", filename).Debug(\"loadIndexFile\")\n\n\tentriesInIndex, err := calculateNoEntries(filename)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tlogger.WithField(\"err\", err).Error(\"os.Open failed\")\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tfor i := uint64(0); i < entriesInIndex; i++ {\n\t\tid, offset, size, err := readIndexEntry(file, int64(i*uint64(indexEntrySize)))\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"offset\": offset,\n\t\t\t\"size\":   size,\n\t\t\t\"id\":     id,\n\t\t\t\"err\":    err,\n\t\t}).Debug(\"readIndexEntry\")\n\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"err\", err).Error(\"Read error\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\te := &index{\n\t\t\tid:     id,\n\t\t\tsize:   size,\n\t\t\toffset: offset,\n\t\t\tfileID: fileID,\n\t\t}\n\t\tl.insert(e)\n\t\tlogger.WithField(\"len\", l.len()).Debug(\"loadIndexFile\")\n\t}\n\treturn l, nil\n}\n\nfunc (p *messagePartition) composeMsgFilenameForPosition(value uint64) string {\n\treturn filepath.Join(p.basedir, fmt.Sprintf(\"%s-%020d.msg\", p.name, value))\n}\n\nfunc (p *messagePartition) composeIdxFilenameForPosition(value uint64) string {\n\treturn filepath.Join(p.basedir, fmt.Sprintf(\"%s-%020d.idx\", p.name, value))\n}\n"
  },
  {
    "path": "server/store/filestore/message_partition_robustness_test.go",
    "content": "package filestore\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_MessagePartition_forConcurrentWriteAndReads(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\t// testutil.PprofDebug()\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_partition_store_test\")\n\tdefer os.RemoveAll(dir)\n\n\tstore, _ := newMessagePartition(dir, \"myMessages\")\n\n\tn := 2000 * 100\n\tnReaders := 7\n\n\twriterDone := make(chan bool)\n\tgo messagePartitionWriter(a, store, n, writerDone)\n\n\treaderDone := make(chan bool)\n\tfor i := 1; i <= nReaders; i++ {\n\t\tgo messagePartitionReader(\"reader\"+strconv.Itoa(i), a, store, n, readerDone)\n\t}\n\n\tselect {\n\tcase <-writerDone:\n\tcase <-time.After(time.Second * 30):\n\t\ta.Fail(\"writer timed out\")\n\t}\n\n\ttimeout := time.After(time.Second * 30)\n\tfor i := 0; i < nReaders; i++ {\n\t\tselect {\n\t\tcase <-readerDone:\n\t\tcase <-timeout:\n\t\t\ta.Fail(\"reader timed out\")\n\t\t}\n\t}\n}\n\nfunc messagePartitionWriter(a *assert.Assertions, store *messagePartition, n int, done chan bool) {\n\tfor i := 1; i <= n; i++ {\n\t\tmsg := []byte(\"Hello \" + strconv.Itoa(i))\n\t\ta.NoError(store.Store(uint64(i), msg))\n\t}\n\tdone <- true\n}\n\nfunc messagePartitionReader(name string, a *assert.Assertions, mStore *messagePartition, n int, done chan bool) {\n\tlastReadMessage := 0\n\n\tfor lastReadMessage < n {\n\t\tmsgC := make(chan *store.FetchedMessage, 10)\n\t\terrorC := make(chan error)\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"module\":      \"testing\",\n\t\t\t\"name\":        name,\n\t\t\t\"lastReadMsg\": lastReadMessage + 1,\n\t\t}).Debug(\"Start fetching\")\n\n\t\tmStore.Fetch(&store.FetchRequest{\n\t\t\tPartition: \"myMessages\",\n\t\t\tStartID:   uint64(lastReadMessage + 
1),\n\t\t\tDirection: 1,\n\t\t\tCount:     math.MaxInt32,\n\t\t\tMessageC:  msgC,\n\t\t\tErrorC:    errorC,\n\t\t\tStartC:    make(chan int, 1),\n\t\t})\n\n\tFETCH:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msgAndID, open := <-msgC:\n\t\t\t\tif !open {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"module\":      \"testing\",\n\t\t\t\t\t\t\"name\":        name,\n\t\t\t\t\t\t\"lastReadMsg\": lastReadMessage,\n\t\t\t\t\t}).Debug(\"Stop fetching\")\n\t\t\t\t\tbreak FETCH\n\t\t\t\t}\n\t\t\t\ta.Equal(lastReadMessage+1, int(msgAndID.ID), \"Reader: \"+name)\n\t\t\t\tlastReadMessage = int(msgAndID.ID)\n\t\t\tcase err := <-errorC:\n\t\t\t\ta.Fail(\"received error\", err.Error())\n\t\t\t\t<-done\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"module\":      \"testing\",\n\t\t\"name\":        name,\n\t\t\"lastReadMsg\": lastReadMessage,\n\t}).Debug(\"Ready got id\")\n\n\tdone <- true\n}\n"
  },
  {
    "path": "server/store/filestore/message_partition_test.go",
    "content": "package filestore\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/server/store\"\n\n\t\"errors\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestFileMessageStore_GenerateNextMsgId(t *testing.T) {\n\ta := assert.New(t)\n\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, err := newMessagePartition(dir, \"node1\")\n\ta.Nil(err)\n\n\tvar generatedIDs []uint64\n\tlastID := uint64(0)\n\n\tfor i := 0; i < 1000; i++ {\n\t\tid, _, err := mStore.generateNextMsgID(1)\n\t\tgeneratedIDs = append(generatedIDs, id)\n\t\ta.True(id > lastID, \"Ids should be monotonic\")\n\t\tlastID = id\n\t\ta.Nil(err)\n\t}\n}\n\nfunc TestFileMessageStore_GenerateNextMsgIdMultipleNodes(t *testing.T) {\n\ta := assert.New(t)\n\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, err := newMessagePartition(dir, \"node1\")\n\ta.Nil(err)\n\n\tdir2, _ := ioutil.TempDir(\"\", \"guble_message_partition_test2\")\n\tdefer os.RemoveAll(dir2)\n\tmStore2, err := newMessagePartition(dir2, \"node1\")\n\ta.Nil(err)\n\n\tvar generatedIDs []uint64\n\tlastID := uint64(0)\n\n\tfor i := 0; i < 1000; i++ {\n\t\tid, _, err := mStore.generateNextMsgID(1)\n\t\tid2, _, err := mStore2.generateNextMsgID(2)\n\t\ta.True(id2 > id, \"Ids should be monotonic\")\n\t\tgeneratedIDs = append(generatedIDs, id)\n\t\tgeneratedIDs = append(generatedIDs, id2)\n\t\ttime.Sleep(1 * time.Millisecond)\n\t\ta.True(id > lastID, \"Ids should be monotonic\")\n\t\ta.True(id2 > lastID, \"Ids should be monotonic\")\n\t\tlastID = id2\n\t\ta.Nil(err)\n\t}\n\n\tfor i := 0; i < len(generatedIDs)-1; i++ {\n\t\tif generatedIDs[i] >= generatedIDs[i+1] {\n\t\t\ta.FailNow(\"Not Sorted\")\n\t\t}\n\t}\n}\n\nfunc Test_MessagePartition_loadFiles(t *testing.T) {\n\ta := assert.New(t)\n\t// allow five messages per file\n\tmessagesPerFile = uint64(5)\n\n\tdir, _ 
:= ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\tmsgData := []byte(\"aaaaaaaaaa\")             // 10 bytes message\n\ta.NoError(mStore.Store(uint64(3), msgData)) // stored offset 21, size: 10\n\ta.NoError(mStore.Store(uint64(4), msgData)) // stored offset 21+10+12=43\n\n\ta.NoError(mStore.Store(uint64(10), msgData)) // stored offset 43+22=65\n\n\ta.NoError(mStore.Store(uint64(9), msgData)) // stored offset 65+22=87\n\ta.NoError(mStore.Store(uint64(5), msgData)) // stored offset 87+22=109\n\n\t// here second file will start\n\ta.NoError(mStore.Store(uint64(8), msgData))  // stored offset 21\n\ta.NoError(mStore.Store(uint64(15), msgData)) // stored offset 43\n\ta.NoError(mStore.Store(uint64(13), msgData)) // stored offset 65\n\n\ta.NoError(mStore.Store(uint64(22), msgData)) // stored offset 87\n\ta.NoError(mStore.Store(uint64(23), msgData)) // stored offset 109\n\n\t// third file\n\ta.NoError(mStore.Store(uint64(24), msgData)) // stored offset 21\n\ta.NoError(mStore.Store(uint64(26), msgData)) // stored offset 43\n\n\ta.NoError(mStore.Store(uint64(30), msgData)) // stored offset 65\n\ta.Equal(uint64(13), mStore.Count())\n\n\ta.NoError(mStore.Close())\n\n\terr := mStore.initialize()\n\ta.NoError(err)\n\n\tcEntry, err := readCacheEntryFromIdxFile(path.Join(dir, \"myMessages-00000000000000000000.idx\"))\n\ta.Equal(uint64(3), cEntry.min)\n\ta.Equal(uint64(10), cEntry.max)\n\ta.NoError(err)\n\n\ta.Equal(uint64(26), mStore.Count())\n\n}\n\nfunc Test_MessagePartition_correctIdAfterRestart(t *testing.T) {\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\ta.NoError(mStore.Store(uint64(1), []byte(\"aaaaaaaaaa\")))\n\ta.NoError(mStore.Store(uint64(2), []byte(\"aaaaaaaaaa\")))\n\ta.Equal(uint64(2), 
mStore.MaxMessageID())\n\ta.NoError(mStore.Close())\n\ta.Equal(uint64(2), mStore.Count())\n\n\tnewMStore, err := newMessagePartition(dir, \"myMessages\")\n\ta.NoError(err)\n\ta.Equal(uint64(2), newMStore.MaxMessageID())\n\ta.Equal(uint64(2), newMStore.Count())\n}\n\nfunc Benchmark_Storing_HelloWorld_Messages(b *testing.B) {\n\ta := assert.New(b)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\tb.ResetTimer()\n\tfor i := 1; i <= b.N; i++ {\n\t\ta.NoError(mStore.Store(uint64(i), []byte(\"Hello World\")))\n\t}\n\ta.NoError(mStore.Close())\n\tb.StopTimer()\n}\n\nfunc Benchmark_Storing_1Kb_Messages(b *testing.B) {\n\ta := assert.New(b)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\tmessage := make([]byte, 1024)\n\tfor i := range message {\n\t\tmessage[i] = 'a'\n\t}\n\n\tb.ResetTimer()\n\tfor i := 1; i <= b.N; i++ {\n\t\ta.NoError(mStore.Store(uint64(i), message))\n\t}\n\ta.NoError(mStore.Close())\n\tb.StopTimer()\n}\n\nfunc Benchmark_Storing_1MB_Messages(b *testing.B) {\n\ta := assert.New(b)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\tmessage := make([]byte, 1024*1024)\n\tfor i := range message {\n\t\tmessage[i] = 'a'\n\t}\n\n\tb.ResetTimer()\n\tfor i := 1; i <= b.N; i++ {\n\t\ta.NoError(mStore.Store(uint64(i), message))\n\t}\n\ta.NoError(mStore.Close())\n\tb.StopTimer()\n}\n\nfunc Test_calculateFetchList(t *testing.T) {\n\t// allow five messages per file\n\tmessagesPerFile = uint64(5)\n\n\tmsgData := []byte(\"aaaaaaaaaa\") // 10 bytes message\n\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\t// File header: 
MAGIC_NUMBER + FILE_NUMBER_VERSION = 9 bytes in the file\n\t// For each stored message there is a 12 bytes write that contains the msgID and size\n\n\ta.NoError(mStore.Store(uint64(3), msgData)) // stored offset 21, size: 10\n\ta.NoError(mStore.Store(uint64(4), msgData)) // stored offset 21+10+12=43\n\n\ta.NoError(mStore.Store(uint64(10), msgData)) // stored offset 43+22=65\n\n\ta.NoError(mStore.Store(uint64(9), msgData)) // stored offset 65+22=87\n\ta.NoError(mStore.Store(uint64(5), msgData)) // stored offset 87+22=109\n\n\t// here second file will start\n\ta.NoError(mStore.Store(uint64(8), msgData))  // stored offset 21\n\ta.NoError(mStore.Store(uint64(15), msgData)) // stored offset 43\n\ta.NoError(mStore.Store(uint64(13), msgData)) // stored offset 65\n\n\ta.NoError(mStore.Store(uint64(22), msgData)) // stored offset 87\n\ta.NoError(mStore.Store(uint64(23), msgData)) // stored offset 109\n\n\t// third file\n\ta.NoError(mStore.Store(uint64(24), msgData)) // stored offset 21\n\ta.NoError(mStore.Store(uint64(26), msgData)) // stored offset 43\n\n\ta.NoError(mStore.Store(uint64(30), msgData)) // stored offset 65\n\n\tdefer a.NoError(mStore.Close())\n\n\ttestCases := []struct {\n\t\tdescription     string\n\t\treq             store.FetchRequest\n\t\texpectedResults indexList\n\t}{\n\t\t{`direct match`,\n\t\t\tstore.FetchRequest{StartID: 3, Direction: 0, Count: 1},\n\t\t\tindexList{\n\t\t\t\titems: []*index{{3, uint64(21), 10, 0}}, // messageId, offset, size, fileId\n\t\t\t},\n\t\t},\n\t\t{`direct match in second file`,\n\t\t\tstore.FetchRequest{StartID: 8, Direction: 0, Count: 1},\n\t\t\tindexList{\n\t\t\t\titems: []*index{{8, uint64(21), 10, 1}}, // messageId, offset, size, fileId,\n\t\t\t},\n\t\t},\n\t\t{`direct match in second file, not first position`,\n\t\t\tstore.FetchRequest{StartID: 13, Direction: 0, Count: 1},\n\t\t\tindexList{\n\t\t\t\titems: []*index{{13, uint64(65), 10, 1}}, // messageId, offset, size, fileId,\n\t\t\t},\n\t\t},\n\t\t// TODO this is 
caused by hasStartID() functions.This will be done when implementing the EndID logic\n\t\t// {`next entry matches`,\n\t\t// \tstore.FetchRequest{StartID: 1, Direction: 0, Count: 1},\n\t\t// \tSortedIndexList{\n\t\t// \t\t{3, uint64(21), 10, 0}, // messageId, offset, size, fileId\n\t\t// \t},\n\t\t// },\n\t\t{`entry before matches`,\n\t\t\tstore.FetchRequest{StartID: 5, Direction: -1, Count: 2},\n\t\t\tindexList{\n\t\t\t\titems: []*index{\n\t\t\t\t\t{4, uint64(43), 10, 0},  // messageId, offset, size, fileId\n\t\t\t\t\t{5, uint64(109), 10, 0}, // messageId, offset, size, fileId\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{`backward, no match`,\n\t\t\tstore.FetchRequest{StartID: 1, Direction: -1, Count: 1},\n\t\t\tindexList{},\n\t\t},\n\t\t{`forward, no match (out of files)`,\n\t\t\tstore.FetchRequest{StartID: 99999999999, Direction: 1, Count: 1},\n\t\t\tindexList{},\n\t\t},\n\t\t{`forward, no match (after last id in last file)`,\n\t\t\tstore.FetchRequest{StartID: 31, Direction: 1, Count: 1},\n\t\t\tindexList{},\n\t\t},\n\t\t{`forward, overlapping files`,\n\t\t\tstore.FetchRequest{StartID: 9, Direction: 1, Count: 3},\n\t\t\tindexList{\n\t\t\t\titems: []*index{\n\t\t\t\t\t{9, uint64(87), 10, 0},  // messageId, offset, size, fileId\n\t\t\t\t\t{10, uint64(65), 10, 0}, // messageId, offset, size, fileId\n\t\t\t\t\t{13, uint64(65), 10, 1}, // messageId, offset, size, fileId\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{`backward, overlapping files`,\n\t\t\tstore.FetchRequest{StartID: 26, Direction: -1, Count: 4},\n\t\t\tindexList{\n\t\t\t\titems: []*index{\n\t\t\t\t\t// {15, uint64(43), 10, 1},  // messageId, offset, size, fileId\n\t\t\t\t\t{22, uint64(87), 10, 1},  // messageId, offset, size, fileId\n\t\t\t\t\t{23, uint64(109), 10, 1}, // messageId, offset, size, fileId\n\t\t\t\t\t{24, uint64(21), 10, 2},  // messageId, offset, size, fileId\n\t\t\t\t\t{26, uint64(43), 10, 2},  // messageId, offset, size, fileId\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{`forward, over more then 2 
files`,\n\t\t\tstore.FetchRequest{StartID: 5, Direction: 1, Count: 10},\n\t\t\tindexList{\n\t\t\t\titems: []*index{\n\t\t\t\t\t{5, uint64(109), 10, 0},  // messageId, offset, size, fileId\n\t\t\t\t\t{8, uint64(21), 10, 1},   // messageId, offset, size, fileId\n\t\t\t\t\t{9, uint64(87), 10, 0},   // messageId, offset, size, fileId\n\t\t\t\t\t{10, uint64(65), 10, 0},  // messageId, offset, size, fileId\n\t\t\t\t\t{13, uint64(65), 10, 1},  // messageId, offset, size, fileId\n\t\t\t\t\t{15, uint64(43), 10, 1},  // messageId, offset, size, fileId\n\t\t\t\t\t{22, uint64(87), 10, 1},  // messageId, offset, size, fileId\n\t\t\t\t\t{23, uint64(109), 10, 1}, // messageId, offset, size, fileId\n\t\t\t\t\t{24, uint64(21), 10, 2},  // messageId, offset, size, fileId\n\t\t\t\t\t{26, uint64(43), 10, 2},  // messageId, offset, size, fileId\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testcase := range testCases {\n\t\ttestcase.req.Partition = \"myMessages\"\n\t\tfetchEntries, err := mStore.calculateFetchList(&testcase.req)\n\t\ta.NoError(err, \"Tescase: \"+testcase.description)\n\t\ta.True(matchSortedList(t, testcase.expectedResults, *fetchEntries), \"Tescase: \"+testcase.description)\n\t}\n}\n\nfunc matchSortedList(t *testing.T, expected, actual indexList) bool {\n\tif !assert.Equal(t, expected.len(), actual.len(), \"Invalid length\") {\n\t\treturn false\n\t}\n\n\terr := expected.mapWithPredicate(func(elem *index, i int) error {\n\t\ta := actual.get(i)\n\t\tassert.Equal(t, *elem, *a)\n\t\tif elem.id != a.id ||\n\t\t\telem.offset != a.offset ||\n\t\t\telem.size != a.size ||\n\t\t\telem.fileID != a.fileID {\n\t\t\treturn errors.New(\"Element not equal!\")\n\t\t}\n\t\treturn nil\n\t})\n\treturn assert.NoError(t, err)\n}\n\nfunc Test_Partition_Fetch(t *testing.T) {\n\ta := assert.New(t)\n\n\t// allow five messages per file\n\tmessagesPerFile = uint64(5)\n\n\tmsgData := []byte(\"1111111111\")  // 10 bytes message\n\tmsgData2 := []byte(\"2222222222\") // 10 bytes message\n\tmsgData3 
:= []byte(\"3333333333\") // 10 bytes message\n\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_partition_test\")\n\tdefer os.RemoveAll(dir)\n\n\tmStore, _ := newMessagePartition(dir, \"myMessages\")\n\n\t// File header: MAGIC_NUMBER + FILE_NUMBER_VERSION = 9 bytes in the file\n\t// For each stored message there is a 12 bytes write that contains the msgID and size\n\n\ta.NoError(mStore.Store(uint64(3), msgData)) // stored offset 21, size: 10\n\ta.NoError(mStore.Store(uint64(4), msgData)) // stored offset 21+10+12=43\n\n\ta.NoError(mStore.Store(uint64(10), msgData)) // stored offset 43+22=65\n\n\ta.NoError(mStore.Store(uint64(9), msgData2)) // stored offset 65+22=87\n\ta.NoError(mStore.Store(uint64(5), msgData3)) // stored offset 87+22=109\n\n\t// here second file will start\n\ta.NoError(mStore.Store(uint64(8), msgData2))  // stored offset 21\n\ta.NoError(mStore.Store(uint64(15), msgData))  // stored offset 43\n\ta.NoError(mStore.Store(uint64(13), msgData3)) // stored offset 65\n\n\ta.NoError(mStore.Store(uint64(22), msgData)) // stored offset 87\n\ta.NoError(mStore.Store(uint64(23), msgData)) // stored offset 109\n\n\t// third file\n\ta.NoError(mStore.Store(uint64(24), msgData)) // stored offset 21\n\ta.NoError(mStore.Store(uint64(26), msgData)) // stored offset 43\n\n\ta.NoError(mStore.Store(uint64(30), msgData)) // stored offset 65\n\n\tdefer a.NoError(mStore.Close())\n\n\ttestCases := []struct {\n\t\tdescription     string\n\t\treq             store.FetchRequest\n\t\texpectedResults []string\n\t}{\n\t\t{`direct match`,\n\t\t\tstore.FetchRequest{StartID: 3, Direction: 0, Count: 1},\n\t\t\t[]string{\"1111111111\"},\n\t\t},\n\t\t{`direct match in second file`,\n\t\t\tstore.FetchRequest{StartID: 8, Direction: 0, Count: 1},\n\t\t\t[]string{\"2222222222\"},\n\t\t},\n\t\t{`next entry matches`,\n\t\t\tstore.FetchRequest{StartID: 13, Direction: 0, Count: 1},\n\t\t\t[]string{\"3333333333\"},\n\t\t},\n\t\t{`entry before matches`,\n\t\t\tstore.FetchRequest{StartID: 5, 
Direction: -1, Count: 2},\n\t\t\t[]string{\"1111111111\", \"3333333333\"},\n\t\t},\n\t\t{`backward, no match`,\n\t\t\tstore.FetchRequest{StartID: 1, Direction: -1, Count: 1},\n\t\t\t[]string{},\n\t\t},\n\t\t{`forward, no match (out of files)`,\n\t\t\tstore.FetchRequest{StartID: 99999999999, Direction: 1, Count: 1},\n\t\t\t[]string{},\n\t\t},\n\t\t{`forward, no match (after last id in last file)`,\n\t\t\tstore.FetchRequest{StartID: mStore.maxMessageID + uint64(8), Direction: 1, Count: 1},\n\t\t\t[]string{},\n\t\t},\n\t\t{`forward, overlapping files`,\n\t\t\tstore.FetchRequest{StartID: 9, Direction: 1, Count: 3},\n\t\t\t[]string{\"2222222222\", \"1111111111\", \"3333333333\"},\n\t\t},\n\t\t{`forward, over more then 2 files`,\n\t\t\tstore.FetchRequest{StartID: 5, Direction: 1, Count: 10},\n\t\t\t[]string{\"3333333333\", \"2222222222\", \"2222222222\", \"1111111111\", \"3333333333\", \"1111111111\", \"1111111111\", \"1111111111\", \"1111111111\", \"1111111111\"},\n\t\t},\n\t\t{`backward, overlapping files`,\n\t\t\tstore.FetchRequest{StartID: 26, Direction: -1, Count: 4},\n\t\t\t[]string{\"1111111111\", \"1111111111\", \"1111111111\", \"1111111111\"},\n\t\t},\n\t\t{`backward, all messages`,\n\t\t\tstore.FetchRequest{StartID: uint64(100), Direction: -1, Count: 100},\n\t\t\t[]string{\"1111111111\", \"1111111111\", \"3333333333\", \"2222222222\", \"2222222222\", \"1111111111\", \"3333333333\", \"1111111111\", \"1111111111\", \"1111111111\", \"1111111111\", \"1111111111\", \"1111111111\"},\n\t\t},\n\t}\n\tfor _, testcase := range testCases {\n\t\ttestcase.req.Partition = \"myMessages\"\n\t\ttestcase.req.MessageC = make(chan *store.FetchedMessage)\n\t\ttestcase.req.ErrorC = make(chan error)\n\t\ttestcase.req.StartC = make(chan int)\n\n\t\tmessages := []string{}\n\n\t\tmStore.Fetch(&testcase.req)\n\n\t\tselect {\n\t\tcase numberOfResults := <-testcase.req.StartC:\n\t\t\ta.Equal(len(testcase.expectedResults), numberOfResults)\n\t\tcase 
<-time.After(time.Second):\n\t\t\ta.Fail(\"timeout\")\n\t\t\treturn\n\t\t}\n\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg, open := <-testcase.req.MessageC:\n\t\t\t\tif !open {\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t\tmessages = append(messages, string(msg.Message))\n\t\t\tcase err := <-testcase.req.ErrorC:\n\t\t\t\ta.Fail(err.Error())\n\t\t\t\tbreak loop\n\t\t\tcase <-time.After(time.Second):\n\t\t\t\ta.Fail(\"timeout\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ta.Equal(testcase.expectedResults, messages, \"Tescase: \"+testcase.description)\n\t}\n}\n\nfunc TestFilenameGeneration(t *testing.T) {\n\ta := assert.New(t)\n\n\tmStore := &messagePartition{\n\t\tbasedir:   \"/foo/bar/\",\n\t\tname:      \"myMessages\",\n\t\tfileCache: newCache(),\n\t}\n\n\ta.Equal(\"/foo/bar/myMessages-00000000000000000000.msg\", mStore.composeMsgFilenameForPosition(uint64(mStore.fileCache.length())))\n\ta.Equal(\"/foo/bar/myMessages-00000000000000000042.idx\", mStore.composeIdxFilenameForPosition(42))\n\ta.Equal(\"/foo/bar/myMessages-00000000000000000000.idx\", mStore.composeIdxFilenameForPosition(0))\n\ta.Equal(fmt.Sprintf(\"/foo/bar/myMessages-%020d.idx\", messagesPerFile), mStore.composeIdxFilenameForPosition(messagesPerFile))\n}\n"
  },
  {
    "path": "server/store/filestore/message_store.go",
    "content": "// Package filestore is a filesystem-based implementation of the MessageStore interface.\npackage filestore\n\nimport (\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// FileMessageStore is a struct used by the filesystem-based implementation of the MessageStore interface.\n// It holds the base directory, a map of messagePartitions etc.\ntype FileMessageStore struct {\n\tpartitions map[string]*messagePartition\n\tbasedir    string\n\tmutex      sync.RWMutex\n}\n\n// New returns a new FileMessageStore.\nfunc New(basedir string) *FileMessageStore {\n\treturn &FileMessageStore{\n\t\tpartitions: make(map[string]*messagePartition),\n\t\tbasedir:    basedir,\n\t}\n}\n\n// MaxMessageID is a part of the `store.MessageStore` implementation.\nfunc (fms *FileMessageStore) MaxMessageID(partition string) (uint64, error) {\n\tp, err := fms.Partition(partition)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn p.MaxMessageID(), nil\n}\n\n// Stop the FileMessageStore.\n// Implements the service.stopable interface.\nfunc (fms *FileMessageStore) Stop() error {\n\tfms.mutex.Lock()\n\tdefer fms.mutex.Unlock()\n\n\tlogger.Info(\"Stopping\")\n\n\tvar returnError error\n\tfor key, partition := range fms.partitions {\n\t\tif err := partition.Close(); err != nil {\n\t\t\treturnError = err\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"key\": key,\n\t\t\t\t\"err\": err,\n\t\t\t}).Error(\"Error on closing message store partition\")\n\t\t}\n\t\tdelete(fms.partitions, key)\n\t}\n\treturn returnError\n}\n\n// GenerateNextMsgID is a part of the `store.MessageStore` implementation.\nfunc (fms *FileMessageStore) GenerateNextMsgID(partitionName string, nodeID uint8) (uint64, int64, error) {\n\tp, err := fms.Partition(partitionName)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn 
p.(*messagePartition).generateNextMsgID(nodeID)\n}\n\n// StoreMessage is a part of the `store.MessageStore` implementation.\nfunc (fms *FileMessageStore) StoreMessage(message *protocol.Message, nodeID uint8) (int, error) {\n\tpartitionName := message.Path.Partition()\n\n\t// If nodeID is zero means we are running in standalone more, otherwise\n\t// if the message has no nodeID it means it was received by this node\n\tif nodeID == 0 || message.NodeID == 0 {\n\t\tid, ts, err := fms.GenerateNextMsgID(partitionName, nodeID)\n\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"Generation of id failed\")\n\t\t\treturn 0, err\n\t\t}\n\n\t\tmessage.ID = id\n\t\tmessage.Time = ts\n\t\tmessage.NodeID = nodeID\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"generatedID\":   id,\n\t\t\t\"generatedTime\": message.Time,\n\t\t}).Debug(\"Locally generated ID for message\")\n\t}\n\n\tdata := message.Bytes()\n\n\tif err := fms.Store(partitionName, message.ID, message.Bytes()); err != nil {\n\t\tlogger.\n\t\t\tWithError(err).WithField(\"partition\", partitionName).\n\t\t\tError(\"Error storing locally generated  messagein partition\")\n\t\treturn 0, err\n\t}\n\n\tlogger.WithFields(log.Fields{\n\t\t\"id\":            message.ID,\n\t\t\"ts\":            message.Time,\n\t\t\"partition\":     partitionName,\n\t\t\"messageUserID\": message.UserID,\n\t\t\"nodeID\":        nodeID,\n\t}).Debug(\"Stored message\")\n\n\treturn len(data), nil\n}\n\n// Store stores a message within a partition.\n// It is a part of the `store.MessageStore` implementation.\nfunc (fms *FileMessageStore) Store(partition string, msgID uint64, msg []byte) error {\n\tp, err := fms.Partition(partition)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.Store(msgID, msg)\n}\n\n// Fetch asynchronously fetches a set of messages defined by the fetch request.\n// It is a part of the `store.MessageStore` implementation.\nfunc (fms *FileMessageStore) Fetch(req *store.FetchRequest) {\n\tp, err := 
fms.Partition(req.Partition)\n\tif err != nil {\n\t\treq.ErrorC <- err\n\t\treturn\n\t}\n\tp.Fetch(req)\n}\n\n// DoInTx is a part of the `store.MessageStore` implementation.\nfunc (fms *FileMessageStore) DoInTx(partition string, fnToExecute func(maxMessageId uint64) error) error {\n\tp, err := fms.Partition(partition)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.DoInTx(fnToExecute)\n}\n\n// Partitions will walk the filesystem and return all message partitions\n// TODO Bogdan This is not required anymore as the store already read the partitions\n// and saved them in the cacheEntry for the store. Retrieve from there if possible\nfunc (fms *FileMessageStore) Partitions() (partitions []store.MessagePartition, err error) {\n\tentries, err := ioutil.ReadDir(fms.basedir)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error reading partitions\")\n\t\treturn nil, err\n\t}\n\n\tfor _, entry := range entries {\n\t\tif entry.IsDir() {\n\t\t\tpartition, err := fms.Partition(entry.Name())\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpartitions = append(partitions, partition)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (fms *FileMessageStore) Partition(partition string) (store.MessagePartition, error) {\n\tfms.mutex.Lock()\n\tdefer fms.mutex.Unlock()\n\n\tpartitionStore, exist := fms.partitions[partition]\n\tif !exist {\n\t\tdir := path.Join(fms.basedir, partition)\n\t\tif _, errStat := os.Stat(dir); errStat != nil {\n\t\t\tif os.IsNotExist(errStat) {\n\t\t\t\tif errMkdir := os.MkdirAll(dir, 0700); errMkdir != nil {\n\t\t\t\t\tlogger.WithError(errMkdir).Error(\"partitionStore\")\n\t\t\t\t\treturn nil, errMkdir\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlogger.WithError(errStat).Error(\"partitionStore\")\n\t\t\t\treturn nil, errStat\n\t\t\t}\n\t\t}\n\t\tvar err error\n\t\tpartitionStore, err = newMessagePartition(dir, partition)\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"err\", err).Error(\"partitionStore\")\n\t\t\treturn nil, 
err\n\t\t}\n\t\tfms.partitions[partition] = partitionStore\n\t}\n\treturn partitionStore, nil\n}\n\n// Check returns if available storage space is still above a certain threshold.\nfunc (fms *FileMessageStore) Check() error {\n\tvar stat syscall.Statfs_t\n\n\tsyscall.Statfs(fms.basedir, &stat)\n\n\t// available space in bytes = available blocks * size per block\n\tfreeSpace := stat.Bavail * uint64(stat.Bsize)\n\t// total space in bytes = total system blocks * size per block\n\ttotalSpace := stat.Blocks * uint64(stat.Bsize)\n\n\tusedSpacePercentage := 1 - (float64(freeSpace) / float64(totalSpace))\n\n\tif usedSpacePercentage > 0.95 {\n\t\terrorMessage := \"Storage is almost full\"\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"percentage\": usedSpacePercentage,\n\t\t}).Warn(errorMessage)\n\t\treturn errors.New(errorMessage)\n\t}\n\n\treturn nil\n}\n\n// extractPartitionName returns the partition name from a filepath\n// The files would have this format /basepath/partition-number.extenstion\n// if filepath is not in the right format empty string is returned\nfunc extractPartitionName(p string) string {\n\ts := strings.SplitN(path.Base(p), \"-\", 2)\n\tif len(s) <= 2 {\n\t\treturn \"\"\n\t}\n\treturn s[0]\n}\n"
  },
  {
    "path": "server/store/filestore/message_store_test.go",
    "content": "package filestore\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/server/store\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc Test_Fetch(t *testing.T) {\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_store_test\")\n\t//defer os.RemoveAll(dir)\n\n\t// when i store a message\n\tmStore := New(dir)\n\ta.NoError(mStore.Store(\"p1\", uint64(1), []byte(\"aaaaaaaaaa\")))\n\ta.NoError(mStore.Store(\"p1\", uint64(2), []byte(\"bbbbbbbbbb\")))\n\ta.NoError(mStore.Store(\"p2\", uint64(1), []byte(\"1111111111\")))\n\ta.NoError(mStore.Store(\"p2\", uint64(2), []byte(\"2222222222\")))\n\n\ttestCases := []struct {\n\t\tdescription     string\n\t\treq             store.FetchRequest\n\t\texpectedResults []string\n\t}{\n\t\t{`match in partition 1`,\n\t\t\tstore.FetchRequest{Partition: \"p1\", StartID: 2, Count: 1},\n\t\t\t[]string{\"bbbbbbbbbb\"},\n\t\t},\n\t\t{`match in partition 2`,\n\t\t\tstore.FetchRequest{Partition: \"p2\", StartID: 2, Count: 1},\n\t\t\t[]string{\"2222222222\"},\n\t\t},\n\t}\n\n\tfor _, testcase := range testCases {\n\t\ttestcase.req.MessageC = make(chan *store.FetchedMessage)\n\t\ttestcase.req.ErrorC = make(chan error)\n\t\ttestcase.req.StartC = make(chan int)\n\n\t\tmessages := []string{}\n\n\t\tmStore.Fetch(&testcase.req)\n\n\t\tselect {\n\t\tcase numberOfResults := <-testcase.req.StartC:\n\t\t\ta.Equal(len(testcase.expectedResults), numberOfResults)\n\t\tcase <-time.After(time.Second):\n\t\t\ta.Fail(\"timeout\")\n\t\t\treturn\n\t\t}\n\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg, open := <-testcase.req.MessageC:\n\t\t\t\tif !open {\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t\tmessages = append(messages, string(msg.Message))\n\t\t\tcase err := <-testcase.req.ErrorC:\n\t\t\t\ta.Fail(err.Error())\n\t\t\t\tbreak loop\n\t\t\tcase <-time.After(time.Second):\n\t\t\t\ta.Fail(\"timeout\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ta.Equal(testcase.expectedResults, 
messages, \"Tescase: \"+testcase.description)\n\t}\n}\n\nfunc Test_MessageStore_Close(t *testing.T) {\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_store_test\")\n\t//defer os.RemoveAll(dir)\n\n\t// when i store a message\n\tstore := New(dir)\n\ta.NoError(store.Store(\"p1\", uint64(1), []byte(\"aaaaaaaaaa\")))\n\ta.NoError(store.Store(\"p2\", uint64(1), []byte(\"1111111111\")))\n\n\ta.Equal(2, len(store.partitions))\n\n\ta.NoError(store.Stop())\n\n\ta.Equal(0, len(store.partitions))\n}\n\nfunc Test_MaxMessageId(t *testing.T) {\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_store_test\")\n\t//defer os.RemoveAll(dir)\n\texpectedMaxID := 2\n\n\t// when i store a message\n\tstore := New(dir)\n\ta.NoError(store.Store(\"p1\", uint64(1), []byte(\"aaaaaaaaaa\")))\n\ta.NoError(store.Store(\"p1\", uint64(expectedMaxID), []byte(\"bbbbbbbbbb\")))\n\n\tmaxID, err := store.MaxMessageID(\"p1\")\n\ta.Nil(err, \"No error should be received for partition p1\")\n\ta.Equal(maxID, uint64(expectedMaxID), fmt.Sprintf(\"MaxId should be [%d]\", expectedMaxID))\n}\n\nfunc Test_MaxMessageIdError(t *testing.T) {\n\ta := assert.New(t)\n\tstore := New(\"/TestDir\")\n\n\t_, err := store.MaxMessageID(\"p2\")\n\ta.NotNil(err)\n}\n\nfunc Test_MessagePartitionReturningError(t *testing.T) {\n\ta := assert.New(t)\n\n\tstore := New(\"/TestDir\")\n\t_, err := store.Partition(\"p1\")\n\ta.NotNil(err)\n\tfmt.Println(err)\n\n\tstore2 := New(\"/\")\n\t_, err2 := store2.Partition(\"p1\")\n\tfmt.Println(err2)\n}\n\nfunc Test_FetchWithError(t *testing.T) {\n\ta := assert.New(t)\n\tmStore := New(\"/TestDir\")\n\n\tchanCallBack := make(chan error, 1)\n\taFetchRequest := store.FetchRequest{Partition: \"p1\", StartID: 2, Count: 1, ErrorC: chanCallBack}\n\tmStore.Fetch(&aFetchRequest)\n\terr := <-aFetchRequest.ErrorC\n\ta.NotNil(err)\n}\n\nfunc Test_StoreWithError(t *testing.T) {\n\ta := assert.New(t)\n\tmStore := New(\"/TestDir\")\n\n\terr := mStore.Store(\"p1\", 
uint64(1), []byte(\"124151qfas\"))\n\ta.NotNil(err)\n}\n\nfunc Test_DoInTx(t *testing.T) {\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_store_test\")\n\tmStore := New(dir)\n\ta.NoError(mStore.Store(\"p1\", uint64(1), []byte(\"aaaaaaaaaa\")))\n\n\terr := mStore.DoInTx(\"p1\", func(maxId uint64) error {\n\t\treturn nil\n\t})\n\ta.Nil(err)\n}\n\nfunc Test_DoInTxError(t *testing.T) {\n\ta := assert.New(t)\n\tmStore := New(\"/TestDir\")\n\n\terr := mStore.DoInTx(\"p2\", nil)\n\ta.NotNil(err)\n}\n\nfunc Test_Check(t *testing.T) {\n\ta := assert.New(t)\n\tdir, _ := ioutil.TempDir(\"\", \"guble_message_store_test\")\n\tmStore := New(dir)\n\ta.NoError(mStore.Store(\"p1\", uint64(1), []byte(\"aaaaaaaaaa\")))\n\n\terr := mStore.Check()\n\ta.Nil(err)\n}\n\n// func Test_Partitions(t *testing.T) {\n// \t// Store multiple partitions then recreate the store and see if they are picked up\n// \ta := assert.New(t)\n// \tmsg := []byte(\"test message data\")\n\n// \tdir, err := ioutil.TempDir(\"\", \"guble_message_store_test\")\n// \ta.NoError(err)\n// \tstore := New(dir)\n\n// \ta.NoError(store.Store(\"p1\", uint64(2), msg))\n// \ta.NoError(store.Store(\"p2\", uint64(2), msg))\n// \ta.NoError(store.Store(\"p3\", uint64(2), msg))\n\n// \tstore2 := New(dir)\n// \tpartitions, err := store2.Partitions()\n// \ta.NoError(err)\n// \ta.Equal(3, len(partitions))\n// \ta.Equal(\"p1\", partitions[0].Name)\n// \ta.Equal(\"p2\", partitions[1].Name)\n// \ta.Equal(\"p3\", partitions[2].Name)\n\n// }\n"
  },
  {
    "path": "server/store/store.go",
    "content": "package store\n\nimport \"github.com/smancke/guble/protocol\"\n\n// MessageStore is an interface for a persistence backend storing topics.\ntype MessageStore interface {\n\n\t// Store a message within a partition.\n\t// The message id must be equal to MaxMessageId +1.\n\t// So the caller has to maintain the consistence between\n\t// fetching an id and storing the message.\n\tStore(partition string, messageID uint64, data []byte) error\n\n\t// Generates a new ID for the message if it's new and stores it\n\t// Returns the size of the new message or error\n\t// Takes the message and cluster node ID as parameters.\n\tStoreMessage(*protocol.Message, uint8) (int, error)\n\n\t// Fetch fetches a set of messages.\n\t// The results, as well as errors are communicated asynchronously using\n\t// the channels, supplied by the FetchRequest.\n\tFetch(*FetchRequest)\n\n\t// MaxMessageId returns the highest message id for a particular partition\n\tMaxMessageID(partition string) (uint64, error)\n\n\t// DoInTx executes the supplied function within the locking context of the message partition.\n\t// This ensures, that wile the code is executed, no change to the supplied maxMessageId can occur.\n\t// The error result if the fnToExecute or an error while locking will be returned by DoInTx.\n\tDoInTx(partition string, fnToExecute func(uint64) error) error\n\n\t// GenerateNextMsgId generates a new message ID based on a timestamp in a strictly monotonically order\n\tGenerateNextMsgID(partition string, nodeID uint8) (uint64, int64, error)\n\n\tPartition(string) (MessagePartition, error)\n\n\t// Partitions returns a slice of `MessagePartition` available in the store\n\tPartitions() ([]MessagePartition, error)\n}\n\ntype MessagePartition interface {\n\n\t// Name returns the name of the partition\n\tName() string\n\n\t// MaxMessageID return the last message ID stored in this partition\n\tMaxMessageID() uint64\n\n\tCount() uint64\n\n\tStore(uint64, []byte) error\n\n\tFetch(req 
*FetchRequest)\n\n\tDoInTx(func(uint64) error) error\n}\n"
  },
  {
    "path": "server/utils_test.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/smancke/guble/server/connector\"\n\t\"github.com/smancke/guble/server/fcm\"\n\n\t\"errors\"\n\n\t\"github.com/smancke/guble/client\"\n\t\"github.com/smancke/guble/server/service\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"gopkg.in/alecthomas/kingpin.v2\"\n)\n\ntype testClusterNodeConfig struct {\n\tHttpListen  string // \"host:port\" format or just \":port\"\n\tNodeID      int\n\tNodePort    int\n\tStoragePath string // if empty it will create a temporary directory\n\tMemoryStore string\n\tKVStore     string\n\tRemotes     string\n}\n\nfunc (tnc *testClusterNodeConfig) parseConfig() error {\n\tvar err error\n\n\tdir := tnc.StoragePath\n\tif dir == \"\" {\n\t\tdir, err = ioutil.TempDir(\"\", \"guble_test\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ttnc.StoragePath = dir\n\n\targs := []string{\n\t\t\"--log\", \"debug\",\n\t\t\"--http\", tnc.HttpListen,\n\t\t\"--storage-path\", tnc.StoragePath,\n\t\t\"--health-endpoint\", \"\",\n\n\t\t\"--fcm\",\n\t\t\"--fcm-api-key\", \"WILL BE OVERWRITTEN\",\n\t\t\"--fcm-workers\", \"4\",\n\t}\n\n\tif tnc.MemoryStore != \"\" {\n\t\targs = append(args, \"--ms\", tnc.MemoryStore)\n\t}\n\n\tif tnc.KVStore != \"\" {\n\t\targs = append(args, \"--kvs\", tnc.KVStore)\n\t}\n\n\tif tnc.NodeID > 0 {\n\t\tif tnc.Remotes == \"\" {\n\t\t\treturn fmt.Errorf(\"Missing Remotes value when running in cluster mode.\")\n\t\t}\n\n\t\targs = append(\n\t\t\targs,\n\t\t\t\"--node-id\", strconv.Itoa(tnc.NodeID),\n\t\t\t\"--node-port\", strconv.Itoa(tnc.NodePort),\n\t\t\t\"--remotes\", tnc.Remotes,\n\t\t)\n\t}\n\n\t_, err = kingpin.CommandLine.Parse(args)\n\treturn err\n}\n\ntype testClusterNode struct {\n\ttestClusterNodeConfig\n\tt       *testing.T\n\tFCM     *TestFCM\n\tService *service.Service\n}\n\nfunc newTestClusterNode(t *testing.T, 
nodeConfig testClusterNodeConfig) *testClusterNode {\n\ta := assert.New(t)\n\n\terr := nodeConfig.parseConfig()\n\tif !a.NoError(err) {\n\t\treturn nil\n\t}\n\n\ts := StartService()\n\n\tvar (\n\t\tfcmConnector connector.ResponsiveConnector\n\t\tok           bool\n\t)\n\tfor _, iface := range s.ModulesSortedByStartOrder() {\n\t\tif fcmConnector, ok = iface.(connector.ResponsiveConnector); ok {\n\t\t\tbreak\n\t\t}\n\t}\n\tif !a.True(ok, \"There should be a module of type GCMConnector\") {\n\t\treturn nil\n\t}\n\n\treturn &testClusterNode{\n\t\ttestClusterNodeConfig: nodeConfig,\n\t\tt: t,\n\t\tFCM: &TestFCM{\n\t\t\tt:         t,\n\t\t\tConnector: fcmConnector,\n\t\t},\n\t\tService: s,\n\t}\n}\n\nfunc (tcn *testClusterNode) client(userID string, bufferSize int, autoReconnect bool) (client.Client, error) {\n\tserverAddr := tcn.Service.WebServer().GetAddr()\n\twsURL := \"ws://\" + serverAddr + \"/stream/user/\" + userID\n\thttpURL := \"http://\" + serverAddr\n\n\treturn client.Open(wsURL, httpURL, bufferSize, autoReconnect)\n}\n\nfunc (tcn *testClusterNode) Subscribe(topic, id string) {\n\ttcn.FCM.subscribe(tcn.Service.WebServer().GetAddr(), topic, id)\n}\n\nfunc (tcn *testClusterNode) Unsubscribe(topic, id string) {\n\ttcn.FCM.unsubscribe(tcn.Service.WebServer().GetAddr(), topic, id)\n}\n\nfunc (tcn *testClusterNode) cleanup(removeDir bool) {\n\ttcn.FCM.cleanup()\n\terr := tcn.Service.Stop()\n\tassert.NoError(tcn.t, err)\n\n\tif removeDir {\n\t\terr = os.RemoveAll(tcn.StoragePath)\n\t\tassert.NoError(tcn.t, err)\n\t}\n}\n\ntype TestFCM struct {\n\tsync.RWMutex\n\tt         *testing.T\n\tConnector connector.ResponsiveConnector\n\tReceived  int // received messages\n\treceiveC  chan bool\n\ttimeout   time.Duration\n}\n\nfunc (tfcm *TestFCM) setupRoundTripper(timeout time.Duration, bufferSize int, response string) {\n\ttfcm.receiveC = make(chan bool, bufferSize)\n\ttfcm.timeout = timeout\n\tsender, err := fcm.CreateFcmSender(response, tfcm.receiveC, 
timeout)\n\tassert.NoError(tfcm.t, err)\n\ttfcm.Connector.SetSender(sender)\n\t// start counting the received messages to FCM\n\ttfcm.receive()\n}\n\nfunc (tfcm *TestFCM) subscribe(addr, topic, id string) {\n\turlFormat := fmt.Sprintf(\"http://%s/fcm/user_%%s/gcm_%%s/%%s\", addr)\n\n\ta := assert.New(tfcm.t)\n\n\tresponse, err := http.Post(\n\t\tfmt.Sprintf(urlFormat, id, id, strings.TrimPrefix(topic, \"/\")), \"text/plain\", bytes.NewBufferString(\"\"),\n\t)\n\tif a.NoError(err) {\n\t\ta.Equal(response.StatusCode, 200)\n\t}\n\n\tbody, err := ioutil.ReadAll(response.Body)\n\ta.NoError(err)\n\ta.Equal(fmt.Sprintf(\"{\\\"subscribed\\\":\\\"%s\\\"}\", topic), string(body))\n}\n\nfunc (tfcm *TestFCM) unsubscribe(addr, topic, id string) {\n\turlFormat := fmt.Sprintf(\"http://%s/fcm/user_%%s/gcm_%%s/%%s\", addr)\n\n\ta := assert.New(tfcm.t)\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodDelete,\n\t\tfmt.Sprintf(urlFormat, id, id, strings.TrimPrefix(topic, \"/\")),\n\t\tbytes.NewBufferString(\"\"))\n\ta.NoError(err)\n\n\thc := &http.Client{}\n\n\tresponse, err := hc.Do(req)\n\tif a.NoError(err) {\n\t\ta.Equal(response.StatusCode, 200)\n\t}\n\n\tbody, err := ioutil.ReadAll(response.Body)\n\ta.NoError(err)\n\ta.Equal(fmt.Sprintf(`{\"unsubscribed\":\"%s\"}`, topic), string(body))\n}\n\n// Wait waits count * tgcm.timeout, wait ensure count number of messages have been waited to pass\n// through GCM round tripper\nfunc (tfcm *TestFCM) wait(count int) {\n\ttime.Sleep(time.Duration(count) * tfcm.timeout)\n}\n\n// Receive starts a goroutine that will receive on the receiveC and increment the Received counter\n// Returns an error if channel is not create\nfunc (tfcm *TestFCM) receive() error {\n\tif tfcm.receiveC == nil {\n\t\treturn errors.New(\"Round tripper not created\")\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tif _, opened := <-tfcm.receiveC; opened {\n\t\t\t\ttfcm.Lock()\n\t\t\t\ttfcm.Received++\n\t\t\t\ttfcm.Unlock()\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (tfcm 
*TestFCM) checkReceived(expected int) {\n\ttime.Sleep((50 * time.Millisecond) + tfcm.timeout)\n\ttfcm.RLock()\n\tdefer tfcm.RUnlock()\n\tassert.Equal(tfcm.t, expected, tfcm.Received)\n}\n\nfunc (tfcm *TestFCM) reset() {\n\ttfcm.Lock()\n\tdefer tfcm.Unlock()\n\ttfcm.Received = 0\n}\n\nfunc (tfcm *TestFCM) cleanup() {\n\tif tfcm.receiveC != nil {\n\t\tclose(tfcm.receiveC)\n\t}\n}\n"
  },
  {
    "path": "server/webserver/logger.go",
    "content": "package webserver\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"webserver\",\n})\n"
  },
  {
    "path": "server/webserver/web_server.go",
    "content": "package webserver\n\nimport (\n\t\"net\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n)\n\n// WebServer is a struct representing a HTTP Server (using a net.Listener and a ServeMux multiplexer).\ntype WebServer struct {\n\tserver *http.Server\n\tln     net.Listener\n\tmux    *http.ServeMux\n\taddr   string\n}\n\n// New returns a new WebServer.\nfunc New(addr string) *WebServer {\n\treturn &WebServer{\n\t\tmux:  http.NewServeMux(),\n\t\taddr: addr,\n\t}\n}\n\n// Start the WebServer (implementing service.startable interface).\nfunc (ws *WebServer) Start() (err error) {\n\tlogger.WithField(\"address\", ws.addr).Info(\"Http server is starting up on address\")\n\n\tws.server = &http.Server{Addr: ws.addr, Handler: ws.mux}\n\tws.ln, err = net.Listen(\"tcp\", ws.addr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\terr = ws.server.Serve(tcpKeepAliveListener{TCPListener: ws.ln.(*net.TCPListener)})\n\t\tif err != nil && !strings.HasSuffix(err.Error(), \"use of closed network connection\") {\n\t\t\tlogger.WithError(err).Error(\"ListenAndServe\")\n\t\t}\n\t\tlogger.WithField(\"address\", ws.addr).Info(\"Http server stopped\")\n\t}()\n\treturn\n}\n\n// Stop the WebServer (implementing service.stopable interface).\nfunc (ws *WebServer) Stop() (err error) {\n\tif ws.ln != nil {\n\t\terr = ws.ln.Close()\n\t}\n\n\t// reset the mux\n\tws.mux = http.NewServeMux()\n\treturn\n}\n\n// Handle the given prefix using the given handler.\n// It is a part of the service.endpoint interface.\nfunc (ws *WebServer) Handle(prefix string, handler http.Handler) {\n\tws.mux.Handle(prefix, handler)\n}\n\n// GetAddr returns the address on which the WebServer is listening.\n// It is a part of the service.endpoint interface.\nfunc (ws *WebServer) GetAddr() string {\n\tif ws.ln == nil {\n\t\treturn \"::unknown::\"\n\t}\n\treturn ws.ln.Addr().String()\n}\n\n// copied from golang: net/http/server.go\n// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted\n// connections. 
It's used by ListenAndServe and ListenAndServeTLS so\n// dead TCP connections (e.g. closing laptop mid-download) eventually\n// go away.\ntype tcpKeepAliveListener struct {\n\t*net.TCPListener\n}\n\nfunc (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {\n\ttc, err := ln.AcceptTCP()\n\tif err != nil {\n\t\treturn\n\t}\n\ttc.SetKeepAlive(true)\n\ttc.SetKeepAlivePeriod(10 * time.Second)\n\treturn tc, nil\n}\n"
  },
  {
    "path": "server/webserver/web_server_test.go",
    "content": "package webserver\n\nimport (\n\t\"bytes\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestStartAndStopWebServer(t *testing.T) {\n\n\t// given: a configured echo webserver\n\tserver := New(\"localhost:3333\")\n\tserver.mux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tbytes, _ := ioutil.ReadAll(r.Body)\n\t\tw.Write(bytes)\n\t})\n\n\t// when: I start the server\n\tserver.Start()\n\ttime.Sleep(time.Millisecond * 10)\n\taddr := server.GetAddr()\n\n\t// and: send a testmessage\n\tresp, err := http.Post(\"http://\"+addr, \"text/plain\", bytes.NewBufferString(\"hello\"))\n\n\t// then: the message is returned\n\tassert.NoError(t, err)\n\tresponseBody, _ := ioutil.ReadAll(resp.Body)\n\tassert.Equal(t, \"hello\", string(responseBody))\n\n\t// and when: we stop the service\n\tserver.Stop()\n\ttime.Sleep(time.Millisecond * 100)\n\n\t// then: the next call returns an error\n\t//       because the server is closed\n\tc2 := &http.Client{}\n\tc2.Transport = &http.Transport{DisableKeepAlives: true}\n\t_, err = c2.Post(\"http://\"+addr, \"text/plain\", bytes.NewBufferString(\"hello\"))\n\tassert.Error(t, err)\n}\n"
  },
  {
    "path": "server/websocket/logger.go",
    "content": "package websocket\n\nimport (\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar logger = log.WithFields(log.Fields{\n\t\"module\": \"websocket\",\n})\n"
  },
  {
    "path": "server/websocket/mocks_auth_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/auth (interfaces: AccessManager)\n\npackage websocket\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tauth \"github.com/smancke/guble/server/auth\"\n)\n\n// Mock of AccessManager interface\ntype MockAccessManager struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockAccessManagerRecorder\n}\n\n// Recorder for MockAccessManager (not exported)\ntype _MockAccessManagerRecorder struct {\n\tmock *MockAccessManager\n}\n\nfunc NewMockAccessManager(ctrl *gomock.Controller) *MockAccessManager {\n\tmock := &MockAccessManager{ctrl: ctrl}\n\tmock.recorder = &_MockAccessManagerRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockAccessManager) EXPECT() *_MockAccessManagerRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockAccessManager) IsAllowed(_param0 auth.AccessType, _param1 string, _param2 protocol.Path) bool {\n\tret := _m.ctrl.Call(_m, \"IsAllowed\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockAccessManagerRecorder) IsAllowed(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"IsAllowed\", arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "server/websocket/mocks_router_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/router (interfaces: Router)\n\npackage websocket\n\nimport (\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/cluster\"\n\t\"github.com/smancke/guble/server/kvstore\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n)\n\n// Mock of Router interface\ntype MockRouter struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockRouterRecorder\n}\n\n// Recorder for MockRouter (not exported)\ntype _MockRouterRecorder struct {\n\tmock *MockRouter\n}\n\nfunc NewMockRouter(ctrl *gomock.Controller) *MockRouter {\n\tmock := &MockRouter{ctrl: ctrl}\n\tmock.recorder = &_MockRouterRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockRouter) EXPECT() *_MockRouterRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockRouter) AccessManager() (auth.AccessManager, error) {\n\tret := _m.ctrl.Call(_m, \"AccessManager\")\n\tret0, _ := ret[0].(auth.AccessManager)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) AccessManager() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"AccessManager\")\n}\n\nfunc (_m *MockRouter) Cluster() *cluster.Cluster {\n\tret := _m.ctrl.Call(_m, \"Cluster\")\n\tret0, _ := ret[0].(*cluster.Cluster)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Cluster() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Cluster\")\n}\n\nfunc (_m *MockRouter) Done() <-chan bool {\n\tret := _m.ctrl.Call(_m, \"Done\")\n\tret0, _ := ret[0].(<-chan bool)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) Done() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Done\")\n}\n\nfunc (_m *MockRouter) Fetch(_param0 *store.FetchRequest) error {\n\tret := _m.ctrl.Call(_m, \"Fetch\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr 
*_MockRouterRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockRouter) GetSubscribers(_param0 string) ([]byte, error) {\n\tret := _m.ctrl.Call(_m, \"GetSubscribers\", _param0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) GetSubscribers(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GetSubscribers\", arg0)\n}\n\nfunc (_m *MockRouter) HandleMessage(_param0 *protocol.Message) error {\n\tret := _m.ctrl.Call(_m, \"HandleMessage\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockRouterRecorder) HandleMessage(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"HandleMessage\", arg0)\n}\n\nfunc (_m *MockRouter) KVStore() (kvstore.KVStore, error) {\n\tret := _m.ctrl.Call(_m, \"KVStore\")\n\tret0, _ := ret[0].(kvstore.KVStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) KVStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"KVStore\")\n}\n\nfunc (_m *MockRouter) MessageStore() (store.MessageStore, error) {\n\tret := _m.ctrl.Call(_m, \"MessageStore\")\n\tret0, _ := ret[0].(store.MessageStore)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) MessageStore() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MessageStore\")\n}\n\nfunc (_m *MockRouter) Subscribe(_param0 *router.Route) (*router.Route, error) {\n\tret := _m.ctrl.Call(_m, \"Subscribe\", _param0)\n\tret0, _ := ret[0].(*router.Route)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockRouterRecorder) Subscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Subscribe\", arg0)\n}\n\nfunc (_m *MockRouter) Unsubscribe(_param0 *router.Route) {\n\t_m.ctrl.Call(_m, \"Unsubscribe\", _param0)\n}\n\nfunc (_mr *_MockRouterRecorder) 
Unsubscribe(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Unsubscribe\", arg0)\n}\n"
  },
  {
    "path": "server/websocket/mocks_store_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/store (interfaces: MessageStore)\n\npackage websocket\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n\tprotocol \"github.com/smancke/guble/protocol\"\n\tstore \"github.com/smancke/guble/server/store\"\n)\n\n// Mock of MessageStore interface\ntype MockMessageStore struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockMessageStoreRecorder\n}\n\n// Recorder for MockMessageStore (not exported)\ntype _MockMessageStoreRecorder struct {\n\tmock *MockMessageStore\n}\n\nfunc NewMockMessageStore(ctrl *gomock.Controller) *MockMessageStore {\n\tmock := &MockMessageStore{ctrl: ctrl}\n\tmock.recorder = &_MockMessageStoreRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockMessageStore) EXPECT() *_MockMessageStoreRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockMessageStore) DoInTx(_param0 string, _param1 func(uint64) error) error {\n\tret := _m.ctrl.Call(_m, \"DoInTx\", _param0, _param1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) DoInTx(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"DoInTx\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) Fetch(_param0 *store.FetchRequest) {\n\t_m.ctrl.Call(_m, \"Fetch\", _param0)\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Fetch(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Fetch\", arg0)\n}\n\nfunc (_m *MockMessageStore) GenerateNextMsgID(_param0 string, _param1 byte) (uint64, int64, error) {\n\tret := _m.ctrl.Call(_m, \"GenerateNextMsgID\", _param0, _param1)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(int64)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\nfunc (_mr *_MockMessageStoreRecorder) GenerateNextMsgID(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"GenerateNextMsgID\", arg0, arg1)\n}\n\nfunc (_m *MockMessageStore) MaxMessageID(_param0 
string) (uint64, error) {\n\tret := _m.ctrl.Call(_m, \"MaxMessageID\", _param0)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) MaxMessageID(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"MaxMessageID\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partition(_param0 string) (store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partition\", _param0)\n\tret0, _ := ret[0].(store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partition(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partition\", arg0)\n}\n\nfunc (_m *MockMessageStore) Partitions() ([]store.MessagePartition, error) {\n\tret := _m.ctrl.Call(_m, \"Partitions\")\n\tret0, _ := ret[0].([]store.MessagePartition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Partitions() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Partitions\")\n}\n\nfunc (_m *MockMessageStore) Store(_param0 string, _param1 uint64, _param2 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Store\", _param0, _param1, _param2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockMessageStoreRecorder) Store(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Store\", arg0, arg1, arg2)\n}\n\nfunc (_m *MockMessageStore) StoreMessage(_param0 *protocol.Message, _param1 byte) (int, error) {\n\tret := _m.ctrl.Call(_m, \"StoreMessage\", _param0, _param1)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\nfunc (_mr *_MockMessageStoreRecorder) StoreMessage(arg0, arg1 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"StoreMessage\", arg0, arg1)\n}\n"
  },
  {
    "path": "server/websocket/mocks_websocket_gen_test.go",
    "content": "// Automatically generated by MockGen. DO NOT EDIT!\n// Source: github.com/smancke/guble/server/websocket (interfaces: WSConnection)\n\npackage websocket\n\nimport (\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// Mock of WSConnection interface\ntype MockWSConnection struct {\n\tctrl     *gomock.Controller\n\trecorder *_MockWSConnectionRecorder\n}\n\n// Recorder for MockWSConnection (not exported)\ntype _MockWSConnectionRecorder struct {\n\tmock *MockWSConnection\n}\n\nfunc NewMockWSConnection(ctrl *gomock.Controller) *MockWSConnection {\n\tmock := &MockWSConnection{ctrl: ctrl}\n\tmock.recorder = &_MockWSConnectionRecorder{mock}\n\treturn mock\n}\n\nfunc (_m *MockWSConnection) EXPECT() *_MockWSConnectionRecorder {\n\treturn _m.recorder\n}\n\nfunc (_m *MockWSConnection) Close() {\n\t_m.ctrl.Call(_m, \"Close\")\n}\n\nfunc (_mr *_MockWSConnectionRecorder) Close() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Close\")\n}\n\nfunc (_m *MockWSConnection) Receive(_param0 *[]byte) error {\n\tret := _m.ctrl.Call(_m, \"Receive\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockWSConnectionRecorder) Receive(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Receive\", arg0)\n}\n\nfunc (_m *MockWSConnection) Send(_param0 []byte) error {\n\tret := _m.ctrl.Call(_m, \"Send\", _param0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\nfunc (_mr *_MockWSConnectionRecorder) Send(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCall(_mr.mock, \"Send\", arg0)\n}\n"
  },
  {
    "path": "server/websocket/receiver.go",
    "content": "package websocket\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n)\n\nvar errUnreadMsgsAvailable = errors.New(\"unread messages available\")\n\n// Receiver is a helper class, for managing a combined pull push on a topic.\n// It is used for implementation of the + (receive) command in the guble protocol.\ntype Receiver struct {\n\tcancelC             chan bool\n\tsendC               chan []byte\n\tapplicationID       string\n\trouter              router.Router\n\tmessageStore        store.MessageStore\n\tpath                protocol.Path\n\tdoFetch             bool\n\tdoSubscription      bool\n\tstartID             int64\n\tmaxCount            int\n\tlastSentID          uint64\n\tshouldStop          bool\n\troute               *router.Route\n\tenableNotifications bool\n\tuserID              string\n}\n\n// NewReceiverFromCmd parses the info in the command\nfunc NewReceiverFromCmd(\n\tapplicationID string,\n\tcmd *protocol.Cmd,\n\tsendChannel chan []byte,\n\trouter router.Router,\n\tuserID string) (rec *Receiver, err error) {\n\n\tmessageStore, err := router.MessageStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trec = &Receiver{\n\t\tapplicationID:       applicationID,\n\t\tsendC:               sendChannel,\n\t\trouter:              router,\n\t\tmessageStore:        messageStore,\n\t\tcancelC:             make(chan bool, 1),\n\t\tenableNotifications: true,\n\t\tuserID:              userID,\n\t}\n\tif len(cmd.Arg) == 0 || cmd.Arg[0] != '/' {\n\t\treturn nil, fmt.Errorf(\"command requires at least a path argument, but non given\")\n\t}\n\n\targs := strings.SplitN(cmd.Arg, \" \", 3)\n\trec.path = protocol.Path(args[0])\n\n\tif len(args) > 1 {\n\t\trec.doFetch = true\n\t\trec.startID, err = strconv.ParseInt(args[1], 10, 64)\n\t\tif err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"startid has to be empty or int, but was %q: %v\", args[1], err)\n\t\t}\n\t}\n\n\trec.doSubscription = true\n\tif len(args) > 2 {\n\t\trec.doSubscription = false\n\t\trec.maxCount, err = strconv.Atoi(args[2])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"maxCount has to be empty or int, but was %q: %v\", args[1], err)\n\t\t}\n\t}\n\n\treturn rec, nil\n}\n\n// Start starts the receiver loop\nfunc (rec *Receiver) Start() error {\n\trec.shouldStop = false\n\tif rec.doFetch && !rec.doSubscription {\n\t\tgo rec.fetchOnlyLoop()\n\t} else {\n\t\tgo rec.subscriptionLoop()\n\t}\n\treturn nil\n}\n\nfunc (rec *Receiver) subscriptionLoop() {\n\tfor !rec.shouldStop {\n\t\tif rec.doFetch {\n\n\t\t\tif err := rec.fetch(); err != nil {\n\t\t\t\tlogger.WithError(err).WithField(\"rec\", rec).Error(\"Error while fetching subscription\")\n\t\t\t\trec.sendError(protocol.ERROR_INTERNAL_SERVER, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := rec.messageStore.DoInTx(rec.path.Partition(), rec.subscribeIfNoUnreadMessagesAvailable); err != nil {\n\t\t\t\tif err == errUnreadMsgsAvailable {\n\t\t\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\t\t\"lastSentId\": rec.lastSentID,\n\t\t\t\t\t\t\"receiver\":   rec,\n\t\t\t\t\t}).Error(\"errUnreadMsgsAvailable\")\n\t\t\t\t\trec.startID = int64(rec.lastSentID) + 1\n\t\t\t\t\tcontinue // fetch again\n\t\t\t\t} else {\n\t\t\t\t\tlogger.WithError(err).WithField(\"recStartId\", rec.startID).\n\t\t\t\t\t\tError(\"Error while subscribeIfNoUnreadMessagesAvailable\")\n\t\t\t\t\trec.sendError(protocol.ERROR_INTERNAL_SERVER, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\trec.subscribe()\n\t\t}\n\t\trec.receiveFromSubscription()\n\n\t\tif !rec.shouldStop {\n\t\t\t//fmt.Printf(\" router closed .. on msg: %v\\n\", rec.lastSendId)\n\t\t\t// the router kicked us out, because we are too slow for realtime listening,\n\t\t\t// so we setup parameters for fetching and closing the gap. 
Than we can subscribe again.\n\t\t\trec.startID = int64(rec.lastSentID) + 1\n\t\t\trec.doFetch = true\n\t\t}\n\t}\n}\n\nfunc (rec *Receiver) subscribeIfNoUnreadMessagesAvailable(maxMessageID uint64) error {\n\tif maxMessageID > rec.lastSentID {\n\t\treturn errUnreadMsgsAvailable\n\t}\n\trec.subscribe()\n\treturn nil\n}\n\nfunc (rec *Receiver) subscribe() {\n\trec.route = router.NewRoute(\n\t\trouter.RouteConfig{\n\t\t\tRouteParams: router.RouteParams{\"application_id\": rec.applicationID, \"user_id\": rec.userID},\n\t\t\tPath:        rec.path,\n\t\t\tChannelSize: 10,\n\t\t},\n\t)\n\n\t_, err := rec.router.Subscribe(rec.route)\n\tif err != nil {\n\t\trec.sendError(protocol.ERROR_SUBSCRIBED_TO, string(rec.path), err.Error())\n\t} else {\n\t\trec.sendOK(protocol.SUCCESS_SUBSCRIBED_TO, string(rec.path))\n\t}\n}\n\nfunc (rec *Receiver) receiveFromSubscription() {\n\tfor {\n\t\tselect {\n\t\tcase m, ok := <-rec.route.MessagesChannel():\n\t\t\tif !ok {\n\n\t\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\t\"applicationId\": rec.applicationID,\n\t\t\t\t}).Debug(\"Router closed the channel returning from subscription for\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"applicationId\":   rec.applicationID,\n\t\t\t\t\"messageMetadata\": m.Metadata(),\n\t\t\t}).Debug(\"Delivering message\")\n\n\t\t\tif m.ID > rec.lastSentID {\n\t\t\t\trec.lastSentID = m.ID\n\t\t\t\trec.sendC <- m.Bytes()\n\t\t\t} else {\n\t\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\t\"msgId\": m.ID,\n\t\t\t\t}).Debug(\"Message already sent to client. 
Dropping message.\")\n\t\t\t}\n\t\tcase <-rec.cancelC:\n\t\t\trec.shouldStop = true\n\t\t\trec.router.Unsubscribe(rec.route)\n\t\t\trec.route = nil\n\t\t\trec.sendOK(protocol.SUCCESS_CANCELED, string(rec.path))\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (rec *Receiver) fetchOnlyLoop() {\n\terr := rec.fetch()\n\tif err != nil {\n\t\tlogger.WithError(err).WithField(\"rec\", rec).Error(\"Error while fetching\")\n\t\trec.sendError(protocol.ERROR_INTERNAL_SERVER, err.Error())\n\t}\n}\n\nfunc (rec *Receiver) fetch() error {\n\tfetch := &store.FetchRequest{\n\t\tPartition: rec.path.Partition(),\n\t\tMessageC:  make(chan *store.FetchedMessage, 10), //TODO MAKE more tests when the receiver will be refactored after the route params is integrated.Initial capacity was 3\n\t\tErrorC:    make(chan error),\n\t\tStartC:    make(chan int),\n\t\tCount:     rec.maxCount,\n\t}\n\n\tif rec.startID >= 0 {\n\t\tfetch.Direction = 1\n\t\tfetch.StartID = uint64(rec.startID)\n\t\tif rec.maxCount == 0 {\n\t\t\tfetch.Count = math.MaxInt32\n\t\t}\n\t} else {\n\t\tfetch.Direction = -1\n\t\tmaxID, err := rec.messageStore.MaxMessageID(rec.path.Partition())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfetch.StartID = maxID\n\t\tif rec.maxCount == 0 {\n\t\t\tfetch.Count = -1 * int(rec.startID)\n\t\t}\n\t}\n\n\trec.messageStore.Fetch(fetch)\n\n\tfor {\n\t\tselect {\n\t\tcase numberOfResults := <-fetch.StartC:\n\t\t\trec.sendOK(protocol.SUCCESS_FETCH_START, fmt.Sprintf(\"%v %v\", rec.path, numberOfResults))\n\t\tcase msgAndID, open := <-fetch.MessageC:\n\t\t\tif !open {\n\t\t\t\trec.sendOK(protocol.SUCCESS_FETCH_END, string(rec.path))\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"msgId\":      msgAndID.ID,\n\t\t\t\t\"msg\":        string(msgAndID.Message),\n\t\t\t\t\"lastSendId\": rec.lastSentID,\n\t\t\t}).Info(\"Reply sent\")\n\n\t\t\trec.lastSentID = msgAndID.ID\n\t\t\trec.sendC <- msgAndID.Message\n\t\tcase err := <-fetch.ErrorC:\n\t\t\treturn err\n\t\tcase 
<-rec.cancelC:\n\t\t\trec.shouldStop = true\n\t\t\trec.sendOK(protocol.SUCCESS_CANCELED, string(rec.path))\n\t\t\t// TODO implement cancellation in message store\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n// Stop stops/cancels the receiver\nfunc (rec *Receiver) Stop() error {\n\trec.cancelC <- true\n\treturn nil\n}\n\nfunc (rec *Receiver) sendError(name string, argPattern string, params ...interface{}) {\n\tnotificationMessage := &protocol.NotificationMessage{\n\t\tName:    name,\n\t\tArg:     fmt.Sprintf(argPattern, params...),\n\t\tIsError: true,\n\t}\n\trec.sendC <- notificationMessage.Bytes()\n}\n\nfunc (rec *Receiver) sendOK(name string, argPattern string, params ...interface{}) {\n\tif rec.enableNotifications {\n\t\tnotificationMessage := &protocol.NotificationMessage{\n\t\t\tName:    name,\n\t\t\tArg:     fmt.Sprintf(argPattern, params...),\n\t\t\tIsError: false,\n\t\t}\n\t\trec.sendC <- notificationMessage.Bytes()\n\t}\n}\n"
  },
  {
    "path": "server/websocket/receiver_test.go",
    "content": "package websocket\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"errors\"\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_Receiver_error_handling_on_create(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\tbadArgs := []string{\"\", \"20\", \"foo 20 20\", \"/foo 20 20 20\", \"/foo a\", \"/foo 20 b\"}\n\tfor _, arg := range badArgs {\n\t\trec, _, _, _, err := aMockedReceiver(arg)\n\t\ta.Nil(rec, \"Testing with: \"+arg)\n\t\ta.Error(err, \"Testing with: \"+arg)\n\t}\n}\n\nfunc Test_Receiver_Fetch_Subscribe_Fetch_Subscribe(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trec, msgChannel, routerMock, messageStore, err := aMockedReceiver(\"/foo 0\")\n\ta.NoError(err)\n\n\t// fetch first, starting at 0\n\tfetchFirst1 := messageStore.EXPECT().Fetch(gomock.Any()).Do(func(r *store.FetchRequest) {\n\t\tgo func() {\n\t\t\ta.Equal(\"foo\", r.Partition)\n\t\t\ta.Equal(store.DirectionForward, r.Direction)\n\t\t\ta.Equal(uint64(0), r.StartID)\n\t\t\ta.Equal(int(math.MaxInt32), r.Count)\n\n\t\t\tr.StartC <- 2\n\n\t\t\tr.MessageC <- &store.FetchedMessage{ID: uint64(1), Message: []byte(\"fetch_first1-a\")}\n\t\t\tr.MessageC <- &store.FetchedMessage{ID: uint64(2), Message: []byte(\"fetch_first1-b\")}\n\t\t\tclose(r.MessageC)\n\t\t}()\n\t})\n\n\t// there is a gap between fetched and max id\n\tmessageID1 := messageStore.EXPECT().DoInTx(gomock.Any(), gomock.Any()).\n\t\tDo(func(partition string, callback func(maxMessageId uint64) error) {\n\t\t\tcallback(uint64(3))\n\t\t}).Return(errUnreadMsgsAvailable)\n\tmessageID1.After(fetchFirst1)\n\n\t// fetch again, starting at 3, because, there is still a gap\n\tfetchFirst2 := 
messageStore.EXPECT().Fetch(gomock.Any()).Do(func(r *store.FetchRequest) {\n\t\tgo func() {\n\t\t\ta.Equal(\"foo\", r.Partition)\n\t\t\ta.Equal(store.DirectionForward, r.Direction)\n\t\t\ta.Equal(uint64(3), r.StartID)\n\t\t\ta.Equal(int(math.MaxInt32), r.Count)\n\n\t\t\tr.StartC <- 1\n\t\t\tr.MessageC <- &store.FetchedMessage{ID: uint64(3), Message: []byte(\"fetch_first2-a\")}\n\t\t\tclose(r.MessageC)\n\t\t}()\n\t})\n\tfetchFirst2.After(messageID1)\n\n\t// the gap is closed\n\tmessageID2 := messageStore.EXPECT().DoInTx(gomock.Any(), gomock.Any()).\n\t\tDo(func(partition string, callback func(maxMessageId uint64) error) {\n\t\t\tcallback(uint64(3))\n\t\t})\n\tmessageID2.After(fetchFirst2)\n\n\t// subscribe\n\tsubscribe := routerMock.EXPECT().Subscribe(gomock.Any()).Do(func(r *router.Route) {\n\t\ta.Equal(r.Path, protocol.Path(\"/foo\"))\n\t\tr.Deliver(&protocol.Message{ID: uint64(4), Body: []byte(\"router-a\"), Time: 1405544146}, true)\n\t\tr.Deliver(&protocol.Message{ID: uint64(5), Body: []byte(\"router-b\"), Time: 1405544146}, true)\n\t\tr.Close() // emulate router close\n\t})\n\tsubscribe.After(messageID2)\n\n\t// router closed, so we fetch again, starting at 6 (after meesages from subscribe)\n\tfetchAfter := messageStore.EXPECT().Fetch(gomock.Any()).Do(func(r *store.FetchRequest) {\n\t\tgo func() {\n\t\t\ta.Equal(uint64(6), r.StartID)\n\t\t\ta.Equal(int(math.MaxInt32), r.Count)\n\n\t\t\tr.StartC <- 1\n\t\t\tr.MessageC <- &store.FetchedMessage{ID: uint64(6), Message: []byte(\"fetch_after-a\")}\n\t\t\tclose(r.MessageC)\n\t\t}()\n\t})\n\tfetchAfter.After(subscribe)\n\n\t// no gap\n\tmessageID3 := messageStore.EXPECT().DoInTx(gomock.Any(), gomock.Any()).\n\t\tDo(func(partition string, callback func(maxMessageId uint64) error) {\n\t\t\tcallback(uint64(6))\n\t\t})\n\n\tmessageID3.After(fetchAfter)\n\n\t// subscribe and don't send messages,\n\t// so the client has to wait until we stop\n\tsubscribe2 := 
routerMock.EXPECT().Subscribe(gomock.Any())\n\tsubscribe2.After(messageID3)\n\n\tsubscriptionLoopDone := make(chan bool)\n\tgo func() {\n\t\trec.subscriptionLoop()\n\t\tsubscriptionLoopDone <- true\n\t}()\n\n\texpectMessages(a, msgChannel,\n\t\t\"#\"+protocol.SUCCESS_FETCH_START+\" /foo 2\",\n\t\t\"fetch_first1-a\",\n\t\t\"fetch_first1-b\",\n\t\t\"#\"+protocol.SUCCESS_FETCH_END+\" /foo\",\n\t\t\"#\"+protocol.SUCCESS_FETCH_START+\" /foo 1\",\n\t\t\"fetch_first2-a\",\n\t\t\"#\"+protocol.SUCCESS_FETCH_END+\" /foo\",\n\t\t\"#\"+protocol.SUCCESS_SUBSCRIBED_TO+\" /foo\",\n\t\t\",4,,,,1405544146,0\\n\\nrouter-a\",\n\t\t\",5,,,,1405544146,0\\n\\nrouter-b\",\n\t\t\"#\"+protocol.SUCCESS_FETCH_START+\" /foo 1\",\n\t\t\"fetch_after-a\",\n\t\t\"#\"+protocol.SUCCESS_FETCH_END+\" /foo\",\n\t\t\"#\"+protocol.SUCCESS_SUBSCRIBED_TO+\" /foo\",\n\t)\n\n\ttime.Sleep(time.Millisecond)\n\trouterMock.EXPECT().Unsubscribe(gomock.Any())\n\trec.Stop()\n\n\texpectMessages(a, msgChannel,\n\t\t\"#\"+protocol.SUCCESS_CANCELED+\" /foo\",\n\t)\n\n\ttestutil.ExpectDone(a, subscriptionLoopDone)\n}\n\nfunc Test_Receiver_Fetch_Returns_Correct_Messages(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trec, msgChannel, _, messageStore, err := aMockedReceiver(\"/foo 0 2\")\n\ta.NoError(err)\n\n\tmessages := []string{\"The answer \", \"is 42\"}\n\tdone := make(chan bool)\n\tmessageStore.EXPECT().Fetch(gomock.Any()).Do(func(r *store.FetchRequest) {\n\t\tgo func() {\n\t\t\tr.StartC <- len(messages)\n\t\t\tfor i, m := range messages {\n\t\t\t\tr.MessageC <- &store.FetchedMessage{ID: uint64(i + 1), Message: []byte(m)}\n\t\t\t}\n\t\t\tclose(r.MessageC)\n\t\t\tdone <- true\n\t\t}()\n\t})\n\n\tfetchHasTerminated := make(chan bool)\n\tgo func() {\n\t\trec.fetchOnlyLoop()\n\t\tfetchHasTerminated <- true\n\t}()\n\ttestutil.ExpectDone(a, done)\n\n\texpectMessages(a, msgChannel, \"#\"+protocol.SUCCESS_FETCH_START+\" /foo 2\")\n\texpectMessages(a, msgChannel, 
messages...)\n\texpectMessages(a, msgChannel, \"#\"+protocol.SUCCESS_FETCH_END+\" /foo\")\n\n\ttestutil.ExpectDone(a, fetchHasTerminated)\n\tctrl.Finish()\n}\n\nfunc Test_Receiver_Fetch_Produces_Correct_Fetch_Requests(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\ttestcases := []struct {\n\t\tdesc   string\n\t\targ    string\n\t\tmaxID  int\n\t\texpect store.FetchRequest\n\t}{\n\t\t{desc: \"simple forward fetch\",\n\t\t\targ:    \"/foo 0 20\",\n\t\t\tmaxID:  -1,\n\t\t\texpect: store.FetchRequest{Partition: \"foo\", Direction: 1, StartID: uint64(0), Count: 20},\n\t\t},\n\t\t{desc: \"forward fetch without bounds\",\n\t\t\targ:    \"/foo 0\",\n\t\t\tmaxID:  -1,\n\t\t\texpect: store.FetchRequest{Partition: \"foo\", Direction: 1, StartID: uint64(0), Count: math.MaxInt32},\n\t\t},\n\t\t{desc: \"backward fetch to top\",\n\t\t\targ:    \"/foo -20\",\n\t\t\tmaxID:  42,\n\t\t\texpect: store.FetchRequest{Partition: \"foo\", Direction: -1, StartID: uint64(42), Count: 20},\n\t\t},\n\t\t{desc: \"backward fetch with count\",\n\t\t\targ:    \"/foo -1 10\",\n\t\t\tmaxID:  42,\n\t\t\texpect: store.FetchRequest{Partition: \"foo\", Direction: -1, StartID: uint64(42), Count: 10},\n\t\t},\n\t}\n\n\tfor _, test := range testcases {\n\t\trec, _, _, messageStore, err := aMockedReceiver(test.arg)\n\n\t\ta.NotNil(rec)\n\t\ta.NoError(err, test.desc)\n\n\t\tif test.maxID != -1 {\n\t\t\tmessageStore.EXPECT().MaxMessageID(test.expect.Partition).\n\t\t\t\tReturn(uint64(test.maxID), nil)\n\t\t}\n\n\t\tdone := make(chan bool)\n\t\tmessageStore.EXPECT().Fetch(gomock.Any()).Do(func(r *store.FetchRequest) {\n\t\t\ta.Equal(test.expect.Partition, r.Partition, test.desc)\n\t\t\ta.Equal(test.expect.Direction, r.Direction, test.desc)\n\t\t\ta.Equal(test.expect.StartID, r.StartID, test.desc)\n\t\t\ta.Equal(test.expect.Count, r.Count, test.desc)\n\t\t\tdone <- true\n\t\t})\n\n\t\tgo rec.fetchOnlyLoop()\n\t\ttestutil.ExpectDone(a, 
done)\n\t\trec.Stop()\n\t}\n}\n\nfunc Test_Receiver_Fetch_Sends_error_on_failure(t *testing.T) {\n\ta := assert.New(t)\n\n\tfor _, arg := range []string{\n\t\t\"/foo 0 2\", // fetch only\n\t\t\"/foo 0\",   // fetch and subscribe\n\t} {\n\t\tctrl := gomock.NewController(t)\n\n\t\trec, msgChannel, _, messageStore, err := aMockedReceiver(arg)\n\t\ta.NoError(err)\n\n\t\tmessageStore.EXPECT().Fetch(gomock.Any()).Do(func(r *store.FetchRequest) {\n\t\t\tgo func() {\n\t\t\t\tr.ErrorC <- errors.New(\"expected test error\")\n\t\t\t}()\n\t\t})\n\n\t\trec.Start()\n\t\texpectMessages(a, msgChannel, \"!error-server-internal expected test error\")\n\t\tctrl.Finish()\n\t}\n}\n\nfunc Test_Receiver_Fetch_Sends_error_on_failure_in_MaxMessageId(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\trec, msgChannel, _, messageStore, err := aMockedReceiver(\"/foo -2 2\")\n\ta.NoError(err)\n\n\tmessageStore.EXPECT().MaxMessageID(\"foo\").\n\t\tReturn(uint64(0), errors.New(\"expected test error\"))\n\n\trec.Start()\n\n\texpectMessages(a, msgChannel, \"!error-server-internal expected test error\")\n}\n\n//rec, sendChannel, router, messageStore, err := aMockedReceiver(\"+\")\nfunc aMockedReceiver(arg string) (*Receiver, chan []byte, *MockRouter, *MockMessageStore, error) {\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\tmessageStore := NewMockMessageStore(testutil.MockCtrl)\n\trouterMock.EXPECT().MessageStore().Return(messageStore, nil).AnyTimes()\n\tsendChannel := make(chan []byte)\n\tcmd := &protocol.Cmd{\n\t\tName: protocol.CmdReceive,\n\t\tArg:  arg,\n\t}\n\trec, err := NewReceiverFromCmd(\"any-appId\", cmd, sendChannel, routerMock, \"userId\")\n\treturn rec, sendChannel, routerMock, messageStore, err\n}\n\nfunc expectMessages(a *assert.Assertions, msgChannel chan []byte, message ...string) {\n\tfor _, m := range message {\n\t\tselect {\n\t\tcase msg := <-msgChannel:\n\t\t\ta.Equal(m, string(msg))\n\t\tcase 
<-time.After(time.Millisecond * 100):\n\t\t\ta.Fail(\"timeout: \" + m)\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "server/websocket/websocket_connector.go",
    "content": "package websocket\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/router\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/rs/xid\"\n\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar webSocketUpgrader = websocket.Upgrader{\n\tCheckOrigin: func(r *http.Request) bool { return true },\n}\n\n// WSHandler is a struct used for handling websocket connections on a certain prefix.\ntype WSHandler struct {\n\trouter        router.Router\n\tprefix        string\n\taccessManager auth.AccessManager\n}\n\n// NewWSHandler returns a new WSHandler.\nfunc NewWSHandler(router router.Router, prefix string) (*WSHandler, error) {\n\taccessManager, err := router.AccessManager()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &WSHandler{\n\t\trouter:        router,\n\t\tprefix:        prefix,\n\t\taccessManager: accessManager,\n\t}, nil\n}\n\n// GetPrefix returns the prefix.\n// It is a part of the service.endpoint implementation.\nfunc (handler *WSHandler) GetPrefix() string {\n\treturn handler.prefix\n}\n\n// ServeHTTP is an http.Handler.\n// It is a part of the service.endpoint implementation.\nfunc (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tc, err := webSocketUpgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error on upgrading to websocket\")\n\t\treturn\n\t}\n\tdefer c.Close()\n\n\tNewWebSocket(handler, &wsconn{c}, extractUserID(r.RequestURI)).Start()\n}\n\n// WSConnection is a wrapper interface for the needed functions of the websocket.Conn\n// It is introduced for testability of the WSHandler\ntype WSConnection interface {\n\tClose()\n\tSend(bytes []byte) (err error)\n\tReceive(bytes *[]byte) (err error)\n}\n\n// wsconnImpl is a Wrapper of the websocket.Conn\n// implementing the interface WSConn for better testability\ntype wsconn struct 
{\n\t*websocket.Conn\n}\n\n// Close the connection.\nfunc (conn *wsconn) Close() {\n\tconn.Conn.Close()\n}\n\n// Send bytes through the connection and possibly return an error.\nfunc (conn *wsconn) Send(bytes []byte) error {\n\treturn conn.WriteMessage(websocket.BinaryMessage, bytes)\n}\n\n// Receive bytes through the connection and possibly return an error.\nfunc (conn *wsconn) Receive(bytes *[]byte) (err error) {\n\t_, *bytes, err = conn.ReadMessage()\n\treturn err\n}\n\n// WebSocket struct represents a websocket.\ntype WebSocket struct {\n\t*WSHandler\n\tWSConnection\n\tapplicationID string\n\tuserID        string\n\tsendChannel   chan []byte\n\treceivers     map[protocol.Path]*Receiver\n}\n\n// NewWebSocket returns a new WebSocket.\nfunc NewWebSocket(handler *WSHandler, wsConn WSConnection, userID string) *WebSocket {\n\treturn &WebSocket{\n\t\tWSHandler:     handler,\n\t\tWSConnection:  wsConn,\n\t\tapplicationID: xid.New().String(),\n\t\tuserID:        userID,\n\t\tsendChannel:   make(chan []byte, 10),\n\t\treceivers:     make(map[protocol.Path]*Receiver),\n\t}\n}\n\n// Start the WebSocket (the send and receive loops).\n// It is implementing the service.startable interface.\nfunc (ws *WebSocket) Start() error {\n\tws.sendConnectionMessage()\n\tgo ws.sendLoop()\n\tws.receiveLoop()\n\treturn nil\n}\n\nfunc (ws *WebSocket) sendLoop() {\n\tfor raw := range ws.sendChannel {\n\t\tif !ws.checkAccess(raw) {\n\t\t\tcontinue\n\t\t}\n\t\tif err := ws.Send(raw); err != nil {\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"userId\":        ws.userID,\n\t\t\t\t\"applicationID\": ws.applicationID,\n\t\t\t\t\"totalSize\":     len(raw),\n\t\t\t\t\"actualContent\": string(raw),\n\t\t\t}).Error(\"Could not send\")\n\t\t\tws.cleanAndClose()\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (ws *WebSocket) checkAccess(raw []byte) bool {\n\tif len(raw) > 0 && raw[0] == byte('/') {\n\t\tpath := getPathFromRawMessage(raw)\n\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"userID\": 
ws.userID,\n\t\t\t\"path\":   path,\n\t\t}).Debug(\"Received msg\")\n\n\t\treturn len(path) == 0 || ws.accessManager.IsAllowed(auth.READ, ws.userID, path)\n\n\t}\n\treturn true\n}\n\nfunc getPathFromRawMessage(raw []byte) protocol.Path {\n\ti := strings.Index(string(raw), \",\")\n\treturn protocol.Path(raw[:i])\n}\n\nfunc (ws *WebSocket) receiveLoop() {\n\tvar message []byte\n\tfor {\n\t\terr := ws.Receive(&message)\n\t\tif err != nil {\n\n\t\t\tlogger.WithFields(log.Fields{\n\t\t\t\t\"applicationID\": ws.applicationID,\n\t\t\t}).Debug(\"Closed connnection by application\")\n\n\t\t\tws.cleanAndClose()\n\t\t\tbreak\n\t\t}\n\n\t\t//protocol.Debug(\"websocket_connector, raw message received: %v\", string(message))\n\t\tcmd, err := protocol.ParseCmd(message)\n\t\tif err != nil {\n\t\t\tws.sendError(protocol.ERROR_BAD_REQUEST, \"error parsing command. %v\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tswitch cmd.Name {\n\t\tcase protocol.CmdSend:\n\t\t\tws.handleSendCmd(cmd)\n\t\tcase protocol.CmdReceive:\n\t\t\tws.handleReceiveCmd(cmd)\n\t\tcase protocol.CmdCancel:\n\t\t\tws.handleCancelCmd(cmd)\n\t\tdefault:\n\t\t\tws.sendError(protocol.ERROR_BAD_REQUEST, \"unknown command %v\", cmd.Name)\n\t\t}\n\t}\n}\n\nfunc (ws *WebSocket) sendConnectionMessage() {\n\tn := &protocol.NotificationMessage{\n\t\tName: protocol.SUCCESS_CONNECTED,\n\t\tArg:  \"You are connected to the server.\",\n\t\tJson: fmt.Sprintf(`{\"ApplicationId\": \"%s\", \"UserId\": \"%s\", \"Time\": \"%s\"}`, ws.applicationID, ws.userID, time.Now().Format(time.RFC3339)),\n\t}\n\tws.sendChannel <- n.Bytes()\n}\n\nfunc (ws *WebSocket) handleReceiveCmd(cmd *protocol.Cmd) {\n\trec, err := NewReceiverFromCmd(\n\t\tws.applicationID,\n\t\tcmd,\n\t\tws.sendChannel,\n\t\tws.router,\n\t\tws.userID,\n\t)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Client error in handleReceiveCmd\")\n\t\tws.sendError(protocol.ERROR_BAD_REQUEST, err.Error())\n\t\treturn\n\t}\n\tws.receivers[rec.path] = rec\n\trec.Start()\n}\n\nfunc 
(ws *WebSocket) handleCancelCmd(cmd *protocol.Cmd) {\n\tif len(cmd.Arg) == 0 {\n\t\tws.sendError(protocol.ERROR_BAD_REQUEST, \"- command requires a path argument, but none given\")\n\t\treturn\n\t}\n\tpath := protocol.Path(cmd.Arg)\n\trec, exist := ws.receivers[path]\n\tif exist {\n\t\trec.Stop()\n\t\tdelete(ws.receivers, path)\n\t}\n}\n\nfunc (ws *WebSocket) handleSendCmd(cmd *protocol.Cmd) {\n\tlogger.WithFields(log.Fields{\n\t\t\"cmd\": string(cmd.Bytes()),\n\t}).Debug(\"Sending \")\n\n\tif len(cmd.Arg) == 0 {\n\t\tws.sendError(protocol.ERROR_BAD_REQUEST, \"send command requires a path argument, but none given\")\n\t\treturn\n\t}\n\n\targs := strings.SplitN(cmd.Arg, \" \", 2)\n\tmsg := &protocol.Message{\n\t\tPath:          protocol.Path(args[0]),\n\t\tApplicationID: ws.applicationID,\n\t\tUserID:        ws.userID,\n\t\tHeaderJSON:    cmd.HeaderJSON,\n\t\tBody:          cmd.Body,\n\t}\n\n\tws.router.HandleMessage(msg)\n\n\tws.sendOK(protocol.SUCCESS_SEND, \"\")\n}\n\nfunc (ws *WebSocket) cleanAndClose() {\n\n\tlogger.WithFields(log.Fields{\n\t\t\"applicationID\": ws.applicationID,\n\t}).Debug(\"Closing applicationId\")\n\n\tfor path, rec := range ws.receivers {\n\t\trec.Stop()\n\t\tdelete(ws.receivers, path)\n\t}\n\n\tws.Close()\n}\n\nfunc (ws *WebSocket) sendError(name string, argPattern string, params ...interface{}) {\n\tn := &protocol.NotificationMessage{\n\t\tName:    name,\n\t\tArg:     fmt.Sprintf(argPattern, params...),\n\t\tIsError: true,\n\t}\n\tws.sendChannel <- n.Bytes()\n}\n\nfunc (ws *WebSocket) sendOK(name string, argPattern string, params ...interface{}) {\n\tn := &protocol.NotificationMessage{\n\t\tName:    name,\n\t\tArg:     fmt.Sprintf(argPattern, params...),\n\t\tIsError: false,\n\t}\n\tws.sendChannel <- n.Bytes()\n}\n\n// Extracts the userID out of an URI or empty string if format not met\n// Example:\n// \t\thttp://example.com/user/user01/ -> user01\n// \t\thttp://example.com/user/ -> \"\"\nfunc extractUserID(uri string) string 
{\n\turiParts := strings.SplitN(uri, \"/user/\", 2)\n\tif len(uriParts) != 2 {\n\t\treturn \"\"\n\t}\n\treturn uriParts[1]\n}\n"
  },
  {
    "path": "server/websocket/websocket_connector_test.go",
    "content": "package websocket\n\nimport (\n\t\"github.com/smancke/guble/protocol\"\n\t\"github.com/smancke/guble/server/auth\"\n\t\"github.com/smancke/guble/server/router\"\n\t\"github.com/smancke/guble/server/store\"\n\t\"github.com/smancke/guble/testutil\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar aTestMessage = &protocol.Message{\n\tID:   uint64(42),\n\tPath: \"/foo\",\n\tBody: []byte(\"Test\"),\n}\n\nfunc Test_WebSocket_SubscribeAndUnsubscribe(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\ta := assert.New(t)\n\n\tmessages := []string{\"+ /foo\", \"+ /bar\", \"- /foo\"}\n\twsconn, routerMock, messageStore := createDefaultMocks(messages)\n\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\tdoneGroup := func(bytes []byte) error {\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\trouterMock.EXPECT().Subscribe(routeMatcher{\"/foo\"}).Return(nil, nil)\n\twsconn.EXPECT().\n\t\tSend([]byte(\"#\" + protocol.SUCCESS_SUBSCRIBED_TO + \" /foo\")).\n\t\tDo(doneGroup)\n\n\trouterMock.EXPECT().Subscribe(routeMatcher{\"/bar\"}).Return(nil, nil)\n\twsconn.EXPECT().\n\t\tSend([]byte(\"#\" + protocol.SUCCESS_SUBSCRIBED_TO + \" /bar\")).\n\t\tDo(doneGroup)\n\n\trouterMock.EXPECT().Unsubscribe(routeMatcher{\"/foo\"})\n\twsconn.EXPECT().\n\t\tSend([]byte(\"#\" + protocol.SUCCESS_CANCELED + \" /foo\")).\n\t\tDo(doneGroup)\n\n\twebsocket := runNewWebSocket(wsconn, routerMock, messageStore, nil)\n\twg.Wait()\n\n\ta.Equal(1, len(websocket.receivers))\n\ta.Equal(protocol.Path(\"/bar\"), websocket.receivers[protocol.Path(\"/bar\")].path)\n}\n\nfunc Test_SendMessage(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\tcommands := []string{\"> /path\\n{\\\"key\\\": \\\"value\\\"}\\nHello, this is a test\"}\n\twsconn, routerMock, messageStore := createDefaultMocks(commands)\n\n\trouterMock.EXPECT().HandleMessage(messageMatcher{path: 
\"/path\", message: \"Hello, this is a test\", header: `{\"key\": \"value\"}`})\n\twsconn.EXPECT().Send([]byte(\"#send\"))\n\n\trunNewWebSocket(wsconn, routerMock, messageStore, nil)\n}\n\nfunc Test_AnIncomingMessageIsDelivered(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\twsconn, routerMock, messageStore := createDefaultMocks([]string{})\n\n\twsconn.EXPECT().Send(aTestMessage.Bytes())\n\n\thandler := runNewWebSocket(wsconn, routerMock, messageStore, nil)\n\n\thandler.sendChannel <- aTestMessage.Bytes()\n\ttime.Sleep(time.Millisecond * 2)\n}\n\nfunc Test_AnIncomingMessageIsNotAllowed(t *testing.T) {\n\tctrl, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\twsconn, routerMock, _ := createDefaultMocks([]string{})\n\n\ttam := NewMockAccessManager(ctrl)\n\ttam.EXPECT().IsAllowed(auth.READ, \"testuser\", protocol.Path(\"/foo\")).Return(false)\n\thandler := NewWebSocket(\n\t\ttestWSHandler(routerMock, tam),\n\t\twsconn,\n\t\t\"testuser\",\n\t)\n\tgo func() {\n\t\thandler.Start()\n\t}()\n\ttime.Sleep(time.Millisecond * 2)\n\n\thandler.sendChannel <- aTestMessage.Bytes()\n\ttime.Sleep(time.Millisecond * 2)\n\t//nothing shall have been sent\n\n\t//now allow\n\ttam.EXPECT().IsAllowed(auth.READ, \"testuser\", protocol.Path(\"/foo\")).Return(true)\n\n\twsconn.EXPECT().Send(aTestMessage.Bytes())\n\n\ttime.Sleep(time.Millisecond * 2)\n\n\thandler.sendChannel <- aTestMessage.Bytes()\n\ttime.Sleep(time.Millisecond * 2)\n}\n\nfunc Test_BadCommands(t *testing.T) {\n\t_, finish := testutil.NewMockCtrl(t)\n\tdefer finish()\n\n\tbadRequests := []string{\"XXXX\", \"\", \">\", \">/foo\", \"+\", \"-\", \"send /foo\"}\n\twsconn, routerMock, messageStore := createDefaultMocks(badRequests)\n\n\tcounter := 0\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(badRequests))\n\n\twsconn.EXPECT().Send(gomock.Any()).Do(func(data []byte) error {\n\t\tif strings.HasPrefix(string(data), \"#connected\") {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.HasPrefix(string(data), 
\"!error-bad-request\") {\n\t\t\tcounter++\n\t\t} else {\n\t\t\tt.Logf(\"expected bad-request, but got: %v\", string(data))\n\t\t}\n\n\t\twg.Done()\n\t\treturn nil\n\t}).AnyTimes()\n\n\trunNewWebSocket(wsconn, routerMock, messageStore, nil)\n\n\twg.Wait()\n\tassert.Equal(t, len(badRequests), counter, \"expected number of bad requests does not match\")\n}\n\nfunc TestExtractUserId(t *testing.T) {\n\tassert.Equal(t, \"marvin\", extractUserID(\"/foo/user/marvin\"))\n\tassert.Equal(t, \"marvin\", extractUserID(\"/user/marvin\"))\n\tassert.Equal(t, \"\", extractUserID(\"/\"))\n}\n\nfunc testWSHandler(\n\trouterMock *MockRouter,\n\taccessManager auth.AccessManager) *WSHandler {\n\n\treturn &WSHandler{\n\t\trouter:        routerMock,\n\t\tprefix:        \"/prefix\",\n\t\taccessManager: accessManager,\n\t}\n}\n\nfunc runNewWebSocket(\n\twsconn *MockWSConnection,\n\trouterMock *MockRouter,\n\tmessageStore store.MessageStore,\n\taccessManager auth.AccessManager) *WebSocket {\n\n\tif accessManager == nil {\n\t\taccessManager = auth.NewAllowAllAccessManager(true)\n\t}\n\twebsocket := NewWebSocket(\n\t\ttestWSHandler(routerMock, accessManager),\n\t\twsconn,\n\t\t\"testuser\",\n\t)\n\n\tgo func() {\n\t\twebsocket.Start()\n\t}()\n\n\ttime.Sleep(time.Millisecond * 2)\n\treturn websocket\n}\n\nfunc createDefaultMocks(inputMessages []string) (\n\t*MockWSConnection,\n\t*MockRouter,\n\t*MockMessageStore) {\n\tinputMessagesC := make(chan []byte, len(inputMessages))\n\tfor _, msg := range inputMessages {\n\t\tinputMessagesC <- []byte(msg)\n\t}\n\n\trouterMock := NewMockRouter(testutil.MockCtrl)\n\tmessageStore := NewMockMessageStore(testutil.MockCtrl)\n\trouterMock.EXPECT().MessageStore().Return(messageStore, nil).AnyTimes()\n\n\twsconn := NewMockWSConnection(testutil.MockCtrl)\n\twsconn.EXPECT().Receive(gomock.Any()).Do(func(message *[]byte) error {\n\t\t*message = <-inputMessagesC\n\t\treturn nil\n\t}).Times(len(inputMessages) + 
1)\n\n\twsconn.EXPECT().Send(connectedNotificationMatcher{})\n\n\treturn wsconn, routerMock, messageStore\n}\n\n// --- routeMatcher ---------\ntype routeMatcher struct {\n\tpath string\n}\n\nfunc (n routeMatcher) Matches(x interface{}) bool {\n\treturn n.path == string(x.(*router.Route).Path)\n}\n\nfunc (n routeMatcher) String() string {\n\treturn \"route path equals \" + n.path\n}\n\n// --- messageMatcher ---------\ntype messageMatcher struct {\n\tid      uint64\n\tpath    string\n\tmessage string\n\theader  string\n}\n\nfunc (n messageMatcher) Matches(x interface{}) bool {\n\treturn n.path == string(x.(*protocol.Message).Path) &&\n\t\tn.message == string(x.(*protocol.Message).Body) &&\n\t\t(n.id == 0 || n.id == x.(*protocol.Message).ID) &&\n\t\t(n.header == \"\" || (n.header == x.(*protocol.Message).HeaderJSON))\n}\n\nfunc (n messageMatcher) String() string {\n\treturn fmt.Sprintf(\"message equals %q, %q, %q\", n.id, n.path, n.message)\n}\n\n// --- Connected Notification Matcher ---------\ntype connectedNotificationMatcher struct {\n}\n\nfunc (notify connectedNotificationMatcher) Matches(x interface{}) bool {\n\treturn strings.HasPrefix(string(x.([]byte)), \"#connected\")\n}\n\nfunc (notify connectedNotificationMatcher) String() string {\n\treturn fmt.Sprintf(\"is connected message\")\n}\n"
  },
  {
    "path": "test.sh",
    "content": "#!/usr/bin/env bash\n\nGO_TEST_DISABLED=true go test -short  ./...\nTESTRESULT=$?\n\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nNOCOLOR='\\033[0m'\n\ncase ${TESTRESULT} in\n0)\n  MESSAGE=\"${GREEN}OK\"\n  ;;\n1)\n  MESSAGE=\"${RED}Test(s) failing\"\n  ;;\n2)\n  MESSAGE=\"${RED}Compilation error\"\n  ;;\n*)\n  MESSAGE=\"${RED}Error(s)\"\n  ;;\nesac\n\necho -e \"${MESSAGE}${NOCOLOR}\\n\"\n\nexit ${TESTRESULT}\n"
  },
  {
    "path": "testutil/testutil.go",
    "content": "package testutil\n\nimport (\n\t//used for pprof server\n\t_ \"net/http/pprof\"\n\n\tlog \"github.com/Sirupsen/logrus\"\n\t\"github.com/docker/distribution/health\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"net/http\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\n// MockCtrl is a gomock.Controller to use globally\nvar MockCtrl *gomock.Controller\n\nfunc init() {\n\t// disable error output while testing\n\t// because also negative tests are tested\n\tlog.SetLevel(log.ErrorLevel)\n}\n\n// NewMockCtrl initializes the `MockCtrl` package var and returns a method to\n// finish the controller when test is complete\n// **Important**: Don't forget to call the returned method at the end of the test\n// Usage:\n// \t\tctrl, finish := test_util.NewMockCtrl(t)\n// \t\tdefer finish()\nfunc NewMockCtrl(t *testing.T) (*gomock.Controller, func()) {\n\tMockCtrl = gomock.NewController(t)\n\treturn MockCtrl, func() { MockCtrl.Finish() }\n}\n\nfunc NewMockBenchmarkCtrl(b *testing.B) (*gomock.Controller, func()) {\n\tMockCtrl = gomock.NewController(b)\n\treturn MockCtrl, func() { MockCtrl.Finish() }\n}\n\n// EnableDebugForMethod enables debug-level output through the current test\n// Usage:\n//\t\ttestutil.EnableDebugForMethod()()\nfunc EnableDebugForMethod() func() {\n\treset := log.GetLevel()\n\tlog.SetLevel(log.DebugLevel)\n\treturn func() { log.SetLevel(reset) }\n}\n\n// EnableInfoForMethod enables info-level output through the current test\n// Usage:\n//\t\ttestutil.EnableInfoForMethod()()\nfunc EnableInfoForMethod() func() {\n\treset := log.GetLevel()\n\tlog.SetLevel(log.InfoLevel)\n\treturn func() { log.SetLevel(reset) }\n}\n\n// ExpectDone waits to receive a value in the doneChannel for at least a second\n// or fails the test.\nfunc ExpectDone(a *assert.Assertions, doneChannel chan bool) {\n\tselect {\n\tcase <-doneChannel:\n\t\treturn\n\tcase <-time.After(time.Second):\n\t\ta.Fail(\"timeout in expectDone\")\n\t}\n}\n\n// 
ExpectPanic expects a panic (and fails if this does not happen).\nfunc ExpectPanic(t *testing.T) {\n\tif r := recover(); r == nil {\n\t\tassert.Fail(t, \"Expecting a panic but unfortunately it did not happen\")\n\t}\n}\n\n// ResetDefaultRegistryHealthCheck resets the existing registry containing health-checks\nfunc ResetDefaultRegistryHealthCheck() {\n\thealth.DefaultRegistry = health.NewRegistry()\n}\n\n//SkipIfShort skips a test if the `-short` flag is given to `go test`\nfunc SkipIfShort(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping test in short mode.\")\n\t}\n}\n\n//SkipIfDisabled skips a test if the GO_TEST_DISABLED environment variable is set to any value (when `go test` runs)\nfunc SkipIfDisabled(t *testing.T) {\n\tif os.Getenv(\"GO_TEST_DISABLED\") != \"\" {\n\t\tt.Skip(\"skipping disabled test.\")\n\t}\n}\n\nfunc PprofDebug() {\n\tgo func() {\n\t\thttp.ListenAndServe(\"localhost:6060\", nil)\n\t}()\n}\n"
  }
]