Repository: senecajs/ramanujan
Branch: master
Commit: 69e61a90ada5
Files: 119
Total size: 123.9 KB
Directory structure:
gitextract_lorkntwj/
├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── api/
│ └── api-service.js
├── base/
│ └── base.js
├── docker/
│ ├── Makefile
│ ├── api/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ └── api-service.js
│ ├── base/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ └── base.js
│ ├── docker.txt
│ ├── entry-cache/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── entry-cache-logic.js
│ │ └── entry-cache-service.js
│ ├── entry-store/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── entry-store-logic.js
│ │ └── entry-store-service.js
│ ├── fanout/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── fanout-logic.js
│ │ └── fanout-service.js
│ ├── follow/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── follow-logic.js
│ │ └── follow-service.js
│ ├── front/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── front.js
│ │ └── www/
│ │ └── res/
│ │ └── site.css
│ ├── home/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── home-service.js
│ │ └── www/
│ │ ├── home.html
│ │ └── layout.html
│ ├── index/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── index-logic.js
│ │ └── index-service.js
│ ├── mine/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── mine-service.js
│ │ └── www/
│ │ ├── home.html
│ │ ├── layout.html
│ │ └── mine.html
│ ├── post/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── post-logic.js
│ │ └── post-service.js
│ ├── ramanujan.yml
│ ├── repl/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── monitor.js
│ │ └── repl-service.js
│ ├── reserve/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── reserve-logic.js
│ │ └── reserve-service.js
│ ├── search/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ ├── search-service.js
│ │ └── www/
│ │ ├── home.html
│ │ ├── layout.html
│ │ └── search.html
│ ├── shared/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ └── package.json
│ ├── timeline/
│ │ ├── Dockerfile
│ │ ├── Makefile
│ │ └── timeline-shard-service.js
│ └── timeline-shard/
│ ├── Dockerfile
│ ├── Makefile
│ ├── timeline-logic.js
│ └── timeline-service.js
├── entry-cache/
│ ├── entry-cache-logic.js
│ ├── entry-cache-service.js
│ └── test/
│ └── entry-cache-test.js
├── entry-store/
│ ├── entry-store-logic.js
│ ├── entry-store-service.js
│ └── test/
│ └── entry-store-test.js
├── fanout/
│ ├── fanout-logic.js
│ ├── fanout-service.js
│ └── test/
│ └── fanout-test.js
├── follow/
│ ├── follow-logic.js
│ ├── follow-service.js
│ └── test/
│ └── follow-test.js
├── front/
│ ├── front.js
│ └── www/
│ └── res/
│ └── site.css
├── fuge/
│ └── fuge.yml
├── home/
│ ├── home-service.js
│ └── www/
│ ├── home.html
│ └── layout.html
├── index/
│ ├── index-logic.js
│ ├── index-service.js
│ └── test/
│ └── index-test.js
├── mine/
│ ├── mine-service.js
│ └── www/
│ ├── layout.html
│ └── mine.html
├── monitor/
│ └── monitor.js
├── package.json
├── post/
│ ├── post-logic.js
│ ├── post-service.js
│ └── test/
│ └── post-test.js
├── repl/
│ └── repl-service.js
├── reserve/
│ ├── reserve-logic.js
│ ├── reserve-service.js
│ └── test/
│ └── reserve-test.js
├── search/
│ ├── search-service.js
│ └── www/
│ ├── layout.html
│ └── search.html
├── start.sh
└── timeline/
├── test/
│ └── timeline-test.js
├── timeline-logic.js
├── timeline-service.js
└── timeline-shard-service.js
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Logs
logs
*.log
npm-debug.log*
# Runtime data
pids
*.pid
*.seed
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# node-waf configuration
.lock-wscript
# Compiled binary addons (http://nodejs.org/api/addons.html)
build/Release
# Dependency directory
# https://docs.npmjs.com/misc/faq#should-i-check-my-node-modules-folder-into-git
node_modules
# Optional npm cache directory
.npm
# Optional REPL history
.node_repl_history
*~
node_modules*
================================================
FILE: .travis.yml
================================================
sudo: required
dist: trusty
language: node_js
node_js:
- "6"
- "node"
================================================
FILE: LICENSE
================================================
The MIT License (MIT)
Copyright (c) Richard Rodger and other contributors 2015-2016.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# ramanujan
This project is an implementation of a microblogging system (similar
to the basic functionality of [Twitter](http://twitter.com)) using the
[microservice architecture](http://www.richardrodger.com/seneca-microservices-nodejs#.VyCjoWQrL-k)
and [Node.js](https://nodejs.org). It is the example system discussed
in Chapter 1 of [The Tao of Microservices](http://bit.ly/rmtaomicro)
book.
The purpose of this code base is to help you learn how to design and
build microservice systems. You can follow the construction through
the following steps:
* [Informal Requirements](#informal-requirements)
* [Message specification](#message-specification)
* [Service specification](#service-specification)
The system uses the
[Seneca microservice framework](http://senecajs.org) to provide
inter-service communication, and the
[fuge microservice development tool](https://github.com/apparatus/fuge) to manage
services on a local development machine.
The system is also a demonstration of the
[SWIM protocol](https://www.cs.cornell.edu/~asdas/research/dsn02-swim.pdf)
for peer-to-peer service discovery. A service registry is not needed
as the network automatically reconfigures as microservices are added
and removed.
## Scope of the system
The system shows implementations of some of the essential features of
a microblogging system, but not all. Of particular focus is the use of
separate microservices for separate content pages, the use of
messages for data manipulation, and the use of a reactive message flow
for scaling.
The system does not provide for full accounts, or user
authentication. This could be added relatively easily using the
seneca-auth and seneca-user plugins. Avoiding the need to login makes
it easier to experiment as you can check multiple user experiences in
the browser.
The system exposes a (RESTish) JSON API over HTTP. However, the user
interface does _not_ use any client-side JavaScript, and is entirely
delivered by server-side templates. This is an old school POST and
redirect architecture to keep things simple and focused on the
server-side.
The system does not use persistent storage. You can easily make the
data persistent by using a Seneca data storage plugin. Keeping
everything in memory makes for faster development, easier
experimentation, and lets you reboot the system if you end up with
corrupted data during development.
This system also provides an example of message tracing, using Zipkin.
This example codebase does not provide a production deployment
configuration. It does however provide a Docker Swarm example that you
can start building from.
## Unit test examples
The system also includes example code for unit testing microservices.
The unit test code for each service is in the `test` subfolder of each
microservice folder.
To run all the tests, use:
```sh
npm test
```
The microservices can be unit tested independently and offline. Mock
messages are used to isolate each microservice from its network
dependencies.
## Running the system
The system is implemented in Node.js. You will need to have Node.js
version 4.0 or greater installed.
You can run the system directly from the command line by running the
`start.sh` script:
```sh
$ ./start.sh
```
This starts all the microservices in the background. While this is a
quick way to get started, and verify that everything works, it is not
the most convenient option.
To have more control, you can use
[fuge](https://github.com/apparatus/fuge) to run the microservice
processes. Detailed instructions are provided next.
You can also use Docker to run the services. Example Dockerfiles are
provided in the
[docker folder](https://github.com/senecajs/ramanujan/tree/master/docker). See
below for more details.
## Running with fuge
#### Step 0: Install fuge
Follow the instructions at [fuge repository](https://github.com/apparatus/fuge).
_fuge_ is a development tool that lets you manage and control a
microservice system for local development. The ramanujan repository is
preconfigured for fuge (see the fuge folder), so you don't have to set
anything up. The ramanujan system has 14 microservices (at last
count), so you really do need a local tool to help run the system.
This is trade-off that you make when you choose the microservice
architecture. You can move faster because you have very low coupling,
and thus lower technical debt, but you will need more automation to
manage the higher number of moving parts.
#### Step 1: Clone the repository
Use git to clone the repository to a local development folder of your choice
```sh
$ git clone https://github.com/senecajs/ramanujan.git
```
#### Step 2: Download dependencies
The system needs a number of Node.js modules from npmjs.org to
function correctly. These are the only external dependencies.
```sh
$ npm install
```
Wait until the downloads complete. Some modules will require local
compilation. If you run into problems due to your operating system,
using a [Linux virtual machine](https://www.virtualbox.org/) is
probably your fastest solution. If you are using Windows,
[configuring msbuild](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
first is a good place to start.
The Zipkin message tracing is optional, and the system will work fine
if there is no Zipkin installation. However, it is pretty easy to set
one up using Docker:
```sh
$ docker run -d -p 9411:9411 openzipkin/zipkin
```
Once you've run through some of the use cases, open http://localhost:9411/ to see the
message traces. Note that this is a demonstration system, so all
traces are captured. In production you'll want to use a much lower
sampling rate - see the Zipkin documentation for details.
#### Step 3: Run fuge
From within the repository folder, run the fuge shell.
```sh
$ fuge shell fuge/fuge.yml
```
This will start fuge, output some logging messages about the ramanujan services, and then place you in an interactive repl:
```sh
...
starting shell..
? fuge>
```
Enter the command `help` to see a list of commands. Useful commands
are `ps` to list the status of the services (try it!), and `exit` to
shutdown all services and exit. If your system state becomes corrupted
in some way (this often happens during development due to bugs in
microservices), exit fuge completely and restart the fuge shell.
#### Step 4: Start up the system
To start the system, use the fuge command:
```sh
...
? fuge> start all
```
You see a list of startup logs from each service. _fuge_ prefixes the
logs for each service with the service names, and gives them different
colors so they are easy to tell apart. This also makes it easy to
review message flows. The system takes about a few seconds to start
all microservices.
Now use the `ps` command to list the state of the services. They
should all be running.
### Using the system
Open your web browser to interact with the system. The steps below
define a "happy path" to validate the basic functionality of the
system.
#### Step 1: Post microblogs entries for user _foo_
Open `http://localhost:8000/foo`.
This is the homepage for the user _foo_, and shows their timeline. The
timeline is a list of recent microblog entries from all users that the
user _foo_ follows, and also entries from _foo_ themselves.
At first there are no entries, so go ahead and post an entry, say:
> _three colors: blue_
Click the _post_ button or hit return. You should see the new entry.
Post another entry, say:
> _three colors: white_
You should see both entries listed, with the most recent one at the
top. This is the timeline for user _foo_.
#### Step 2: Review microblogs for user _foo_
Open `http://localhost:8000/mine/foo` (Or click on the _Mine_ navigation tab).
This shows only the entries for user _foo_, omitting entries for followers.
You can use this page to verify the entry list for a given user.
#### Step 3: Load search page of user _bar_, and follow user _foo_
Open `http://localhost:8000/search/bar`.
You are now acting as user _bar_. Use the text _blue_ as a search query:
Click on the _follow_ button. Now user _bar_ is following user _foo_.
#### Step 4: Review timeline for user _bar_
Open `http://localhost:8000/bar` (Or click on the _Home_ navigation tab).
You should see the entries from user _foo_, as user _bar_ is now a follower.
#### Step 5: Post microblog entries for user _bar_
Enter and post the text:
> _the sound of music_
The timeline for user _bar_ now includes entries from both users _foo_
and _bar_.
#### Step 5: Post microblog entries for user _foo_
Return to user _foo_. Open `http://localhost:8000/foo`.
Post a new entry:
> _three colors: red_
You should see entries only for user _foo_, as _foo_ does **not** follow _bar_.
#### Step 6: Load microblog timeline of user _bar_
Go back to user _bar_. Open `http://localhost:8000/bar`.
You should see an updated list of entries, included all the entries
for user _foo_, as _bar_ **does** follow _foo_.
### Starting and stopping services
One of the main benefits of a microservice system is that you can
deploy services independently. In a local development setting this
means you should be able to start and stop services independently,
without stopping and starting the entire system. This has a huge
productivity benefit as you don't have to wait for the entire system
to ready itself.
To work on a particular service, update the code for that service, and
then stop and restart the service to see the new functionality. The
rest of the system keeps working. To really get the maximum benefit
from this technique, you need to avoid the use of schema validation
for your messages, and you must avoid creating hard couplings
(services should not know about each other). That is why the Seneca
framework provides pattern matching and transport independence as key
features - they enable rapid development.
The payoff for more deployment complexity is that you can change parts
of the system dynamically - don't lose that ability!
_fuge_ allows you to start and stop services using the 'start' and
'stop' commands.
To stop a service (say, _search_), use the command:
```sh
? fuge> stop search
```
If you now try to use the search feature, it will fail, but other
pages will still work. Another important benefit of microservices is that they can isolate errors in this way.
To restart the _search_ service, use:
```sh
? fuge> start search
```
And the search functionality works again. Notice that you did not have
to do any manual configuration to let the other services know about
the new instance of the _search_ service. Notice also that the other
services knew almost instantaneously about the new instance of the
_search_ service. That's because the SWIM algorithm propagated that
information quickly and efficiently throughout the network. No need
for 30 second timeouts to detect errors - SWIM works much more quickly
as it has many observers (the other services) so can detect failure,
and new services, very quickly with a high degree of confidence.
You can also run multiple instances of the same service. This lets you
scale to handle load. The underlying seneca-mesh network will
automatically round-robin messages between all available services for
a given message. Just start the service again:
```sh
? fuge> start search
```
And if you now run the `ps` command in fuge, you'll see the count is 2
instances.
### Accessing the network REPL
The system comes with a REPL service that lets you submit messages to the network manually. This is very useful for debugging. Access the REPL by telnetting into it:
```sh
$ telnet localhost 10001
```
Use the following message to see the user _foo's_ timeline:
```sh
seneca 2.0.1 7k/repl> timeline:list,user:foo
IN 000000: { timeline: 'list', user: 'foo' } # t7/39 timeline:* (6ln6zlc2qaer) transport_client
OUT 000000: { '0':
{ user: 'foo',
text: 'three colors: red',
when: 1461759716373,
can_follow: false },
'1':
{ user: 'foo',
text: 'three colors: white',
when: 1461759467135,
can_follow: false },
'2':
{ user: 'foo',
text: 'three colors: blue',
when: 1461759353996,
can_follow: false } }
```
You can enter messages directly into the terminal, in JSON format (the
format is lenient, see
[jsonic](https://github.com/rjrodger/jsonic)). The output will show
the message data `IN` and `OUT` of the network.
The REPL is a JavaScript console environment. There is a `seneca`
object that you can use directly, calling any methods of the seneca
API.
```sh
seneca 2.0.1 7k/repl> seneca.id
'7k/repl'
```
To get a list of all services on the network, and which messages they
listen for, try:
```sh
seneca 2.0.1 7k/repl> role:mesh,get:members
IN 000001: { role: 'mesh', get: 'members' } # aa/ie get:members,role:mesh (9mxp6qx6zyox) get_members
OUT 000001: {
...
'4':
{ pin: 'timeline:*',
port: 54932,
host: '0.0.0.0',
type: 'web',
model: 'consume',
instance: 'gt/timeline-shard' },
...
}
```
This message is so useful, that the repl service defines an alias for it: `m`.
The default configuration of the system uses shortened identifiers to
make debugging easier.
### Using the monitor
You can monitor the state of each service, and the message patterns
that it responds to, by running the `monitor` service separately in
its own terminal window. The `monitor` service prints a table
showing each service, and dynamically updates the table as services
come and go. See
[seneca-mesh](https://github.com/senecajs/seneca-mesh) for details.
```sh
$ node monitor/monitor.js
```
## Using Docker
You'll need to have the latest version of
[Docker](https://www.docker.com/) installed.
The [docker](https://github.com/senecajs/ramanujan/tree/master/docker)
folder contains Docker image setup Makefiles and Dockerfiles. Run the
top level `Makefile` to build all the images:
```
$ cd docker
$ make
```
Then deploy all the images using Docker Stack:
```
$ docker stack deploy -c ramanujan.yml ramanujan
```
This will start up everything. The containers run in their own overlay
network, but you will be able to access the website and repl on
localhost as with fuge.
If things go funny (hey, it's Docker), delete the stack, restart
Docker, and try again:
```
$ docker stack rm ramanujan
```
You can see some information about the containers with these commands:
```
$ docker stats
$ docker services ls
$ docker ps
```
To view the monitor, run it on the `repl` container:
```
$ docker exec -it `docker ps | grep repl | cut -f 1 -d ' '` /bin/sh
# node monitor.js
```
## Informal Requirements
> TODO
## Message Specification
> TODO
## Service Specification
> TODO
## Help and Questions
[github issue]: https://github.com/senecajs/ramanujan/issues
[gitter-url]: https://gitter.im/senecajs/ramanujan
## License
Copyright (c) Richard Rodger and other contributors 2015-2016, Licensed under [MIT](/LICENSE).
================================================
FILE: api/api-service.js
================================================
"use strict"
var PORT = process.env.PORT || process.argv[2] || 0
var HOST = process.env.HOST || process.argv[3] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[4] || '').split(',')
var SILENT = process.env.SILENT || process.argv[5] || 'true'
var Hapi = require('hapi')
var Chairo = require('chairo')
var Seneca = require('seneca')
var Rif = require('rif')
var tag = 'api'
var server = new Hapi.Server()
var rif = Rif()
var host = rif(HOST) || HOST
server.connection({
port: PORT,
host: host
})
server.register({
register: Chairo,
options:{
seneca: Seneca({
tag: tag,
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.use('zipkin-tracer', {sampling:1})
}
})
server.register({
register: require('wo'),
options:{
bases: BASES,
route: [
{path: '/api/ping'},
{path: '/api/post/{user}', method: 'post'},
{path: '/api/follow/{user}', method: 'post'},
],
sneeze: {
host: host,
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
}
})
server.route({
method: 'GET', path: '/api/ping',
handler: function( req, reply ){
server.seneca.act(
'role:api,cmd:ping',
function(err,out) {
reply(err||out)
}
)}
})
server.route({
method: 'POST', path: '/api/post/{user}',
handler: function( req, reply ){
console.log('/api/post A', req.params, req.payload)
server.seneca.act(
'post:entry',
{user:req.params.user, text:req.payload.text},
function(err,out) {
console.log('/api/post B', err, out)
if( err ) return reply.redirect('/error')
reply.redirect(req.payload.from)
}
)}
})
server.route({
method: 'POST', path: '/api/follow/{user}',
handler: function( req, reply ){
server.seneca.act(
'follow:user',
{user:req.params.user, target:req.payload.user},
function(err,out) {
if( err ) return reply.redirect('/error')
reply.redirect(req.payload.from)
}
)}
})
server.seneca
.add('role:api,cmd:ping', function(msg,done){
done( null, {pong:true,api:true,time:Date.now()})
})
.use('mesh',{
host: host,
bases: BASES,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
server.start(function(){
console.log(tag,server.info.host,server.info.port)
})
================================================
FILE: base/base.js
================================================
// Mesh "base" node: a well-known contact point other services use to
// join the SWIM membership network (seneca-mesh). Runs no business
// logic of its own.
//
// Usage examples (tag, port, host, bases):
// node base.js base0 39000 127.0.0.1 127.0.0.1:39000,127.0.0.1:39001
// node base.js base1 39001 127.0.0.1 127.0.0.1:39000,127.0.0.1:39001
//
// Configuration: environment variable, then positional argument, then default.
//   TAG    - seneca instance tag (appears in service ids and logs)
//   PORT   - port this base node listens on for mesh traffic
//   HOST   - host/interface to bind
//   BASES  - comma-separated list of all base-node addresses
//   SILENT - "true"/"false"; controls sneeze (gossip) logging
var TAG = process.env.TAG || process.argv[2] || 'base'
var PORT = process.env.PORT || process.argv[3] || 39999
var HOST = process.env.HOST || process.argv[4] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[5] || '').split(',')
var SILENT = process.env.SILENT || process.argv[6] || 'true'
require('seneca')({
tag: TAG,
// Demo logger keeps log lines short and human-readable.
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.test(console.log,'print')
//.use('zipkin-tracer', {sampling:1})
.use('mesh',{
// Mark this instance as a base (contact) node of the mesh.
isbase: true,
port: PORT,
host: HOST,
bases: BASES,
// Base nodes answer role:mesh messages (e.g. role:mesh,get:members).
pin:'role:mesh',
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
// Print the seneca instance id once startup completes.
console.log(this.id)
})
================================================
FILE: docker/Makefile
================================================
containers :
$(MAKE) -C shared container
$(MAKE) -C api container
$(MAKE) -C base container
$(MAKE) -C entry-cache container
$(MAKE) -C entry-store container
$(MAKE) -C fanout container
$(MAKE) -C follow container
$(MAKE) -C front container
$(MAKE) -C home container
$(MAKE) -C index container
$(MAKE) -C mine container
$(MAKE) -C post container
$(MAKE) -C repl container
$(MAKE) -C reserve container
$(MAKE) -C search container
$(MAKE) -C timeline container
$(MAKE) -C timeline-shard container
$(MAKE) -C monitor container
.PHONY : containers
================================================
FILE: docker/api/Dockerfile
================================================
FROM shared
ADD api-service.js .
CMD ["node", "api-service.js"]
================================================
FILE: docker/api/Makefile
================================================
container :
cp ../../api/api-service.js .
docker build -t api .
docker images | grep api
run-single :
docker service create --replicas 1 --network ramanujan --name api -e HOST=eth0 -e BASES=base0:39000,base1:39000 api
rm-single :
docker service rm api
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/api/api-service.js
================================================
"use strict"
var PORT = process.env.PORT || process.argv[2] || 0
var HOST = process.env.HOST || process.argv[3] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[4] || '').split(',')
var SILENT = process.env.SILENT || process.argv[5] || 'true'
var Hapi = require('hapi')
var Chairo = require('chairo')
var Seneca = require('seneca')
var Rif = require('rif')
var tag = 'api'
var server = new Hapi.Server()
var rif = Rif()
var host = rif(HOST) || HOST
server.connection({
port: PORT,
host: host
})
server.register({
register: Chairo,
options:{
seneca: Seneca({
tag: tag,
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.use('zipkin-tracer', {sampling:1})
}
})
server.register({
register: require('wo'),
options:{
bases: BASES,
route: [
{path: '/api/ping'},
{path: '/api/post/{user}', method: 'post'},
{path: '/api/follow/{user}', method: 'post'},
],
sneeze: {
host: host,
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
}
})
server.route({
method: 'GET', path: '/api/ping',
handler: function( req, reply ){
server.seneca.act(
'role:api,cmd:ping',
function(err,out) {
reply(err||out)
}
)}
})
server.route({
method: 'POST', path: '/api/post/{user}',
handler: function( req, reply ){
console.log('/api/post A', req.params, req.payload)
server.seneca.act(
'post:entry',
{user:req.params.user, text:req.payload.text},
function(err,out) {
console.log('/api/post B', err, out)
if( err ) return reply.redirect('/error')
reply.redirect(req.payload.from)
}
)}
})
server.route({
method: 'POST', path: '/api/follow/{user}',
handler: function( req, reply ){
server.seneca.act(
'follow:user',
{user:req.params.user, target:req.payload.user},
function(err,out) {
if( err ) return reply.redirect('/error')
reply.redirect(req.payload.from)
}
)}
})
server.seneca
.add('role:api,cmd:ping', function(msg,done){
done( null, {pong:true,api:true,time:Date.now()})
})
.use('mesh',{
host: host,
bases: BASES,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
server.start(function(){
console.log(tag,server.info.host,server.info.port)
})
================================================
FILE: docker/base/Dockerfile
================================================
FROM shared
ADD base.js .
CMD ["node", "base.js"]
================================================
FILE: docker/base/Makefile
================================================
container :
cp ../../base/base.js .
docker build -t base .
docker images | grep base
run-single-base0:
docker service create --replicas 1 --network ramanujan --name base0 -e TAG=base0 -e PORT=39000 -e HOST=base0 -e BASES=base0:39000,base1:39000 base
run-single-base1:
docker service create --replicas 1 --network ramanujan --name base1 -e TAG=base1 -e PORT=39000 -e HOST=base1 -e BASES=base0:39000,base1:39000 base
rm-single-base0:
docker service rm base0
rm-single-base1:
docker service rm base1
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/base/base.js
================================================
// Mesh "base" node (Docker copy of base/base.js): a well-known contact
// point other services use to join the SWIM membership network
// (seneca-mesh). Runs no business logic of its own.
//
// Usage examples (tag, port, host, bases):
// node base.js base0 39000 127.0.0.1 127.0.0.1:39000,127.0.0.1:39001
// node base.js base1 39001 127.0.0.1 127.0.0.1:39000,127.0.0.1:39001
//
// Configuration: environment variable, then positional argument, then default.
//   TAG    - seneca instance tag (appears in service ids and logs)
//   PORT   - port this base node listens on for mesh traffic
//   HOST   - host/interface to bind
//   BASES  - comma-separated list of all base-node addresses
//   SILENT - "true"/"false"; controls sneeze (gossip) logging
var TAG = process.env.TAG || process.argv[2] || 'base'
var PORT = process.env.PORT || process.argv[3] || 39999
var HOST = process.env.HOST || process.argv[4] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[5] || '').split(',')
var SILENT = process.env.SILENT || process.argv[6] || 'true'
require('seneca')({
tag: TAG,
// Demo logger keeps log lines short and human-readable.
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.test(console.log,'print')
//.use('zipkin-tracer', {sampling:1})
.use('mesh',{
// Mark this instance as a base (contact) node of the mesh.
isbase: true,
port: PORT,
host: HOST,
bases: BASES,
// Base nodes answer role:mesh messages (e.g. role:mesh,get:members).
pin:'role:mesh',
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
// Print the seneca instance id once startup completes.
console.log(this.id)
})
================================================
FILE: docker/docker.txt
================================================
# These are development notes NOT instructions!
# on host
docker-machine ls
docker-machine create --driver virtualbox manager1
docker-machine create --driver virtualbox worker1
docker-machine create --driver virtualbox worker2
docker-machine ls
# MANAGER-IP
docker-machine ip manager1
docker-machine ssh manager1
# inside manager1
docker swarm init --advertise-addr
docker swarm join-token worker
exit
docker-machine ssh worker1
# inside worker1
docker swarm join --token :2377
exit
docker-machine ssh worker2
# inside worker2
docker swarm join --token :2377
exit
docker-machine ssh manager1 -A
# inside manager1
docker node ls
docker network create --driver overlay ramanujan
docker network ls
# install emacs
tce
# emacs.tcz
# libXrandr.tcz
# make.tcz
TERM=vt100 emacs -nw
# IMPORTANT: use the same host name everywhere otherwise swim-js mappings will fail
# TODO: order properly
docker service create --replicas 1 --network ramanujan --name base1 -e TAG=base1 -e PORT=39000 -e HOST=base
1 -e BASES=base0:39000,base1:39000 base
docker service create --replicas 1 --network ramanujan -p 10001:10001 --name repl -e TAG=repl -e REPL_HOST=0.0.0.0 -e HOST=@eth2 -e BASES=base0:3900
0,base1:39000 repl
# eth2 as publish
docker service create --replicas 1 --network ramanujan --publish 8000:8000 --name front -e HOST=eth2
-e BASES=base0:39000,base1:39000 front
# eth0 as no publish
docker service create --replicas 1 --network ramanujan --name home -e HOST=eth0 -e BASES=base0:39000,ba
se1:39000 home
# stack
docker stack deploy -c ramanujan.yml ramanujan
docker stack rm ramanujan
# monitor
docker exec -it `docker ps | grep repl | cut -f 1 -d ' '` /bin/sh
$ node monitor.js
================================================
FILE: docker/entry-cache/Dockerfile
================================================
FROM shared
ADD entry-cache-logic.js .
ADD entry-cache-service.js .
CMD ["node", "entry-cache-service.js"]
================================================
FILE: docker/entry-cache/Makefile
================================================
container :
cp ../../entry-cache/entry-cache-*.js .
docker build -t entry-cache .
docker images | grep entry-cache
run-single :
docker service create --replicas 1 --network ramanujan --name entry-cache -e HOST=@eth0 -e BASES=base0:39000,base1:39000 entry-cache
rm-single :
docker service rm entry-cache
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/entry-cache/entry-cache-logic.js
================================================
'use strict'
var _ = require('lodash')
module.exports = function entry_cache (options) {
var seneca = this
var cache = {}
seneca.add('store:save,kind:entry', function(msg, done) {
delete cache[msg.user]
msg.cache = true
this.act(msg, done)
})
seneca.add('store:list,kind:entry', function(msg, done) {
if( cache[msg.user] ) {
return done( null, cache[msg.user] )
}
msg.cache = true
this.act(msg, function(err,list){
if(err) return done(err)
cache[msg.user] = list
done(null,list)
})
})
}
================================================
FILE: docker/entry-cache/entry-cache-service.js
================================================
// Entry-cache microservice entry point.
// Configuration comes from env vars, falling back to positional CLI args.
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
require('seneca')({
tag:'entry-cache',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.use('zipkin-tracer', {sampling:1})
.use('basic')
.use('entity')
.use('entry-cache-logic')
// Join the mesh: this instance fields all store:* messages for kind:entry.
// The cache plugin forwards misses tagged cache:true, which the
// entry-store service pins on — so all store traffic flows through here.
.use('mesh',{
pin: 'store:*,kind:entry',
bases: BASES,
host: HOST,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
console.log(this.id)
})
================================================
FILE: docker/entry-store/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the plugin logic and the service entry point.
ADD entry-store-logic.js .
ADD entry-store-service.js .
# Launch the entry-store microservice.
CMD ["node", "entry-store-service.js"]
================================================
FILE: docker/entry-store/Makefile
================================================
# Copy the canonical sources into the build context and build the image.
container :
cp ../../entry-store/entry-store-*.js .
docker build -t entry-store .
docker images | grep entry-store
# Run one replica on the ramanujan overlay network (base services must be up).
run-single :
docker service create --replicas 1 --network ramanujan --name entry-store -e HOST=@eth0 -e BASES=base0:39000,base1:39000 entry-store
rm-single :
docker service rm entry-store
# Remove copied sources and editor backup files from the build context.
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/entry-store/entry-store-logic.js
================================================
module.exports = function entry_store (options) {
var seneca = this
seneca.add('store:save,kind:entry', function(msg, done) {
this
.make('entry', {
when: msg.when,
user: msg.user,
text: msg.text
})
.save$(function(err, entry) {
if(err) return done(err)
this.act(
{
timeline: 'insert',
users: [msg.user],
},
entry,
function(err) {
return done(err, entry)
})
})
})
seneca.add('store:list,kind:entry', function(msg, done) {
this
.make('entry')
.list$( {user: msg.user}, function(err, list) {
if(err) return done(err)
list.reverse( function(a, b) {
return a.when - b.when
})
done( null, list )
})
})
}
================================================
FILE: docker/entry-store/entry-store-service.js
================================================
// Entry-store microservice entry point.
// Configuration comes from env vars, falling back to positional CLI args.
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
require('seneca')({
tag: 'entry-store',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs: true}
})
//.use('zipkin-tracer', {sampling:1})
.use('basic')
.use('entity')
.use('entry-store-logic')
// Join the mesh. The pin includes cache:true: this service only receives
// store messages already tagged by the entry-cache layer, so all store
// traffic flows through the cache first.
.use('mesh',{
pin: 'store:*,kind:entry,cache:true',
bases: BASES,
host: HOST,
sneeze:{
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
console.log(this.id)
})
================================================
FILE: docker/fanout/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the plugin logic and the service entry point.
ADD fanout-logic.js .
ADD fanout-service.js .
# Launch the fanout microservice.
CMD ["node", "fanout-service.js"]
================================================
FILE: docker/fanout/Makefile
================================================
# Copy the canonical sources into the build context and build the image.
container :
cp ../../fanout/fanout-*.js .
docker build -t fanout .
docker images | grep fanout
# Run one replica on the ramanujan overlay network (base services must be up).
run-single :
docker service create --replicas 1 --network ramanujan --name fanout -e HOST=@eth0 -e BASES=base0:39000,base1:39000 fanout
rm-single :
docker service rm fanout
# Remove copied sources and editor backup files from the build context.
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/fanout/fanout-logic.js
================================================
'use strict'
var _ = require('lodash')
// Seneca plugin: fan a newly stored entry out to the timelines of the
// author's followers. Triggered via the observed info:entry announcement
// (see fanout-service.js).
module.exports = function fanout (options) {
var seneca = this
seneca.add('fanout:entry', function(msg, done) {
// Respond immediately: fan-out is fire-and-forget, so the original
// poster never waits for follower timelines to update. Errors in the
// work below are deliberately dropped.
done()
// Strip Seneca transport/meta fields so only entry data is forwarded.
var entry = this.util.clean(msg)
delete entry.fanout
this.act('follow:list,kind:followers',{user:entry.user},function(err,userlist){
if(err) return
if( userlist && 0 < userlist.length ) {
// One timeline:insert message carries the entry to all followers.
this.act({
timeline: 'insert',
users: userlist,
}, entry)
}
})
})
}
================================================
FILE: docker/fanout/fanout-service.js
================================================
// Fanout microservice entry point.
// Configuration comes from env vars, falling back to positional CLI args.
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
require('seneca')({
tag: 'fanout',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs: true}
})
//.use('zipkin-tracer', {sampling:1})
.use('fanout-logic')
// Translate the observed new-entry announcement into a fanout:entry act.
.add('info:entry', function(msg,done){
delete msg.info
this.act('fanout:entry',msg,done)
})
.use('mesh',{
listen:[
// Direct actor for fanout:* messages...
{pin: 'fanout:*'},
// ...and passive observer of entry announcements (model:observe).
{pin: 'info:entry', model:'observe'}
],
bases: BASES,
host: HOST,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
console.log(this.id)
})
================================================
FILE: docker/follow/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the plugin logic and the service entry point.
ADD follow-logic.js .
ADD follow-service.js .
# Launch the follow microservice.
CMD ["node", "follow-service.js"]
================================================
FILE: docker/follow/Makefile
================================================
# Copy the canonical sources into the build context and build the image.
container :
cp ../../follow/follow-*.js .
docker build -t follow .
docker images | grep follow
# Run one replica on the ramanujan overlay network (base services must be up).
run-single :
docker service create --replicas 1 --network ramanujan --name follow -e HOST=@eth0 -e BASES=base0:39000,base1:39000 follow
rm-single :
docker service rm follow
# Remove copied sources and editor backup files from the build context.
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/follow/follow-logic.js
================================================
var _ = require('lodash')
// Seneca plugin: follower/following relationships between users.
// One 'follow' entity per user (id = user name) holds two arrays:
// 'followers' and 'following'.
module.exports = function follow (options) {
var seneca = this
// follow:user — make msg.user follow msg.target. Records the relation on
// both entities, then back-fills msg.user's timeline with msg.target's
// existing entries.
seneca.add('follow:user', function(msg, done) {
var seneca = this
relate( seneca, 'followers', msg.target, msg.user, true, function(err) {
if( err ) return done(err)
relate( seneca, 'following', msg.user, msg.target, true, function(err) {
if( err ) return done(err)
seneca.act('store:list,kind:entry',{user:msg.target}, function(err,list) {
if( err ) return done(err)
// Fire-and-forget: timeline inserts are not awaited before
// responding, and their errors are dropped.
_.each(list,function(entry){
seneca.act({
timeline: 'insert',
users: [msg.user],
}, entry.data$())
})
done()
})
})
})
})
// follow:list — list the user's relations of msg.kind ('followers' or
// 'following'). Responds with [] when the entity or array is missing.
seneca.add('follow:list', function(msg,done){
this
.make('follow')
.load$(msg.user, function(err,follow){
var list = (follow && follow[msg.kind]) || []
done(err, list)
})
})
// Add `to` to the `relation` array of user `from`'s follow entity.
// When the entity is missing and `create` is set, a reserve:create key
// acts as a distributed lock so two instances don't both create it; if
// the lock is held elsewhere, retry once with create=false on the
// assumption the other holder created the entity.
function relate(seneca,relation,from,to,create,done) {
seneca
.make('follow')
.load$(from, function(err, follow) {
if( err ) return done(err)
if (follow) {
add_follower( null, follow, done )
}
else if (create) {
this.act('reserve:create', {key: 'follow/'+from}, function (err, status) {
if( err ) return done(err)
if( !status.ok ) {
return relate(this,relation,from,to,false,done)
}
var follow = this.make('follow',{id$:from})
follow[relation] = []
add_follower(err, follow, function (err) {
if( err ) return done(err)
// NOTE(review): the lock release is not awaited, and `this`
// here is whatever save$ invoked the callback with — confirm
// reserve:remove is actually delivered (a dropped release
// would leave the follow/<from> key reserved).
this.act('reserve:remove', {key: 'follow/'+from})
done()
})
})
}
// Push `to` onto the relation array, dedupe, and persist the entity.
function add_follower( err, follow, done ) {
if( err ) return done(err)
follow[relation] = (follow[relation] || [])
follow[relation].push(to)
follow[relation] = _.uniq(follow[relation])
follow.save$(done)
}
})
}
}
================================================
FILE: docker/follow/follow-service.js
================================================
// Follow microservice entry point: owns all follow:* messages on the mesh.
// Configuration comes from env vars, falling back to positional CLI args.
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
require('seneca')({
tag: 'follow',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs: true}
})
//.use('zipkin-tracer', {sampling:1})
.use('entity')
.use('follow-logic')
.use('mesh',{
pin: 'follow:*',
bases: BASES,
host: HOST,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
console.log(this.id)
})
================================================
FILE: docker/front/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the HTTP router and its static assets.
ADD front.js .
ADD www www
# Launch the public front router.
CMD ["node", "front.js"]
================================================
FILE: docker/front/Makefile
================================================
# Copy the router source and static assets into the build context and build.
container :
cp ../../front/front.js .
cp -r ../../front/www .
docker build -t front .
docker images | grep front
# Run one replica; front is the only service that publishes a host port.
run-single :
docker service create --replicas 1 --network ramanujan --publish 8000:8000 --name front -e HOST=eth2 -e BASES=base0:39000,base1:39000 front
rm-single :
docker service rm front
# Remove copied files and editor backups (the copied www dir is kept).
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/front/front.js
================================================
"use strict"
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
var Hapi = require('hapi')
var Rif = require('rif')
var server = new Hapi.Server()
var rif = Rif()
var host = rif(HOST) || HOST
server.connection({
port: 8000 // test with http://localhost:8000/api/ping
})
server.register(require('inert'))
server.register({
register: require('wo'),
options: {
bases: BASES,
sneeze: {
host: host,
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
}
})
server.route({
method: 'GET', path: '/api/ping',
handler: {
wo: {}
}
})
server.route({
method: 'POST', path: '/api/post/{user}',
handler: {
wo: {
passThrough: true
}
}
})
server.route({
method: 'POST', path: '/api/follow/{user}',
handler: {
wo: {
passThrough: true
}
}
})
server.route({
method: 'GET', path: '/mine/{user}',
handler: {
wo: {}
}
})
server.route({
method: ['GET','POST'], path: '/search/{user}',
handler: {
wo: {}
}
})
server.route({
method: 'GET', path: '/{user}',
handler: {
wo: {}
}
})
server.route({
path: '/favicon.ico',
method: 'get',
config: {
cache: {
expiresIn: 1000*60*60*24*21
}
},
handler: function(request, reply) {
reply().code(200).type('image/x-icon')
}
})
server.route({
method: 'GET',
path: '/res/{path*}',
handler: {
directory: {
path: __dirname + '/www/res',
}
}
})
server.start(function(){
console.log('front',server.info.uri)
})
================================================
FILE: docker/front/www/res/site.css
================================================
/* Global reset: single sans-serif face, no default spacing or underlines. */
* {
font-family: arial;
padding: 0px;
margin: 0px;
text-decoration: none;
}
body {
background-color: #eef;
}
input {
border: 1px solid #222;
font-size: 14pt;
padding: 2px;
}
input[type='text'] {
width: 25%;
}
/* Dark top navigation bar. */
div.header {
background-color: #222;
margin: 0px 0px 10px 0px;
padding: 4px;
}
div.header a {
display: inline-block;
padding: 4px;
color: #eee;
}
/* Inverted colors mark the currently active nav link. */
div.header .nav_active {
background-color: #eef;
color: #222;
}
div.container {
padding: 4px;
}
/* One microblog entry card (timestamp/user in grey). */
div.entry {
padding: 2px;
margin: 4px 2px;
border: 1px solid #ccc;
width: 33%;
color: #666;
}
/* Entry body text, darker than the card metadata. */
div.text {
color: #222;
padding: 2px;
margin-top: 2px;
}
================================================
FILE: docker/home/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the service entry point and its handlebars templates.
ADD home-service.js .
ADD www www
# Launch the home page microservice.
CMD ["node", "home-service.js"]
================================================
FILE: docker/home/Makefile
================================================
# Copy the service source and templates into the build context and build.
container :
cp ../../home/home-service.js .
cp -r ../../home/www .
docker build -t home .
docker images | grep home
# Run one replica on the ramanujan overlay network (base services must be up).
run-single :
docker service create --replicas 1 --network ramanujan --name home -e HOST=eth0 -e BASES=base0:39000,base1:39000 home
rm-single :
docker service rm home
# Remove copied files and editor backups (the copied www dir is kept).
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/home/home-service.js
================================================
"use strict"
var PORT = process.env.PORT || process.argv[2] || 0
var HOST = process.env.HOST || process.argv[3] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[4] || '').split(',')
var SILENT = process.env.SILENT || process.argv[5] || 'true'
var hapi = require('hapi')
var chairo = require('chairo')
var vision = require('vision')
var inert = require('inert')
var handlebars = require('handlebars')
var _ = require('lodash')
var moment = require('moment')
var Seneca = require('seneca')
var Rif = require('rif')
var tag = 'home'
var server = new hapi.Server()
var rif = Rif()
var host = rif(HOST) || HOST
server.connection({
port: PORT,
host: host
})
server.register( vision )
server.register( inert )
server.register({
register:chairo,
options:{
seneca: Seneca({
tag: tag,
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.use('zipkin-tracer', {sampling:1})
}
})
server.register({
register: require('wo'),
options:{
bases: BASES,
route: [
{path: '/{user}'},
],
sneeze: {
host: host,
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
}
})
server.views({
engines: { html: handlebars },
path: __dirname + '/www',
layout: true
})
server.route({
method: 'GET', path: '/{user}',
handler: function( req, reply )
{
server.seneca.act(
'timeline:list',
{user:req.params.user},
function( err, entrylist ) {
if(err) {
entrylist = []
}
reply.view('home',{
user: req.params.user,
entrylist: _.map(entrylist,function(entry){
entry.when = moment(entry.when).fromNow()
return entry
})
})
})
}
})
server.seneca.use('mesh',{
host:host,
bases:BASES,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
server.start(function(){
console.log(tag,server.info.host,server.info.port)
})
================================================
FILE: docker/home/www/home.html
================================================
{{#each entrylist}}
================================================
FILE: docker/index/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the plugin logic and the service entry point.
ADD index-logic.js .
ADD index-service.js .
# Launch the search-index microservice.
CMD ["node", "index-service.js"]
================================================
FILE: docker/index/Makefile
================================================
# Copy the canonical sources into the build context and build the image.
container :
cp ../../index/index-*.js .
docker build -t index .
docker images | grep index
# Run one replica on the ramanujan overlay network (base services must be up).
run-single :
docker service create --replicas 1 --network ramanujan --name index -e HOST=@eth0 -e BASES=base0:39000,base1:39000 index
rm-single :
docker service rm index
# Remove copied sources and editor backup files from the build context.
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/index/index-logic.js
================================================
// Business logic for the index microservice.
// Provides a full text search index for microblog entries.
// Modules providing a simple in-memory full text search index.
// In production you could replace these with API calls to elasticsearch
// or similar.
var Levelup = require('levelup')
var Memdown = require('memdown')
var Search = require('search-index')
// This is the standard way to define a Seneca plugin.
module.exports = function index (options) {
// The plugin Seneca instance is provided by `this`.
// This Seneca instance tracks patterns against this plugin
// as an aid to debugging.
var seneca = this
// The search index. This is the internal state of the service. In general.
// services should *not* have internal state, as it has to be synchronized
// between multiple instances. This service is purely for demonstration purposes,
// and only a single instance should be run.
var index
// The Seneca patterns that this plugin defines.
// This is the `interface` for this plugin - matching messages will end up here.
seneca.add('search:query', search_query)
seneca.add('search:insert', search_insert)
seneca.add('init:index', init)
// Query the search index.
// The implementation logic consists of calls to the search index API.
function search_query (msg, done) {
console.log(terms)
var terms = msg.query.split(/ +/)
var query = {
query: {
AND: {text:terms}
}
}
index.search(query, function (err, out) {
var hits = (out && out.hits) || []
hits = hits.map(function (hit) {
return hit.document
})
done(null, hits)
})
}
// Insert a document into the search index.
function search_insert (msg, done) {
index.add([{
id: msg.id,
text: msg.text,
user: msg.user,
when: msg.when
}], {}, done)
}
// Initialize the plugin. This is the standard mechanism to initialize a Seneca
// plugin - by defining a special pattern of the form init:.
function init (msg, done) {
Search({
indexes: Levelup('si', {
db: Memdown,
valueEncoding: 'json'
})
}, function(err, si) {
if (err) return done(err)
index = si
done()
})
}
}
================================================
FILE: docker/index/index-service.js
================================================
// Index microservice entry point: full-text search over entries.
// Configuration comes from env vars, falling back to positional CLI args.
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
require('seneca')({
tag: 'index',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs: true}
})
//.use('zipkin-tracer', {sampling:1})
.use('index-logic')
// Translate the observed new-entry announcement into an index insertion.
.add('info:entry', function(msg,done){
delete msg.info
this.act('search:insert',msg,done)
})
.use('mesh',{
listen:[
// Direct actor for search queries and inserts...
{pin: 'search:*'},
// ...and passive observer of entry announcements (model:observe).
{pin: 'info:entry', model:'observe'}
],
bases: BASES,
host: HOST,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
console.log(this.id)
})
================================================
FILE: docker/mine/Dockerfile
================================================
# Build on the shared base image (node runtime plus common npm modules).
FROM shared
# Copy in the service entry point and its handlebars templates.
ADD mine-service.js .
ADD www www
# Launch the "my entries" page microservice.
CMD ["node", "mine-service.js"]
================================================
FILE: docker/mine/Makefile
================================================
# Copy the service source and templates into the build context and build.
container :
cp ../../mine/mine-service.js .
cp -r ../../mine/www .
docker build -t mine .
docker images | grep mine
# Run one replica on the ramanujan overlay network (base services must be up).
run-single :
docker service create --replicas 1 --network ramanujan --name mine -e HOST=eth0 -e BASES=base0:39000,base1:39000 mine
rm-single :
docker service rm mine
# Remove copied files and editor backups (the copied www dir is kept).
clean :
rm -f *~
rm -f *.js
rm -f *.json
.PHONY : container clean
================================================
FILE: docker/mine/mine-service.js
================================================
"use strict"
var PORT = process.env.PORT || process.argv[2] || 0
var HOST = process.env.HOST || process.argv[3] || 0
var BASES = (process.env.BASES || process.argv[4] || '').split(',')
var SILENT = process.env.SILENT || process.argv[5] || 'true'
var hapi = require('hapi')
var chairo = require('chairo')
var vision = require('vision')
var inert = require('inert')
var handlebars = require('handlebars')
var _ = require('lodash')
var moment = require('moment')
var Seneca = require('seneca')
var Rif = require('rif')
var server = new hapi.Server()
var rif = Rif()
var host = rif(HOST) || HOST
server.connection({
port: PORT,
host: host
})
server.register( vision )
server.register( inert )
server.register({
register:chairo,
options:{
seneca: Seneca({
tag: 'mine',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.use('zipkin-tracer', {sampling:1})
.use('entity')
}
})
server.register({
register: require('wo'),
options:{
bases: BASES,
route: [
{path: '/mine/{user}'},
],
sneeze: {
host: host,
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
}
})
server.views({
engines: { html: handlebars },
path: __dirname + '/www',
layout: true
})
server.route({
method: 'GET', path: '/mine/{user}',
handler: function( req, reply )
{
server.seneca.act(
'store:list,kind:entry',
{user:req.params.user},
function( err, entrylist ) {
if(err) {
entrylist = []
}
reply.view('mine',{
user: req.params.user,
entrylist: _.map(entrylist,function(entry){
entry.when = moment(entry.when).fromNow()
return entry
})
})
})
}
})
server.seneca.use('mesh',{
bases:BASES,
host:host
})
server.start(function(){
console.log('mine',server.info.host,server.info.port)
})
================================================
FILE: docker/mine/www/home.html
================================================
{{#each entrylist}}
================================================
FILE: index/index-logic.js
================================================
// Business logic for the index microservice.
// Provides a full text search index for microblog entries.
// Modules providing a simple in-memory full text search index.
// In production you could replace these with API calls to elasticsearch
// or similar.
var Levelup = require('levelup')
var Memdown = require('memdown')
var Search = require('search-index')
// This is the standard way to define a Seneca plugin.
module.exports = function index (options) {
// The plugin Seneca instance is provided by `this`.
// This Seneca instance tracks patterns against this plugin
// as an aid to debugging.
var seneca = this
// The search index. This is the internal state of the service. In general.
// services should *not* have internal state, as it has to be synchronized
// between multiple instances. This service is purely for demonstration purposes,
// and only a single instance should be run.
var index
// The Seneca patterns that this plugin defines.
// This is the `interface` for this plugin - matching messages will end up here.
seneca.add('search:query', search_query)
seneca.add('search:insert', search_insert)
seneca.add('init:index', init)
// Query the search index.
// The implementation logic consists of calls to the search index API.
function search_query (msg, done) {
console.log(terms)
var terms = msg.query.split(/ +/)
var query = {
query: {
AND: {text:terms}
}
}
index.search(query, function (err, out) {
var hits = (out && out.hits) || []
hits = hits.map(function (hit) {
return hit.document
})
done(null, hits)
})
}
// Insert a document into the search index.
function search_insert (msg, done) {
index.add([{
id: msg.id,
text: msg.text,
user: msg.user,
when: msg.when
}], {}, done)
}
// Initialize the plugin. This is the standard mechanism to initialize a Seneca
// plugin - by defining a special pattern of the form init:.
function init (msg, done) {
Search({
indexes: Levelup('si', {
db: Memdown,
valueEncoding: 'json'
})
}, function(err, si) {
if (err) return done(err)
index = si
done()
})
}
}
================================================
FILE: index/index-service.js
================================================
// Index microservice entry point: full-text search over entries.
// Configuration comes from env vars, falling back to positional CLI args.
var HOST = process.env.HOST || process.argv[2] || '127.0.0.1'
var BASES = (process.env.BASES || process.argv[3] || '').split(',')
var SILENT = process.env.SILENT || process.argv[4] || 'true'
require('seneca')({
tag: 'index',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs: true}
})
//.use('zipkin-tracer', {sampling:1})
.use('index-logic')
// Translate the observed new-entry announcement into an index insertion.
.add('info:entry', function(msg,done){
delete msg.info
this.act('search:insert',msg,done)
})
.use('mesh',{
listen:[
// Direct actor for search queries and inserts...
{pin: 'search:*'},
// ...and passive observer of entry announcements (model:observe).
{pin: 'info:entry', model:'observe'}
],
bases: BASES,
host: HOST,
sneeze: {
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
})
.ready(function(){
console.log(this.id)
})
================================================
FILE: index/test/index-test.js
================================================
// Unit test for the index microservice.
// Uses https://github.com/hapijs/lab but easy to refactor for other unit testers.
// The utility function test_seneca constructs an instance of Seneca
// suitable for test execution, using the seneca.test() method.
var Lab = require('lab')
var Code = require('code')
var Seneca = require('seneca')
var lab = exports.lab = Lab.script()
var describe = lab.describe
var it = lab.it
var expect = Code.expect
// A suite of unit tests for this microservice.
describe('index', function () {
// A unit test (the test callback is named 'fin' to distinguish it from others).
it('insert-query', function (fin) {
// Create a Seneca instance for testing.
var seneca = test_seneca(fin)
// Gate the execution of actions for this instance. Gated actions are executed
// in sequence and each action waits for the previous one to complete. Gating
// is not required, but avoids excessive callbacks in the unit test code.
// Here it guarantees the insert finishes before the query runs.
seneca
.gate()
// Send an action, and validate the response.
.act({
search: 'insert',
id: ''+Math.random(),
when: Date.now(),
user: 'u0',
text: 'lorem ipsum dolor sit amet'
// Errors are routed to fin by test mode, so the callback ignores them.
}, function (ignore) {})
.act({
search: 'query',
query: 'ipsum',
// Because test mode is active, it is not necessary to handle
// callback errors. These are passed directly to the 'fin' callback.
}, function (ignore, list) {
expect(list.length).to.equal(1)
expect(list[0].text).to.equal('lorem ipsum dolor sit amet')
})
// Once all the tests are complete, invoke the test callback
.ready(fin)
})
})
// Construct a Seneca instance suitable for unit testing
// Construct a Seneca instance suitable for unit testing.
function test_seneca (fin) {
// log:'test' selects test-oriented logging (quiet during normal runs).
return Seneca({log: 'test'})
// activate unit test mode. Errors provide additional stack tracing context.
// The fin callback is called when an error occurs anywhere.
.test(fin)
// Load the microservice business logic.
.use(require('../index-logic'))
}
================================================
FILE: mine/mine-service.js
================================================
"use strict"
var PORT = process.env.PORT || process.argv[2] || 0
var HOST = process.env.HOST || process.argv[3] || 0
var BASES = (process.env.BASES || process.argv[4] || '').split(',')
var SILENT = process.env.SILENT || process.argv[5] || 'true'
var hapi = require('hapi')
var chairo = require('chairo')
var vision = require('vision')
var inert = require('inert')
var handlebars = require('handlebars')
var _ = require('lodash')
var moment = require('moment')
var Seneca = require('seneca')
var Rif = require('rif')
var server = new hapi.Server()
var rif = Rif()
var host = rif(HOST) || HOST
server.connection({
port: PORT,
host: host
})
server.register( vision )
server.register( inert )
server.register({
register:chairo,
options:{
seneca: Seneca({
tag: 'mine',
internal: {logger: require('seneca-demo-logger')},
debug: {short_logs:true}
})
//.use('zipkin-tracer', {sampling:1})
.use('entity')
}
})
server.register({
register: require('wo'),
options:{
bases: BASES,
route: [
{path: '/mine/{user}'},
],
sneeze: {
host: host,
silent: JSON.parse(SILENT),
swim: {interval: 1111}
}
}
})
server.views({
engines: { html: handlebars },
path: __dirname + '/www',
layout: true
})
server.route({
method: 'GET', path: '/mine/{user}',
handler: function( req, reply )
{
server.seneca.act(
'store:list,kind:entry',
{user:req.params.user},
function( err, entrylist ) {
if(err) {
entrylist = []
}
reply.view('mine',{
user: req.params.user,
entrylist: _.map(entrylist,function(entry){
entry.when = moment(entry.when).fromNow()
return entry
})
})
})
}
})
server.seneca.use('mesh',{
bases:BASES,
host:host
})
server.start(function(){
console.log('mine',server.info.host,server.info.port)
})
================================================
FILE: mine/www/layout.html
================================================
Microblog