Supervisor (#15)
parent d0f8fe7280
commit c47e73fc9a
.gitignore
@@ -1,3 +1,9 @@
 *~
 gonode
-./epmd
+cmd/epmd/epmd
+coverage.txt
+coverage.html
+*.swp
+tags
+.session
Binary file not shown (image added, 4.1 KiB).
Binary file not shown (image added, 552 KiB).
Binary file not shown (image added, 2.8 KiB).
.travis.yml
@@ -0,0 +1,14 @@
language: go

go:
  - 1.12.x

before_install:
  - go get -t -v ./...

script:
  - go vet
  - go test -coverprofile=coverage.txt -covermode=atomic ./...

after_success:
  - bash <(curl -s https://codecov.io/bash)
ChangeLog
@@ -1,9 +1,20 @@
 # Changelog
 All notable changes to this project will be documented in this file.

-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+This format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+#### [1.0.0](https://github.com/halturin/ergo/releases/tag/1.0.0) - 2019-11-30 ####
+There is a bunch of changes we deliver with this release:
+- We have changed the name to Ergo (or Ergo Framework). The GitHub repo has been renamed as well. We have also created the cloned repo `ergonode` to support users of the old version of this project, so it is still available at [https://github.com/halturin/ergonode](https://github.com/halturin/ergonode), but it is strongly recommended to use the new one.
+- Completely reworked (almost from scratch) the architecture of the whole project
+- Implemented the process-linking feature (in order to support Application/Supervisor behaviors)
+- Reworked the monitor feature. Now it has full-featured support for remote processes/nodes
+- Added multinode support
+- Added basic observer support
+- Improved code structure and readability
+- Among the new features we have added new bugs that are still uncovered :). Any feedback/bug report/contribution is highly appreciated
+
 #### [0.2.0](https://github.com/halturin/ergonode/releases/tag/0.2.0) - 2019-02-23 ####
 - Now we make versioning releases
 - Improved node creation. Now you can specify the listening port range. See 'Usage' for details
LICENSE
@@ -1,6 +1,6 @@
 This is the MIT license.

-Copyright (c) 2012-2013 Metachord Ltd.
+Copyright (c) Taras Halturin

 Permission is hereby granted, free of charge, to any person obtaining a copy of this
 software and associated documentation files (the "Software"), to deal in the Software
Makefile
@@ -1,12 +1,15 @@
-all:
-	go build examples/gonode.go
-
-run:
-	./gonode -cookie d3vc00k -listen 12321 -trace.node -trace.dist
+rungs:
+	go run --tags debug ./examples/genserver/demoGenServer.go -trace.node

 epmd:
-	go build cmd/epmd.go
+	go build cmd/epmd/epmd.go

-clean:
-	go clean
-	$(RM) ./gonode
+test:
+	go vet
+	go clean -testcache
+	go test ./...
+
+cover:
+	go test -coverprofile=cover.out ./...
+	go tool cover -html=cover.out -o coverage.html
+	rm cover.out
README.md
@@ -1,184 +1,235 @@
-# Ergonode #
+# Ergo Framework #

-Implementation of Erlang/OTP node in Go
 [![GitHub release](https://img.shields.io/github/release/halturin/ergonode.svg)](https://github.com/halturin/ergonode/releases/latest)
 [![Go Report Card](https://goreportcard.com/badge/github.com/halturin/ergonode)](https://goreportcard.com/report/github.com/halturin/ergonode)
 [![GoDoc](https://godoc.org/code.gitea.io/gitea?status.svg)](https://godoc.org/github.com/halturin/ergonode)
 [![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT)
 [![codecov](https://codecov.io/gh/halturin/ergonode/branch/supervisor/graph/badge.svg)](https://codecov.io/gh/halturin/ergonode)
 [![Build Status](https://travis-ci.org/halturin/ergonode.svg?branch=supervisor)](https://travis-ci.org/halturin/ergonode)

-#### Features ####
-
-* Publish listen port via EPMD
-* Embedded EPMD server
-* Handle incoming connections from other nodes using the Erlang Distribution Protocol
-* Spawn Erlang-like processes
-* Register and unregister processes with a simple atom
-* Send sync and async messages like `erlang:gen_call` and `erlang:gen_cast`
-* Create your own process with the `GenServer` behaviour (like `gen_server` in Erlang/OTP)
-* Atomic 'state' of GenServer
-* Initiate connections to other nodes
-* RPC callbacks
-* Monitor processes
-* Monitor nodes
-* Support Erlang 21.*
-
-#### Requirement ####
-
-* Go 1.10 and above
-
-#### EPMD ####
-Ergonode has an embedded EPMD implementation. It allows running your nodes without the erlang epmd dependency. There are two reasons to activate the embedded epmd:
-
-- the EPMD port is not taken during an ergonode initialization
-- lost connection to the EPMD server
+Implementation of Erlang/OTP in Golang

+### Purpose ###

+The goal of this project is to leverage Erlang/OTP experience with Golang performance. *Ergo Framework* implements OTP design patterns such as `GenServer`/`Supervisor`/`Application` and lets you create high-performance, reliable applications with native integration into Erlang infrastructure.

+### Features ###

+* Erlang node (run single/[multinode](#multinode))
+* [embedded EPMD](#epmd) (in order to get rid of the erlang dependency)
+* Spawn Erlang-like processes
+* Register/unregister processes with a simple atom
+* `GenServer` behavior support (with atomic state)
+* `Supervisor` behavior support (with all known restart strategies)
+* `Application` behavior support
+* Connect to (accept connections from) any Erlang node within a cluster (or clusters, if running as multinode)
+* Make sync/async requests in the fashion of `gen_server:call` or `gen_server:cast`
+* Monitor processes/nodes
+  * local -> local
+  * local -> remote
+  * remote -> local
+* Link processes
+  * local <-> local
+  * local <-> remote
+  * remote <-> local
+* RPC callbacks support
+* Experimental [observer support](#observer)
+* Unmarshalling terms into a struct using etf.TermIntoStruct (see the sketch right after this list)
+* Support Erlang 21.*
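The `etf.TermIntoStruct` feature from the list above, as a minimal sketch. The exact signature is an assumption inferred from the feature name and the `etf` package used elsewhere in this README; the `Person` type and its values are made up for illustration:

```golang
package main

import (
	"fmt"

	"github.com/halturin/ergonode/etf"
)

// Person is a hypothetical destination type for a decoded Erlang term.
type Person struct {
	Name string
	Age  int
}

func main() {
	// Suppose a remote Erlang node sent us the tuple {"alice", 30}.
	term := etf.Term(etf.Tuple{"alice", 30})

	var p Person
	// Assumed signature: TermIntoStruct(term etf.Term, dest interface{}) error
	if err := etf.TermIntoStruct(term, &p); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%+v\n", p) // {Name:alice Age:30}
}
```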
+### Requirements ###

+* Go 1.10 and above

-Current implementation has a bit different behaviour (from the original one) - ergonode tries to restore the connection to the EPMD server in case it has been lost. At the same time ergonode tries to start its own EPMD (as an embedded set of goroutines) to serve all epmd requests from the nodes.
-
-You may want to use epmd as a standalone application. There is a simple drop-in replacement of [epmd](http://erlang.org/doc/man/epmd.html):
-
-```
-go get -u github.com/halturin/ergonode/cmd/epmd
-```
-
-## Changelog ##
-
-Here are the changes of the latest release. For more details see the [ChangeLog](ChangeLog)
-
-#### [0.2.0](https://github.com/halturin/ergonode/releases/tag/0.2.0) - 2019-02-25 ####
-- Now we make versioning releases
-- Improve node creation. Now you can specify the listening port range. See 'Usage' for the details
-- Add embedded EPMD.

+### EPMD ###

+*Ergo Framework* has an embedded EPMD implementation in order to run your node without an external epmd process. By default it works as a client with the Erlang epmd daemon or with other ergo nodes.

+The one thing that makes the embedded EPMD different is the behavior on connection hangs: if an ergo node is running as an EPMD client and loses the connection, it tries either to run its own embedded EPMD service or to restore the lost connection.

+As an extra option, we provide the EPMD service as a standalone application - a simple drop-in replacement of the original Erlang epmd daemon:

+`go get -u github.com/halturin/ergo/cmd/epmd`

+### Multinode ###

+This feature allows creating two or more nodes within a single running instance. The only requirement is to specify a different set of options when creating the nodes (such as node name, epmd port number, secret cookie). You may also want to use this feature to create a 'proxy' node between clusters.
+See [Examples](#examples) for more details; a minimal sketch follows below.
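A minimal sketch of the multinode feature described above, assuming only the `CreateNode`/`NodeOptions` API shown in the GenServer example later in this README; the node names and cookies are placeholders:

```golang
package main

import (
	ergo "github.com/halturin/ergonode"
)

func main() {
	// Two independent nodes within a single running instance; each one
	// gets its own node name and secret cookie (and, if needed, its own
	// listen/epmd options via NodeOptions).
	node1 := ergo.CreateNode("node1@localhost", "cookie_one", ergo.NodeOptions{})
	node2 := ergo.CreateNode("node2@localhost", "cookie_two", ergo.NodeOptions{})
	_, _ = node1, node2

	// Keep both nodes running until the process is killed.
	select {}
}
```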
-## Usage ##
+### Observer ###

+It allows you to see most of the metrics/information using the standard tool of the Erlang distribution. The example below shows this feature in action using one of the [examples](examples/):

+![observer demo](./.images/observer.gif)

+### Changelog ###

+Here are the changes of the latest release. For more details see the [ChangeLog](ChangeLog)

+#### [1.0.0](https://github.com/halturin/ergo/releases/tag/1.0.0) - 2020-03-03 ####

+There is a bunch of changes we deliver with this release:

+* We have changed the name to Ergo (or Ergo Framework). The GitHub repo has been renamed as well. We have also created the cloned repo `ergonode` to support users of the old version of this project, so it is still available at [https://github.com/halturin/ergonode](https://github.com/halturin/ergonode), but it is strongly recommended to use the new one.
+* Completely reworked (almost from scratch) the architecture of the whole project
+* Implemented the process-linking feature (in order to support Application/Supervisor behaviors)
+* Reworked the monitor feature. Now it has full-featured support for remote processes/nodes
+* Added multinode support
+* Added experimental observer support
+* Fixed incorrect ETF string encoding (it was encoded as binary instead of string)
+* Improved the ETF TermIntoStruct decoder
+* Improved code structure and readability
+### Examples ###

+The code below is a simple implementation of the GenServer pattern, `examples/simple/GenServer.go`:

 ```golang
 package main

-type goGenServ struct {
-	ergonode.GenServer
-	completeChan chan bool
-}
+import (
+	"fmt"
+	"time"
+
+	ergo "github.com/halturin/ergonode"
+	"github.com/halturin/ergonode/etf"
+)

+type ExampleGenServer struct {
+	ergo.GenServer
+	process ergo.Process
+}

-// listen from ListenRangeBegin ... ListenRangeEnd and use a custom EPMD port
-// n := ergonode.Create(NodeName, Cookie, uint16(ListenRangeBegin), uint16(ListenRangeEnd), uint16(EPMDPort))
-//
-// listen from ListenRangeBegin ... ListenRangeEnd with the default EPMD port 4369
-// n := ergonode.Create(NodeName, Cookie, uint16(ListenRangeBegin), uint16(ListenRangeEnd))
-//
-// listen from ListenRangeBegin ... 65000 with the default EPMD port 4369
-// n := ergonode.Create(NodeName, Cookie, uint16(ListenRangeBegin))
-
-// use the default listen port range 15000...65000 and the default EPMD port 4369
-Node := ergonode.Create("examplenode@127.0.0.1", "SecretCookie")
-completeChan := make(chan bool)
-gs := new(goGenServ)
-
-n.Spawn(gs, completeChan)
-
-message := etf.Term(etf.Atom("hello"))
-
-// gen_server:call({pname, 'node@address'} , hello) with the default timeout of 5 seconds
-to := etf.Tuple{etf.Atom("pname"), etf.Atom("node@address")}
-
-answer, err := gs.Call(to, message)
-fmt.Printf("Got response: %v\n", answer)
-
-// specify a custom call timeout
-// gen_server:call({pname, 'node@address'} , hello, 8)
-answer, err := gs.Call(Pid, message, 12)
-
-// it's also possible to call using a Pid (etf.Pid)
-answer, err := gs.Call(Pid, message)
-
-// gen_server:cast({pname, 'node@address'} , hello)
-to := etf.Tuple{etf.Atom("pname"), etf.Atom("node@address")}
-gs.Cast(to, message)
-
-// the same way using a Pid
-gs.Cast(Pid, message)
-
-// simple sending of a message: 'Pid ! hello'
-gs.Send(Pid, message)
-
-// to get the pid like erlang:self() does
-gs.Self()
-
-// set up a monitor. this gen_server will receive a message (via HandleInfo) like
-// {'DOWN',#Ref<0.0.13893633.237772>,process,<26194.4.1>, Reason})
-// in case the remote process went down for some reason
-gs.Monitor(Pid)
-
-// *** http://erlang.org/doc/man/erlang.html#monitor_node-2
-// *** Making several calls to monitor_node(Node, true) for the same Node is not an error;
-// *** it results in as many independent monitoring instances.
-// setting up a node monitor (will receive {nodedown, Nodename})
-gs.MonitorNode(etf.Atom("node@address"), true)
-// removing the monitor
-gs.MonitorNode(etf.Atom("node@address"), false)
-
-/*
- * Simple example of how incoming messages are handled.
- * Interface implementation
- */
-
-// Init initializes process state using arbitrary arguments
-func (gs *goGenServ) Init(args ...interface{}) (state interface{}) {
-	// Self-registration with name SrvName
-	gs.Node.Register(etf.Atom(SrvName), gs.Self)
-	return nil
-}
+type State struct {
+	value int
+}

-// HandleCast serves incoming messages sent via gen_server:cast
-// HandleCast -> (0, state)  - noreply
-//               (-1, state) - normal stop (-2, -3 .... custom reasons to stop)
-func (gs *goGenServ) HandleCast(message *etf.Term, state interface{}) (code int, stateout interface{}) {
-	return 0, state
-}
+func (egs *ExampleGenServer) Init(p ergo.Process, args ...interface{}) (state interface{}) {
+	fmt.Printf("Init: args %v \n", args)
+	egs.process = p
+	InitialState := &State{
+		value: args[0].(int), // 100
+	}
+	return InitialState
+}

-// HandleCall serves incoming messages sent via gen_server:call
-// HandleCall -> (1, reply, state) - reply
-//               (0, _, state)     - noreply
-//               (-1, _, state)    - normal stop (-2, -3 .... custom reasons to stop)
-func (gs *goGenServ) HandleCall(from *etf.Tuple, message *etf.Term) (code int, reply *etf.Term, stateout interface{}) {
-	reply = etf.Term(etf.Atom("ok"))
-	return 1, &reply, state
-}
+func (egs *ExampleGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
+	fmt.Printf("HandleCast: %#v (state value %d) \n", message, state.(*State).value)
+	time.Sleep(1 * time.Second)
+	state.(*State).value++
+
+	if state.(*State).value > 103 {
+		egs.process.Send(egs.process.Self(), "hello")
+	} else {
+		egs.process.Cast(egs.process.Self(), "hi")
+	}
+
+	return "noreply", state
+}

-// HandleInfo serves all other incoming messages (Pid ! message)
-// HandleInfo -> (0, state)  - noreply
-//               (-1, state) - normal stop (-2, -3 .... custom reasons to stop)
-func (gs *goGenServ) HandleInfo(message *etf.Term, state interface{}) (code int, stateout interface{}) {
-	fmt.Printf("HandleInfo: %#v\n", *message)
-	return 0, state
-}
+func (egs *ExampleGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
+	fmt.Printf("HandleCall: %#v, From: %#v\n", message, from)
+	return "reply", message, state
+}

-// Terminate is called when the process dies
-func (gs *goGenServ) Terminate(reason int, state interface{}) {
-	fmt.Printf("Terminate: %#v\n", reason)
-}
+func (egs *ExampleGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
+	fmt.Printf("HandleInfo: %#v (state value %d) \n", message, state.(*State).value)
+	time.Sleep(1 * time.Second)
+	state.(*State).value++
+	if state.(*State).value > 106 {
+		return "stop", "normal"
+	} else {
+		egs.process.Send(egs.process.Self(), "hello")
+	}
+	return "noreply", state
+}

+func (egs *ExampleGenServer) Terminate(reason string, state interface{}) {
+	fmt.Printf("Terminate: %#v \n", reason)
+}

+func main() {
+	node := ergo.CreateNode("node@localhost", "cookies", ergo.NodeOptions{})
+	gs1 := &ExampleGenServer{}
+	process, _ := node.Spawn("gs1", ergo.ProcessOptions{}, gs1, 100)
+
+	process.Cast(process.Self(), "hey")
+
+	select {
+	case <-process.Context.Done():
+		fmt.Println("exited")
+	}
+}
 ```
-## Example ##
+Here is the output of this code:

-See `examples/` for a simple implementation of a node and a `GenServer` process
+```shell
+$ go run ./examples/simple/GenServer.go
+Init: args [100]
+HandleCast: "hey" (state value 100)
+HandleCast: "hi" (state value 101)
+HandleCast: "hi" (state value 102)
+HandleCast: "hi" (state value 103)
+HandleInfo: "hello" (state value 104)
+HandleInfo: "hello" (state value 105)
+HandleInfo: "hello" (state value 106)
+Terminate: "normal"
+exited
+```

-## Elixir Phoenix Users ##
+See `examples/` for more details:

+* [demoGenServer](examples/genserver)
+* [demoSupervisor](examples/supervisor)
+* [demoApplication](examples/application)
+* [demoMultinode](examples/multinode)

+### Elixir Phoenix Users ###

 Users of the Elixir Phoenix framework might encounter timeouts when trying to connect a Phoenix node
-to an ergonode node. The reason is that, in addition to global_name_server and net_kernel,
-Phoenix attempts to broadcast messages to the pg2 PubSub handler:
-https://hexdocs.pm/phoenix/1.1.0/Phoenix.PubSub.PG2.html
+to an ergo node. The reason is that, in addition to global_name_server and net_kernel,
+Phoenix attempts to broadcast messages to the [pg2 PubSub handler](https://hexdocs.pm/phoenix/1.1.0/Phoenix.PubSub.PG2.html).

 To work with Phoenix nodes, you must create and register a dedicated pg2 GenServer, and
 spawn it inside your node. Take inspiration from the global_name_server.go for the rest of
-the GenServer methods, but the Init must specify the "pg2" atom:
+the GenServer methods, but the Spawn must have "pg2" as the process name:
 ```golang
-func (pg2 *pg2Server) Init(args ...interface{}) (state interface{}) {
-	pg2.Node.Register(etf.Atom("pg2"), pg2.Self)
-	return nil
-}
+type Pg2GenServer struct {
+	ergo.GenServer
+}

+func main() {
+	// ...
+	pg2 := &Pg2GenServer{}
+	node1 := ergo.CreateNode("node1@localhost", "cookies", ergo.NodeOptions{})
+	process, _ := node1.Spawn("pg2", ergo.ProcessOptions{}, pg2, nil)
+	// ...
+}
 ```
+### Development and debugging ###

+There are a couple of predefined options that you might want to use:

+* -trace.node
+* -trace.dist

+To enable the Golang profiler just add `--tags debug` to your `go run` or `go build`, like this:

+`go run --tags debug ./examples/genserver/demoGenServer.go`

+Now the Golang profiler is available at `http://localhost:9009/debug/pprof`

+### Companies that are using Ergo Framework ###

+[![Kaspersky](./.images/kaspersky.png)](https://kaspersky.com)
+[![RingCentral](./.images/ringcentral.png)](https://www.ringcentral.com)

+Is your company using Ergo? Add your company logo/name here!

+### Commercial support ###

+If you are looking for commercial support, feel free to contact me via email (halturin at gmail dot com).
New file: the Application behavior implementation (package ergonode)
@@ -0,0 +1,217 @@
package ergonode

// http://erlang.org/doc/apps/kernel/application.html

import (
	"fmt"
	"sync"
	"time"

	"github.com/halturin/ergonode/etf"
	"github.com/halturin/ergonode/lib"
)

type ApplicationStartType = string

const (
	// start types:

	// ApplicationStartPermanent If a permanent application terminates,
	// all other applications and the runtime system (node) are also terminated.
	ApplicationStartPermanent = "permanent"

	// ApplicationStartTemporary If a temporary application terminates,
	// this is reported but no other applications are terminated.
	ApplicationStartTemporary = "temporary"

	// ApplicationStartTransient If a transient application terminates
	// with reason normal, this is reported but no other applications are
	// terminated. If a transient application terminates abnormally, that
	// is with any other reason than normal, all other applications and
	// the runtime system (node) are also terminated.
	ApplicationStartTransient = "transient"
)

// ApplicationBehavior interface
type ApplicationBehavior interface {
	Load(args ...interface{}) (ApplicationSpec, error)
	Start(process *Process, args ...interface{})
}

type ApplicationSpec struct {
	Name         string
	Description  string
	Version      string
	Lifespan     time.Duration
	Applications []string
	Environment  map[string]interface{}
	// Depends []
	Children  []ApplicationChildSpec
	startType ApplicationStartType
	app       ApplicationBehavior
	process   *Process
	mutex     sync.Mutex
}

type ApplicationChildSpec struct {
	Child   interface{}
	Name    string
	Args    []interface{}
	process *Process
}

// Application is an implementation of the ProcessBehavior interface
type Application struct{}

type ApplicationInfo struct {
	Name        string
	Description string
	Version     string
	PID         etf.Pid
}

func (a *Application) loop(p *Process, object interface{}, args ...interface{}) string {
	// there is an internal agreement that the first argument is the spec of this application
	// (see ApplicationStart for the details)
	spec := args[0].(*ApplicationSpec)

	if spec.Environment != nil {
		for k, v := range spec.Environment {
			p.SetEnv(k, v)
		}
	}

	if !a.startChildren(p, spec.Children[:]) {
		a.stopChildren(p.Self(), spec.Children[:], "failed")
		return "failed"
	}

	p.currentFunction = "Application:Start"
	object.(ApplicationBehavior).Start(p, args[1:]...)
	lib.Log("Application spec %#v\n", spec)
	p.ready <- true

	p.currentFunction = "Application:loop"

	if spec.Lifespan == 0 {
		spec.Lifespan = time.Second * 31536000 * 100 // let's define the default lifespan as 100 years :)
	}

	// stop the timer explicitly to prevent timer leaks: a timer is not GCed until it fires
	timer := time.NewTimer(spec.Lifespan)
	defer timer.Stop()

	for {
		select {
		case ex := <-p.gracefulExit:
			a.stopChildren(ex.from, spec.Children, string(ex.reason))
			return ex.reason

		case direct := <-p.direct:
			a.handleDirect(direct, spec.Children)
			continue

		case <-p.Context.Done():
			// node is down or killed using p.Kill()
			fmt.Printf("Warning: application %s has been killed\n", spec.Name)
			return "kill"
		case <-timer.C:
			// time to die
			go p.Exit(p.Self(), "normal")
		case msg := <-p.mailBox:
			//fromPid := msg.Element(1).(etf.Pid)
			message := msg.Element(2)
			switch r := message.(type) {
			case etf.Tuple:
				// waiting for {'EXIT', Pid, Reason}
				if len(r) != 3 || r.Element(1) != etf.Atom("EXIT") {
					// unknown. ignoring
					continue
				}
				terminated := r.Element(2).(etf.Pid)
				terminatedName := terminated.Str()
				reason := r.Element(3).(etf.Atom)

				for i := range spec.Children {
					child := spec.Children[i].process
					if child != nil && child.Self() == terminated {
						terminatedName = child.Name()
						break
					}
				}

				switch spec.startType {
				case ApplicationStartPermanent:
					a.stopChildren(terminated, spec.Children, string(reason))
					fmt.Printf("Application child %s (at %s) stopped with reason %s (permanent: node is shutting down)\n",
						terminatedName, p.Node.FullName, reason)
					p.Node.Stop()
					return "shutdown"

				case ApplicationStartTransient:
					if reason == etf.Atom("normal") || reason == etf.Atom("shutdown") {
						fmt.Printf("Application child %s (at %s) stopped with reason %s (transient)\n",
							terminatedName, p.Node.FullName, reason)
						continue
					}
					a.stopChildren(terminated, spec.Children, "normal")
					fmt.Printf("Application child %s (at %s) stopped with reason %s. (transient: node is shutting down)\n",
						terminatedName, p.Node.FullName, reason)
					p.Node.Stop()
					return string(reason)

				case ApplicationStartTemporary:
					fmt.Printf("Application child %s (at %s) stopped with reason %s (temporary)\n",
						terminatedName, p.Node.FullName, reason)
				}

			}
		}
	}
}

func (a *Application) stopChildren(from etf.Pid, children []ApplicationChildSpec, reason string) {
	for i := range children {
		child := children[i].process
		if child != nil && child.self != from {
			children[i].process.Exit(from, reason)
			children[i].process = nil
		}
	}
}

func (a *Application) startChildren(parent *Process, children []ApplicationChildSpec) bool {
	for i := range children {
		// i know, it looks weird to use the function from the supervisor file.
		// will move it somewhere else, but let it be there for a while.
		p := startChild(parent, children[i].Name, children[i].Child, children[i].Args...)
		if p == nil {
			return false
		}
		children[i].process = p
	}
	return true
}

func (a *Application) handleDirect(m directMessage, children []ApplicationChildSpec) {
	switch m.id {
	case "getChildren":
		pids := []etf.Pid{}
		for i := range children {
			if children[i].process == nil {
				continue
			}
			pids = append(pids, children[i].process.self)
		}

		m.message = pids
		m.reply <- m

	default:
		if m.reply != nil {
			m.message = ErrUnsupportedRequest
			m.reply <- m
		}
	}
}
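For context, a sketch of how the Application machinery above is meant to be driven. It relies only on the API exercised by the tests that follow (`ApplicationLoad`, `ApplicationStartPermanent`); `MyApp`, the node name, and the cookie are placeholders:

```golang
package main

import (
	"fmt"
	"time"

	ergo "github.com/halturin/ergonode"
)

// MyApp is a placeholder ApplicationBehavior implementation.
type MyApp struct {
	ergo.Application
}

func (a *MyApp) Load(args ...interface{}) (ergo.ApplicationSpec, error) {
	// args are whatever was passed to ApplicationLoad (the tests below
	// use them to carry the lifespan and the application name).
	return ergo.ApplicationSpec{
		Name:        "myapp",
		Description: "demo application",
		Version:     "v0.1",
		// No children in this sketch; a real application lists its
		// ApplicationChildSpec entries here.
	}, nil
}

func (a *MyApp) Start(p *ergo.Process, args ...interface{}) {
	fmt.Println("application started:", p.Self())
}

func main() {
	node := ergo.CreateNode("demo@localhost", "cookie", ergo.NodeOptions{})

	if err := node.ApplicationLoad(&MyApp{}, time.Duration(0)); err != nil {
		panic(err)
	}

	// Permanent start type: if this application terminates, the node
	// itself is stopped as well (see ApplicationStartPermanent above).
	if _, err := node.ApplicationStartPermanent("myapp"); err != nil {
		panic(err)
	}
	select {}
}
```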
New file: Application tests (package ergonode)
@@ -0,0 +1,533 @@
package ergonode

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/halturin/ergonode/etf"
)

type testApplication struct {
	Application
}

func (a *testApplication) Load(args ...interface{}) (ApplicationSpec, error) {
	lifeSpan := args[0].(time.Duration)
	name := args[1].(string)
	nameGS := "testAppGS"
	if len(args) == 3 {
		nameGS = args[2].(string)
	}
	return ApplicationSpec{
		Name:        name,
		Description: "My Test Application",
		Version:     "v.0.1",
		Environment: map[string]interface{}{
			"envName1": 123,
			"envName2": "Hello world",
		},
		Children: []ApplicationChildSpec{
			ApplicationChildSpec{
				Child: &testAppGenServer{},
				Name:  nameGS,
			},
		},
		Lifespan: lifeSpan,
	}, nil
}

func (a *testApplication) Start(p *Process, args ...interface{}) {
	//p.SetEnv("env123", 456)
}

// test GenServer
type testAppGenServer struct {
	GenServer
}

func (gs *testAppGenServer) Init(p *Process, args ...interface{}) interface{} {
	//fmt.Println("STARTING TEST GS IN APP")
	p.SetEnv("env123", 456)
	return nil
}

func (gs *testAppGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
	return "noreply", state
}

func (gs *testAppGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
	return "stop", message, nil
}

func (gs *testAppGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
	return "noreply", state
}

func (gs *testAppGenServer) Terminate(reason string, state interface{}) {
	fmt.Println("TERMINATING TEST GS IN APP with reason:", reason)
}

// testing the application
func TestApplication(t *testing.T) {

	fmt.Printf("\n=== Test Application load/unload/start/stop\n")
	fmt.Printf("\nStarting node nodeTestApplication@localhost:")
	ctx := context.Background()
	node := CreateNodeWithContext(ctx, "nodeTestApplication@localhost", "cookies", NodeOptions{})
	if node == nil {
		t.Fatal("can't start node")
	} else {
		fmt.Println("OK")
	}

	app := &testApplication{}
	lifeSpan := 0 * time.Second

	//
	// case 1: loading/unloading the app
	//
	fmt.Printf("Loading application... ")
	err := node.ApplicationLoad(app, lifeSpan, "testapp")
	if err != nil {
		t.Fatal(err)
	}

	la := node.LoadedApplications()

	if len(la) != 1 {
		t.Fatal("total number of loaded applications mismatch")
	}
	if la[0].Name != "testapp" {
		t.Fatal("can't load application")
	}

	fmt.Println("OK")

	wa := node.WhichApplications()
	if len(wa) > 0 {
		t.Fatal("total number of running applications mismatch")
	}

	fmt.Printf("Unloading application... ")
	if err := node.ApplicationUnload("testapp"); err != nil {
		t.Fatal(err)
	}
	time.Sleep(100 * time.Millisecond) // takes some time
	la = node.LoadedApplications()

	if len(la) > 0 {
		t.Fatal("total number of loaded applications mismatch")
	}
	fmt.Println("OK")

	//
	// case 2: start (and try to unload the running app) / stop (normal) the application
	//
	fmt.Printf("Starting application... ")
	// use a new app name because the unloading takes some time
	if err := node.ApplicationLoad(app, lifeSpan, "testapp1", "testAppGS1"); err != nil {
		t.Fatal(err)
	}

	p, e := node.ApplicationStart("testapp1")
	if e != nil {
		t.Fatal(e)
	}

	// we shouldn't be able to unload a running app
	if e := node.ApplicationUnload("testapp1"); e != ErrAppAlreadyStarted {
		t.Fatal(e)
	}

	wa = node.WhichApplications()
	if len(wa) != 1 {
		t.Fatal("total number of running applications mismatch")
	}

	if wa[0].Name != "testapp1" {
		t.Fatal("can't start application")
	}

	fmt.Println("OK")

	// case 2.1: test env vars
	fmt.Printf("testing application environment variables...")
	p.SetEnv("env123", 123)
	p.SetEnv("envStr", "123")

	gs := node.GetProcessByName("testAppGS1")
	env := gs.GetEnv("env123")
	if env == nil {
		t.Fatal("incorrect environment variable: not found")
	}

	if env.(int) != 456 {
		t.Fatal("incorrect environment variable: value should be overridden by the child process")
	}

	if envUnknown := gs.GetEnv("unknown"); envUnknown != nil {
		t.Fatal("incorrect environment variable: undefined variable should have nil value")
	}

	envs := gs.ListEnv()
	if x, ok := envs["env123"]; !ok || x != 456 {
		t.Fatal("incorrect environment variable: list of variables has no env123 value or it is wrong")
	}

	if x, ok := envs["envStr"]; !ok || x != "123" {
		t.Fatal("incorrect environment variable: list of variables has no envStr value or it is wrong")
	}

	fmt.Println("OK")

	// case 2.2: get the list of children pids
	fmt.Printf("testing application children list...")
	list := p.GetChildren()
	if len(list) != 1 || list[0] != gs.Self() {
		t.Fatal("incorrect children list")
	}
	fmt.Println("OK")

	// case 2.3: get application info

	fmt.Printf("get application info for testapp1...")
	info, errInfo := node.GetApplicationInfo("testapp1")
	if errInfo != nil {
		t.Fatal(errInfo)
	}
	if p.Self() != info.PID {
		t.Fatal("incorrect pid in application info")
	}
	fmt.Println("OK")

	fmt.Printf("Stopping application ...")
	if e := node.ApplicationStop("testapp1"); e != nil {
		t.Fatal(e)
	}
	fmt.Println("OK")

	if e := p.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("timed out")
	}
	wa = node.WhichApplications()
	if len(wa) != 0 {
		fmt.Println("wa: ", wa)
		t.Fatal("total number of running applications mismatch")
	}

	//
	// case 3: start/stop (brutal) the application
	//
	fmt.Printf("Starting application for brutal kill...")
	p, e = node.ApplicationStart("testapp1")
	if e != nil {
		t.Fatal(e)
	}
	fmt.Println("OK")
	fmt.Printf("Kill application...")
	p.Kill()
	if e := p.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("timed out")
	}
	fmt.Println("OK")

	node.ApplicationUnload("testapp1")

	//
	// case 4: start with a limited lifespan
	//
	fmt.Printf("Starting application with lifespan 150ms...")
	lifeSpan = 150 * time.Millisecond
	if err := node.ApplicationLoad(app, lifeSpan, "testapp2", "testAppGS2"); err != nil {
		t.Fatal(err)
	}
	tStart := time.Now()
	p, e = node.ApplicationStart("testapp2")
	if e != nil {
		t.Fatal(e)
	}
	if e := p.WaitWithTimeout(160 * time.Millisecond); e != nil {
		t.Fatal("application lifespan was longer than 150ms")
	}
	tLifeSpan := time.Since(tStart)

	if node.IsProcessAlive(p.Self()) {
		t.Fatal("application still alive")
	}

	if tLifeSpan < lifeSpan {
		t.Fatal("application lifespan was shorter(", tLifeSpan, ") than ", lifeSpan)
	}

	fmt.Println("OK. lifespan:", tLifeSpan)

	node.Stop()
}

func TestApplicationTypePermanent(t *testing.T) {
	fmt.Printf("\n=== Test Application type Permanent\n")
	fmt.Printf("\nStarting node nodeTestApplicationPermanent@localhost:")
	ctx := context.Background()
	node := CreateNodeWithContext(ctx, "nodeTestApplicationPermanent@localhost", "cookies", NodeOptions{})
	if node == nil {
		t.Fatal("can't start node")
	} else {
		fmt.Println("OK")
	}

	fmt.Printf("Starting application... ")
	app := &testApplication{}
	lifeSpan := time.Duration(0)
	if err := node.ApplicationLoad(app, lifeSpan, "testapp"); err != nil {
		t.Fatal(err)
	}

	p, e := node.ApplicationStartPermanent("testapp")
	if e != nil {
		t.Fatal(e)
	}

	gs := node.GetProcessByName("testAppGS")

	gs.Exit(p.Self(), "abnormal")
	if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("timeout on waiting for the child")
	}

	if e := p.WaitWithTimeout(1 * time.Second); e != nil {
		t.Fatal("timeout on waiting for the application to stop")
	}

	if e := node.WaitWithTimeout(1 * time.Second); e != nil {
		t.Fatal("node shouldn't be alive here")
	}

	if node.IsAlive() {
		t.Fatal("node shouldn't be alive here")
	}

}

func TestApplicationTypeTransient(t *testing.T) {
	fmt.Printf("\n=== Test Application type Transient\n")
	fmt.Printf("\nStarting node nodeTestApplicationTypeTransient@localhost:")
	ctx := context.Background()
	node := CreateNodeWithContext(ctx, "nodeTestApplicationTypeTransient@localhost", "cookies", NodeOptions{})
	if node == nil {
		t.Fatal("can't start node")
	} else {
		fmt.Println("OK")
	}

	fmt.Printf("Starting application...")
	app1 := &testApplication{}
	app2 := &testApplication{}
	lifeSpan := time.Duration(0)

	if err := node.ApplicationLoad(app1, lifeSpan, "testapp1", "testAppGS1"); err != nil {
		t.Fatal(err)
	}

	if err := node.ApplicationLoad(app2, lifeSpan, "testapp2", "testAppGS2"); err != nil {
		t.Fatal(err)
	}

	p1, e1 := node.ApplicationStartTransient("testapp1")
	if e1 != nil {
		t.Fatal(e1)
	}

	p2, e2 := node.ApplicationStartTransient("testapp2")
	if e2 != nil {
		t.Fatal(e2)
	}

	fmt.Println("OK")

	fmt.Printf("stopping testAppGS1 with 'normal' reason (shouldn't affect testAppGS2)...")
	gs := node.GetProcessByName("testAppGS1")
	gs.Exit(gs.Self(), "normal")
	if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal(e)
	}

	if e := p1.WaitWithTimeout(100 * time.Millisecond); e != ErrTimeout {
		t.Fatal("application testapp1 should be alive here")
	}

	p1.Kill()

	p2.WaitWithTimeout(100 * time.Millisecond)
	if !p2.IsAlive() {
		t.Fatal("testAppGS2 should be alive here")
	}

	if !node.IsAlive() {
		t.Fatal("node should be alive here")
	}

	fmt.Println("OK")

	fmt.Println("starting application testapp1")
	p1, e1 = node.ApplicationStartTransient("testapp1")
	if e1 != nil {
		t.Fatal(e1)
	}
	fmt.Println("OK")

	fmt.Printf("stopping testAppGS1 with 'abnormal' reason (the node will shut down)...")
	gs = node.GetProcessByName("testAppGS1")
	gs.Exit(gs.Self(), "abnormal")

	if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal(e)
	}

	if e := p1.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("testapp1 shouldn't be alive here")
	}

	if e := p2.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("testapp2 shouldn't be alive here")
	}

	if e := node.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("node shouldn't be alive here")
	}
}

func TestApplicationTypeTemporary(t *testing.T) {
	fmt.Printf("\n=== Test Application type Temporary\n")
	fmt.Printf("\nStarting node nodeTestApplicationStop@localhost:")
	ctx := context.Background()
	node := CreateNodeWithContext(ctx, "nodeTestApplicationStop@localhost", "cookies", NodeOptions{})
	if node == nil {
		t.Fatal("can't start node")
	} else {
		fmt.Println("OK")
	}
	fmt.Printf("Starting application...")
	app := &testApplication{}
	lifeSpan := time.Duration(0)
	if err := node.ApplicationLoad(app, lifeSpan, "testapp"); err != nil {
		t.Fatal(err)
	}

	p, e := node.ApplicationStart("testapp") // the default start type is Temporary
	if e != nil {
		t.Fatal(e)
	}

	gs := node.GetProcessByName("testAppGS")
	gs.Exit(p.Self(), "normal")
	if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal(e)
	}

	if e := node.WaitWithTimeout(100 * time.Millisecond); e != ErrTimeout {
		t.Fatal("node should be alive here")
	}

	if !node.IsAlive() {
		t.Fatal("node should be alive here")
	}

	node.Stop()
}

func TestApplicationStop(t *testing.T) {
	fmt.Printf("\n=== Test Application stopping\n")
	fmt.Printf("\nStarting node nodeTestApplicationTypeTemporary@localhost:")
	ctx := context.Background()
	node := CreateNodeWithContext(ctx, "nodeTestApplicationTypeTemporary@localhost", "cookies", NodeOptions{})
	if node == nil {
		t.Fatal("can't start node")
	} else {
		fmt.Println("OK")
	}
	fmt.Printf("Starting applications testapp1, testapp2...")
	lifeSpan := time.Duration(0)
	app := &testApplication{}
	if e := node.ApplicationLoad(app, lifeSpan, "testapp1", "testAppGS1"); e != nil {
		t.Fatal(e)
	}

	app1 := &testApplication{}
	if e := node.ApplicationLoad(app1, lifeSpan, "testapp2", "testAppGS2"); e != nil {
		t.Fatal(e)
	}

	p1, e1 := node.ApplicationStartPermanent("testapp1")
	if e1 != nil {
		t.Fatal(e1)
	}
	p2, e2 := node.ApplicationStartPermanent("testapp2")
	if e2 != nil {
		t.Fatal(e2)
	}
	fmt.Println("OK")

	// case 1: stopping via node.ApplicationStop
	fmt.Printf("stopping testapp1 via node.ApplicationStop (shouldn't affect testapp2) ...")
	node.ApplicationStop("testapp1")
	if e := p1.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal("can't stop an application via node.ApplicationStop")
	}

	if !p2.IsAlive() {
		t.Fatal("testapp2 should be alive here")
	}

	if !node.IsAlive() {
		t.Fatal("node should be alive here")
	}

	fmt.Println("OK")

	// case 2: stopping via process.Exit
	fmt.Printf("starting application testapp1 ...")
	p1, e1 = node.ApplicationStartPermanent("testapp1")
	if e1 != nil {
		t.Fatal(e1)
	}
	fmt.Println("OK")

	fmt.Printf("stopping testapp1 via process.Exit (shouldn't affect testapp2)...")
	p1.Exit(p1.Self(), "normal")
	if e := p1.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal(e)
	}
	if !p2.IsAlive() {
		t.Fatal("testapp2 should be alive here")
	}

	if !node.IsAlive() {
		t.Fatal("node should be alive here")
	}
	fmt.Println("OK")

	// case 3: stopping via process.Kill
	fmt.Printf("starting application testapp1 ...")
	p1, e1 = node.ApplicationStartPermanent("testapp1")
	if e1 != nil {
		t.Fatal(e1)
	}
	fmt.Println("OK")

	fmt.Printf("stopping testapp1 via process.Kill (shouldn't affect testapp2)...")
	p1.Kill()
	if e := p1.WaitWithTimeout(100 * time.Millisecond); e != nil {
		t.Fatal(e)
	}

	if !p2.IsAlive() {
		t.Fatal("testapp2 should be alive here")
	}

	if !node.IsAlive() {
		t.Fatal("node should be alive here")
	}

}
New file: appMon GenServer — the observer backend (package ergonode)
@@ -0,0 +1,212 @@
package ergonode

// TODO: https://github.com/erlang/otp/blob/master/lib/runtime_tools-1.13.1/src/appmon_info.erl

import (
	"time"

	"github.com/halturin/ergonode/etf"
	"github.com/halturin/ergonode/lib"
)

type appMon struct {
	GenServer
}

type appMonState struct {
	process *Process
	jobs    map[etf.Atom][]jobDetails
}

type jobDetails struct {
	name   etf.Atom
	args   etf.List
	sendTo etf.Pid
}

// Init initializes process state using arbitrary arguments
// Init -> state
func (am *appMon) Init(p *Process, args ...interface{}) interface{} {
	lib.Log("APP_MON: Init %#v", args)
	from := args[0]
	p.Link(from.(etf.Pid))

	return appMonState{
		process: p,
		jobs:    make(map[etf.Atom][]jobDetails),
	}
}

// HandleCast -> ("noreply", state) - noreply
//               ("stop", reason)   - stop with reason
func (am *appMon) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
	var appState appMonState = state.(appMonState)
	lib.Log("APP_MON: HandleCast: %#v", message)
	switch message {
	case "sendStat":

		for cmd, jobs := range state.(appMonState).jobs {
			switch cmd {
			case "app_ctrl":
				// From ! {delivery, self(), Cmd, Aux, Result}
				apps := appState.process.Node.WhichApplications()
				for i := range jobs {
					appList := make(etf.List, len(apps))
					for ai, a := range apps {
						appList[ai] = etf.Tuple{a.PID, etf.Atom(a.Name),
							etf.Tuple{etf.Atom(a.Name), a.Description, a.Version},
						}
					}
					delivery := etf.Tuple{etf.Atom("delivery"), appState.process.Self(), cmd, jobs[i].name, appList}
					appState.process.Send(jobs[i].sendTo, delivery)
				}

			case "app":
				for i := range jobs {
					appTree := am.makeAppTree(appState.process, jobs[i].name)
					if appTree == nil {
						continue
					}
					delivery := etf.Tuple{etf.Atom("delivery"), appState.process.Self(), cmd, jobs[i].name, appTree}
					appState.process.Send(jobs[i].sendTo, delivery)
				}

			}
		}

		appState.process.CastAfter(appState.process.Self(), "sendStat", 2*time.Second)
		return "noreply", state

	default:
		switch m := message.(type) {
		case etf.Tuple:
			if len(m) == 5 {
				// etf.Tuple{etf.Pid{Node:"erl-demo@127.0.0.1", Id:0x7c, Serial:0x0, Creation:0x1}, "app_ctrl", "demo@127.0.0.1", "true", etf.List{}}
				job := jobDetails{
					name:   m.Element(3).(etf.Atom),
					args:   m.Element(5).(etf.List),
					sendTo: m.Element(1).(etf.Pid),
				}

				if m.Element(4) == etf.Atom("true") {
					// add a new job
					if len(state.(appMonState).jobs) == 0 {
						appState.process.Cast(appState.process.Self(), "sendStat")
					}

					if jobList, ok := state.(appMonState).jobs[m.Element(2).(etf.Atom)]; ok {
						for i := range jobList {
							if jobList[i].name == job.name {
								return "noreply", appState
							}
						}
						jobList = append(jobList, job)
						state.(appMonState).jobs[m.Element(2).(etf.Atom)] = jobList
					} else {
						state.(appMonState).jobs[m.Element(2).(etf.Atom)] = []jobDetails{job}
					}

				} else {
					// remove a job
					if jobList, ok := state.(appMonState).jobs[m.Element(2).(etf.Atom)]; ok {
						for i := range jobList {
							if jobList[i].name == job.name {
								jobList[i] = jobList[0]
								jobList = jobList[1:]

								if len(jobList) > 0 {
									state.(appMonState).jobs[m.Element(2).(etf.Atom)] = jobList
								} else {
									delete(state.(appMonState).jobs, m.Element(2).(etf.Atom))
								}
								break
							}
						}
					}

					if len(state.(appMonState).jobs) == 0 {
						return "stop", "normal"
					}

				}
				return "noreply", appState
			}

			// etf.Tuple{etf.Atom("EXIT"), Pid, reason}

		}
	}

	return "stop", "normal"
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
//               ("noreply", _, state)     - noreply
//               ("stop", reason, _)       - normal stop
func (am *appMon) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
	lib.Log("APP_MON: HandleCall: %#v, From: %#v", message, from)
	// return "reply", reply, state
	return "stop", "normal", state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
//               ("stop", reason)   - normal stop
func (am *appMon) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
	lib.Log("APP_MON: HandleInfo: %#v", message)
	return "stop", "normal"
}

// Terminate is called when the process dies
func (am *appMon) Terminate(reason string, state interface{}) {
	lib.Log("APP_MON: Terminate: %#v", reason)
}

func (am *appMon) makeAppTree(p *Process, app etf.Atom) etf.Tuple {
	appInfo, err := p.Node.GetApplicationInfo(string(app))
	if err != nil {
		return nil
	}

	resolver := make(map[etf.Pid]interface{})

	tree := makeTree(p, resolver, appInfo.PID)
	children := etf.List{etf.Tuple{appInfo.PID, appInfo.PID.Str()}}
	for p, n := range resolver {
		children = append(children, etf.Tuple{p, n})
	}

	appTree := etf.Tuple{
		appInfo.PID.Str(), // pid or registered name
		children,
		tree,
		etf.List{}, // TODO: links
	}

	return appTree
}

func makeTree(p *Process, resolver map[etf.Pid]interface{}, pid etf.Pid) etf.List {

	pidProcess := p.Node.GetProcessByPid(pid)
	if pidProcess == nil {
		return etf.List{}
	}
	if pidProcess.name != "" {
		resolver[pid] = pidProcess.name
	} else {
		resolver[pid] = pid.Str()
	}

	tree := etf.List{}

	for _, cp := range pidProcess.GetChildren() {
		children := makeTree(p, resolver, cp)
		child := etf.Tuple{resolver[pid], resolver[cp]}
		tree = append(tree, child)
		tree = append(tree, children...)
	}

	return tree
}
cmd/epmd/epmd.go
@@ -1,12 +1,14 @@
 package main

 import (
+	"context"
 	"encoding/binary"
 	"flag"
 	"fmt"
-	"github.com/halturin/ergonode/dist"
 	"net"
 	"strconv"
+
+	"github.com/halturin/ergonode/dist"
 )

 var (
@@ -32,11 +34,11 @@ func main() {
 		return
 	}

-	if err := dist.Server(uint16(Listen)); err != nil {
+	if err := dist.Server(context.TODO(), uint16(Listen)); err != nil {
 		panic(err)
 	}

-	// just sleep forever. until somebody kiil this process
+	// just sleep forever, until somebody kills this process
 	select {}
 }

@@ -49,16 +51,25 @@ func getNames() {

 	defer conn.Close()

-	buf := make([]byte, 1024)
+	buf := make([]byte, 2048)
 	buf[1] = 1
 	buf[2] = dist.EPMD_NAMES_REQ
 	conn.Write(buf[0:3])
-	if n, err := conn.Read(buf); n == 0 {
-		panic(err)
+	if n, err := conn.Read(buf); n < 4 || err != nil {
+		panic("malformed response from epmd")
 	} else {
 		fmt.Printf("epmd: up and running on port %d with data:\n", binary.BigEndian.Uint32(buf[0:4]))
-		if len(buf[4:n]) > 0 {
-			fmt.Printf("%s\n", string(buf[4:n]))
-		}
+		fmt.Printf("%s", string(buf[4:]))
+		buf = buf[n:]
+
+		for {
+			n, err = conn.Read(buf)
+			if err != nil || n == 0 {
+				break
+			}
+			fmt.Printf("%s", string(buf))
+			buf = buf[len(buf):]
+		}
 	}
 }
New file: debug-build profiler (package ergonode)
@@ -0,0 +1,12 @@
//+build debug

package ergonode

import (
	"net/http"
	_ "net/http/pprof"
)

func init() {
	go http.ListenAndServe("0.0.0.0:9009", nil)
}
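With this file built in (via the `--tags debug` flag mentioned in the README above), a CPU profile can be collected with the standard tooling, e.g. `go tool pprof http://localhost:9009/debug/pprof/profile`; the port comes from the `ListenAndServe` call in this file.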
dist package (node handshake and messaging):
@@ -4,10 +4,8 @@ import (
 	"bytes"
 	"crypto/md5"
 	"encoding/binary"
-	"errors"
 	"flag"
 	"fmt"
-	"github.com/halturin/ergonode/etf"
 	"io"
 	"io/ioutil"
 	"log"
@@ -16,6 +14,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/halturin/ergonode/etf"
 )

 var dTrace bool
@@ -93,7 +93,7 @@ type NodeDesc struct {
 	term       *etf.Context
 	isacceptor bool

-	Ready chan bool
+	HandshakeError chan error
 }

 func NewNodeDesc(name, cookie string, isHidden bool, c net.Conn) (nd *NodeDesc) {
@@ -103,14 +103,14 @@ func NewNodeDesc(name, cookie string, isHidden bool, c net.Conn) (nd *NodeDesc)
 		Hidden: isHidden,
 		remote: nil,
 		state:  HANDSHAKE,
-		flag: toNodeFlag(PUBLISHED, UNICODE_IO, DIST_MONITOR,
+		flag: toNodeFlag(PUBLISHED, UNICODE_IO, DIST_MONITOR, DIST_MONITOR_NAME,
 			EXTENDED_PIDS_PORTS, EXTENDED_REFERENCES,
 			DIST_HDR_ATOM_CACHE, HIDDEN_ATOM_CACHE, NEW_FUN_TAGS,
 			SMALL_ATOM_TAGS, UTF8_ATOMS, MAP_TAG, BIG_CREATION),
-		version:    5,
-		term:       new(etf.Context),
-		isacceptor: true,
-		Ready:      make(chan bool),
+		version:        5,
+		term:           new(etf.Context),
+		isacceptor:     true,
+		HandshakeError: make(chan error),
 	}

 	nd.term.ConvertBinaryToString = true
@@ -128,7 +128,7 @@ func NewNodeDesc(name, cookie string, isHidden bool, c net.Conn) (nd *NodeDesc)
 	return nd
 }

-func (currNd *NodeDesc) ReadMessage(c net.Conn) (ts []etf.Term, err error) {
+func (currentND *NodeDesc) ReadMessage(c net.Conn) (ts []etf.Term, err error) {

 	sendData := func(headerLen int, data []byte) (int, error) {
 		reply := make([]byte, len(data)+headerLen)
@@ -142,14 +142,16 @@ func (currNd *NodeDesc) ReadMessage(c net.Conn) (ts []etf.Term, err error) {
 		return c.Write(reply)
 	}

-	switch currNd.state {
+	switch currentND.state {
 	case HANDSHAKE:
 		var length uint16
 		if err = binary.Read(c, binary.BigEndian, &length); err != nil {
+			currentND.HandshakeError <- err
 			return
 		}
 		msg := make([]byte, length)
 		if _, err = io.ReadFull(c, msg); err != nil {
+			currentND.HandshakeError <- err
 			return
 		}
 		dLog("Read from enode %d: %v", length, msg)
@@ -157,64 +159,67 @@
 		switch msg[0] {
 		case 'n':
 			rand.Seed(time.Now().UTC().UnixNano())
-			currNd.challenge = rand.Uint32()
+			currentND.challenge = rand.Uint32()

-			if currNd.isacceptor {
-				sn := currNd.read_SEND_NAME(msg)
+			if currentND.isacceptor {
+				sn := currentND.read_SEND_NAME(msg)
 				// Statuses: ok, nok, ok_simultaneous, alive, not_allowed
-				sok := currNd.compose_SEND_STATUS(sn, true)
+				sok := currentND.compose_SEND_STATUS(sn, true)
 				_, err = sendData(2, sok)
 				if err != nil {
+					currentND.HandshakeError <- err
 					return
 				}

 				// Now send challenge
-				challenge := currNd.compose_SEND_CHALLENGE(sn)
+				challenge := currentND.compose_SEND_CHALLENGE(sn)
 				sendData(2, challenge)
 				if err != nil {
+					currentND.HandshakeError <- err
 					return
 				}
 			} else {
 				//
 				dLog("Doing CHALLENGE (outgoing connection)")

-				challenge := currNd.read_SEND_CHALLENGE(msg)
-				challenge_reply := currNd.compose_SEND_CHALENGE_REPLY(challenge)
-				sendData(2, challenge_reply)
+				challenge := currentND.read_SEND_CHALLENGE(msg)
+				challengeReply := currentND.compose_SEND_CHALENGE_REPLY(challenge)
+				sendData(2, challengeReply)
 				return

 			}

 		case 'r':
-			sn := currNd.remote
-			ok := currNd.read_SEND_CHALLENGE_REPLY(sn, msg)
+			sn := currentND.remote
+			ok := currentND.read_SEND_CHALLENGE_REPLY(sn, msg)
 			if ok {
-				challengeAck := currNd.compose_SEND_CHALLENGE_ACK(sn)
+				challengeAck := currentND.compose_SEND_CHALLENGE_ACK(sn)
 				sendData(2, challengeAck)
 				if err != nil {
+					currentND.HandshakeError <- err
 					return
 				}
 				dLog("Remote: %#v", sn)
-				ts = []etf.Term{etf.Term(etf.Tuple{etf.Atom("$connection"), etf.Atom(sn.Name), currNd.Ready})}
+				ts = []etf.Term{etf.Term(etf.Tuple{etf.Atom("$connection"), etf.Atom(sn.Name), currentND.HandshakeError})}
 			} else {
-				err = errors.New("bad handshake")
+				err = fmt.Errorf("bad handshake")
+				currentND.HandshakeError <- err
 				return
 			}
 		case 's':
 			r := string(msg[1:len(msg)])
 			if r != "ok" {
-				c.Close()
-				dLog("Can't continue (recv_status: %s). Closing connection", r)
-				panic("recv_status is not ok. Closing connection")
+				err = fmt.Errorf("Can't continue (recv_status: %s). Closing connection", r)
+				currentND.HandshakeError <- err
 			}

 			return

 		case 'a':
-			currNd.read_SEND_CHALLENGE_ACK(msg)
-			sn := currNd.remote
+			currentND.read_SEND_CHALLENGE_ACK(msg)
+			sn := currentND.remote
 			dLog("Remote (outgoing): %#v", sn)
-			ts = []etf.Term{etf.Term(etf.Tuple{etf.Atom("$connection"), etf.Atom(sn.Name), currNd.Ready})}
+			ts = []etf.Term{etf.Term(etf.Tuple{etf.Atom("$connection"), etf.Atom(sn.Name), currentND.HandshakeError})}
 			return
 		}

@@ -225,23 +230,26 @@ func (currNd *NodeDesc) ReadMessage(c net.Conn) (ts []etf.Term, err error) {
 			return
 		}
 		if length == 0 {
-			dLog("Keepalive (%s)", currNd.remote.Name)
+			dLog("Keepalive (%s)", currentND.remote.Name)
 			sendData(4, []byte{})
 			return
 		}
-		r := &io.LimitedReader{c, int64(length)}
+		r := &io.LimitedReader{
+			R: c,
+			N: int64(length),
+		}

-		if currNd.flag.isSet(DIST_HDR_ATOM_CACHE) {
+		if currentND.flag.isSet(DIST_HDR_ATOM_CACHE) {
 			var ctl, message etf.Term
-			if err = currNd.readDist(r); err != nil {
+			if err = currentND.readDist(r); err != nil {
 				break
 			}
-			if ctl, err = currNd.readCtl(r); err != nil {
+			if ctl, err = currentND.readCtl(r); err != nil {
 				break
 			}
 			dLog("READ CTL: %#v", ctl)

-			if message, err1 = currNd.readMessage(r); err1 != nil {
+			if message, err1 = currentND.readMessage(r); err1 != nil {
 				// break
 			}

@@ -260,7 +268,7 @@ func (currNd *NodeDesc) ReadMessage(c net.Conn) (ts []etf.Term, err error) {
 			ts = make([]etf.Term, 0)
 			for {
 				var res etf.Term
-				if res, err = currNd.readTerm(r); err != nil {
+				if res, err = currentND.readTerm(r); err != nil {
 					break
 				}
 				ts = append(ts, res)

@@ -280,7 +288,7 @@ func (currNd *NodeDesc) ReadMessage(c net.Conn) (ts []etf.Term, err error) {
 	return
 }

-func (currNd *NodeDesc) WriteMessage(c net.Conn, ts []etf.Term) (err error) {
+func (currentND *NodeDesc) WriteMessage(c net.Conn, ts []etf.Term) (err error) {
 	sendData := func(data []byte) (int, error) {
 		reply := make([]byte, len(data)+4)
 		binary.BigEndian.PutUint32(reply[0:4], uint32(len(data)))

@@ -290,17 +298,17 @@ func (currNd *NodeDesc) WriteMessage(c net.Conn, ts []etf.Term) (err error) {
 	}

 	buf := new(bytes.Buffer)
-	if currNd.flag.isSet(DIST_HDR_ATOM_CACHE) {
+	if currentND.flag.isSet(DIST_HDR_ATOM_CACHE) {
 		buf.Write([]byte{etf.EtVersion})
-		currNd.term.WriteDist(buf, ts)
+		currentND.term.WriteDist(buf, ts)
 		for _, v := range ts {
-			currNd.term.Write(buf, v)
+			currentND.term.Write(buf, v)
 		}
 	} else {
 		buf.Write([]byte{'p'})
 		for _, v := range ts {
 			buf.Write([]byte{etf.EtVersion})
-			currNd.term.Write(buf, v)
+			currentND.term.Write(buf, v)
 		}
 	}
 	// dLog("WRITE: %#v: %#v", ts, buf.Bytes())
@@ -309,8 +317,12 @@
|
|||
|
||||
}
|
||||
|
||||
func (nd *NodeDesc) GetRemoteName() etf.Atom {
|
||||
return etf.Atom(nd.remote.Name)
|
||||
func (nd *NodeDesc) GetRemoteName() string {
|
||||
// nd.remote MUST not be nil otherwise is a bug. let it panic then
|
||||
if nd.state == CONNECTED {
|
||||
return nd.remote.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (nd *NodeDesc) compose_SEND_NAME() (msg []byte) {
|
||||
|
@ -322,7 +334,7 @@ func (nd *NodeDesc) compose_SEND_NAME() (msg []byte) {
|
|||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) read_SEND_NAME(msg []byte) (nd *NodeDesc) {
|
||||
func (currentND *NodeDesc) read_SEND_NAME(msg []byte) (nd *NodeDesc) {
|
||||
version := binary.BigEndian.Uint16(msg[1:3])
|
||||
flag := nodeFlag(binary.BigEndian.Uint32(msg[3:7]))
|
||||
name := string(msg[7:])
|
||||
|
@ -331,45 +343,45 @@ func (currNd *NodeDesc) read_SEND_NAME(msg []byte) (nd *NodeDesc) {
|
|||
version: version,
|
||||
flag: flag,
|
||||
}
|
||||
currNd.remote = nd
|
||||
currentND.remote = nd
|
||||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) compose_SEND_STATUS(nd *NodeDesc, isOk bool) (msg []byte) {
|
||||
func (currentND *NodeDesc) compose_SEND_STATUS(nd *NodeDesc, isOk bool) (msg []byte) {
|
||||
msg = make([]byte, 3)
|
||||
msg[0] = byte('s')
|
||||
copy(msg[1:], "ok")
|
||||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) compose_SEND_CHALLENGE(nd *NodeDesc) (msg []byte) {
|
||||
msg = make([]byte, 11+len(currNd.Name))
|
||||
func (currentND *NodeDesc) compose_SEND_CHALLENGE(nd *NodeDesc) (msg []byte) {
|
||||
msg = make([]byte, 11+len(currentND.Name))
|
||||
msg[0] = byte('n')
|
||||
binary.BigEndian.PutUint16(msg[1:3], currNd.version)
|
||||
binary.BigEndian.PutUint32(msg[3:7], currNd.flag.toUint32())
|
||||
binary.BigEndian.PutUint32(msg[7:11], currNd.challenge)
|
||||
copy(msg[11:], currNd.Name)
|
||||
binary.BigEndian.PutUint16(msg[1:3], currentND.version)
|
||||
binary.BigEndian.PutUint32(msg[3:7], currentND.flag.toUint32())
|
||||
binary.BigEndian.PutUint32(msg[7:11], currentND.challenge)
|
||||
copy(msg[11:], currentND.Name)
|
||||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) read_SEND_CHALLENGE(msg []byte) (challenge uint32) {
|
||||
func (currentND *NodeDesc) read_SEND_CHALLENGE(msg []byte) (challenge uint32) {
|
||||
nd := &NodeDesc{
|
||||
Name: string(msg[11:]),
|
||||
version: binary.BigEndian.Uint16(msg[1:3]),
|
||||
flag: nodeFlag(binary.BigEndian.Uint32(msg[3:7])),
|
||||
}
|
||||
currNd.remote = nd
|
||||
currentND.remote = nd
|
||||
return binary.BigEndian.Uint32(msg[7:11])
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) read_SEND_CHALLENGE_REPLY(nd *NodeDesc, msg []byte) (isOk bool) {
|
||||
func (currentND *NodeDesc) read_SEND_CHALLENGE_REPLY(nd *NodeDesc, msg []byte) (isOk bool) {
|
||||
nd.challenge = binary.BigEndian.Uint32(msg[1:5])
|
||||
digestB := msg[5:]
|
||||
|
||||
digestA := genDigest(currNd.challenge, currNd.Cookie)
|
||||
digestA := genDigest(currentND.challenge, currentND.Cookie)
|
||||
if bytes.Compare(digestA, digestB) == 0 {
|
||||
isOk = true
|
||||
currNd.state = CONNECTED
|
||||
currentND.state = CONNECTED
|
||||
} else {
|
||||
dLog("BAD HANDSHAKE: digestA: %+v, digestB: %+v", digestA, digestB)
|
||||
isOk = false
|
||||
|
@ -377,28 +389,28 @@ func (currNd *NodeDesc) read_SEND_CHALLENGE_REPLY(nd *NodeDesc, msg []byte) (isO
|
|||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) compose_SEND_CHALLENGE_ACK(nd *NodeDesc) (msg []byte) {
|
||||
func (currentND *NodeDesc) compose_SEND_CHALLENGE_ACK(nd *NodeDesc) (msg []byte) {
|
||||
msg = make([]byte, 17)
|
||||
msg[0] = byte('a')
|
||||
|
||||
digestB := genDigest(nd.challenge, currNd.Cookie) // FIXME: use his cookie, not mine
|
||||
digestB := genDigest(nd.challenge, currentND.Cookie) // FIXME: use his cookie, not mine
|
||||
|
||||
copy(msg[1:], digestB)
|
||||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) compose_SEND_CHALENGE_REPLY(challenge uint32) (msg []byte) {
|
||||
func (currentND *NodeDesc) compose_SEND_CHALENGE_REPLY(challenge uint32) (msg []byte) {
|
||||
msg = make([]byte, 21)
|
||||
msg[0] = byte('r')
|
||||
|
||||
binary.BigEndian.PutUint32(msg[1:5], currNd.challenge)
|
||||
digest := genDigest(challenge, currNd.Cookie)
|
||||
binary.BigEndian.PutUint32(msg[1:5], currentND.challenge)
|
||||
digest := genDigest(challenge, currentND.Cookie)
|
||||
copy(msg[5:], digest)
|
||||
return
|
||||
}
|
||||
|
||||
func (currNd *NodeDesc) read_SEND_CHALLENGE_ACK(msg []byte) {
|
||||
currNd.state = CONNECTED
|
||||
func (currentND *NodeDesc) read_SEND_CHALLENGE_ACK(msg []byte) {
|
||||
currentND.state = CONNECTED
|
||||
return
|
||||
}
|
||||
|
||||
|
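A note on the digest checks above: genDigest itself is outside this hunk. Below is a minimal sketch of what it is expected to compute, assuming the standard Erlang distribution handshake rule (MD5 over the cookie concatenated with the decimal challenge); the MD5 rule is an assumption drawn from the protocol, not code shown in this commit.

package dist

import (
	"crypto/md5"
	"strconv"
)

// genDigest (sketch): MD5(Cookie ++ integer_to_list(Challenge)), per the
// Erlang distribution handshake. Assumption: the real implementation is
// not part of this diff.
func genDigest(challenge uint32, cookie string) []byte {
	sum := md5.Sum([]byte(cookie + strconv.FormatUint(uint64(challenge), 10)))
	return sum[:]
}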
@@ -440,7 +452,7 @@ func (nd NodeDesc) Flags() (flags []string) {
return
}

func (currNd *NodeDesc) readTerm(r io.Reader) (t etf.Term, err error) {
func (currentND *NodeDesc) readTerm(r io.Reader) (t etf.Term, err error) {
b := make([]byte, 1)
_, err = io.ReadFull(r, b)

@@ -451,11 +463,11 @@ func (currNd *NodeDesc) readTerm(r io.Reader) (t etf.Term, err error) {
err = fmt.Errorf("Not ETF: %d", b[0])
return
}
t, err = currNd.term.Read(r)
t, err = currentND.term.Read(r)
return
}

func (currNd *NodeDesc) readDist(r io.Reader) (err error) {
func (currentND *NodeDesc) readDist(r io.Reader) (err error) {
b := make([]byte, 1)
_, err = io.ReadFull(r, b)

@@ -466,15 +478,15 @@ func (currNd *NodeDesc) readDist(r io.Reader) (err error) {
err = fmt.Errorf("Not dist header: %d", b[0])
return
}
return currNd.term.ReadDist(r)
return currentND.term.ReadDist(r)
}

func (currNd *NodeDesc) readCtl(r io.Reader) (t etf.Term, err error) {
t, err = currNd.term.Read(r)
func (currentND *NodeDesc) readCtl(r io.Reader) (t etf.Term, err error) {
t, err = currentND.term.Read(r)
return
}

func (currNd *NodeDesc) readMessage(r io.Reader) (t etf.Term, err error) {
t, err = currNd.term.Read(r)
func (currentND *NodeDesc) readMessage(r io.Reader) (t etf.Term, err error) {
t, err = currentND.term.Read(r)
return
}
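For reference, ReadMessage and WriteMessage above frame every post-handshake distribution message as a 4-byte big-endian length followed by the payload, where a zero length is the keepalive tick that is answered in kind. A standalone sketch of that framing (illustrative only, not part of the commit):

package main

import (
	"encoding/binary"
	"io"
)

// writeFrame mirrors the framing used by WriteMessage above: a 4-byte
// big-endian length, then the payload. An empty payload (length 0)
// acts as the keepalive tick.
func writeFrame(w io.Writer, payload []byte) error {
	var hdr [4]byte
	binary.BigEndian.PutUint32(hdr[:], uint32(len(payload)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}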
@@ -1,15 +1,17 @@
package dist

import (
"context"
"encoding/binary"
"fmt"
"github.com/halturin/ergonode/lib"
"io"
"net"
"strconv"
"strings"
"sync"
"time"

"github.com/halturin/ergonode/lib"
)

const (

@@ -49,7 +51,7 @@ type EPMD struct {
response chan interface{}
}

func (e *EPMD) Init(name string, listenport uint16, epmdport uint16, hidden bool) {
func (e *EPMD) Init(ctx context.Context, name string, listenport uint16, epmdport uint16, hidden bool, disableServer bool) {
ns := strings.Split(name, "@")
if len(ns) != 2 {
panic("FQDN for node name is required (example: node@hostname)")

@@ -74,9 +76,10 @@ func (e *EPMD) Init(name string, listenport uint16, epmdport uint16, hidden bool)

go func(e *EPMD) {
for {
// trying to start embedded EPMD before we go further
Server(epmdport)

if !disableServer {
// trying to start embedded EPMD before we go further
Server(ctx, epmdport)
}
dsn := net.JoinHostPort("", strconv.Itoa(int(epmdport)))
conn, err := net.Dial("tcp", dsn)
if err != nil {

@@ -117,7 +120,6 @@ func (e *EPMD) Init(name string, listenport uint16, epmdport uint16, hidden bool)

func (e *EPMD) ResolvePort(name string) (int, error) {
ns := strings.Split(name, "@")

conn, err := net.Dial("tcp", net.JoinHostPort(ns[1], fmt.Sprintf("%d", e.PortEMPD)))
if err != nil {
return -1, err

@@ -193,12 +195,12 @@ type nodeinfo struct {
Extra []byte
}

type epmdsrv struct {
type embeddedEPMDserver struct {
portmap map[string]*nodeinfo
mtx sync.RWMutex
}

func (e *epmdsrv) Join(name string, info *nodeinfo) bool {
func (e *embeddedEPMDserver) Join(name string, info *nodeinfo) bool {

e.mtx.Lock()
defer e.mtx.Unlock()

@@ -212,7 +214,7 @@ func (e *epmdsrv) Join(name string, info *nodeinfo) bool {
return true
}

func (e *epmdsrv) Get(name string) *nodeinfo {
func (e *embeddedEPMDserver) Get(name string) *nodeinfo {
e.mtx.RLock()
defer e.mtx.RUnlock()
if info, ok := e.portmap[name]; ok {

@@ -221,7 +223,7 @@ func (e *epmdsrv) Get(name string) *nodeinfo {
return nil
}

func (e *epmdsrv) Leave(name string) {
func (e *embeddedEPMDserver) Leave(name string) {
lib.Log("EPMD unregistering node: '%s'", name)

e.mtx.Lock()

@@ -229,7 +231,7 @@ func (e *epmdsrv) Leave(name string) {
e.mtx.Unlock()
}

func (e *epmdsrv) ListAll() map[string]uint16 {
func (e *embeddedEPMDserver) ListAll() map[string]uint16 {
e.mtx.Lock()
lst := make(map[string]uint16)
for k, v := range e.portmap {

@@ -239,23 +241,17 @@ func (e *epmdsrv) ListAll() map[string]uint16 {
return lst
}

var epmdserver *epmdsrv
func Server(ctx context.Context, port uint16) error {

func Server(port uint16) error {

if epmdserver != nil {
// already started
return fmt.Errorf("Already started")
}

epmd, err := net.Listen("tcp", net.JoinHostPort("", strconv.Itoa(int(port))))
lc := net.ListenConfig{}
epmd, err := lc.Listen(ctx, "tcp", net.JoinHostPort("", strconv.Itoa(int(port))))
if err != nil {
lib.Log("Can't start embedded EPMD service: %s", err)
return fmt.Errorf("Can't start embedded EPMD service: %s", err)

}

epmdserver = &epmdsrv{
epmdServer := &embeddedEPMDserver{
portmap: make(map[string]*nodeinfo),
}

@@ -281,7 +277,7 @@ func Server(port uint16) error {
lib.Log("Request from EPMD client: %v", buf[:n])
if err != nil {
if name != "" {
epmdserver.Leave(name)
epmdServer.Leave(name)
}
return
}

@@ -292,7 +288,7 @@ func Server(port uint16) error {

switch buf[2] {
case EPMD_ALIVE2_REQ:
reply, registered := compose_ALIVE2_RESP(buf[3:n])
reply, registered := epmdServer.compose_ALIVE2_RESP(buf[3:n])
c.Write(reply)
if registered == "" {
return

@@ -305,10 +301,10 @@ func Server(port uint16) error {
}
continue
case EPMD_PORT_PLEASE2_REQ:
c.Write(compose_EPMD_PORT2_RESP(buf[3:n]))
c.Write(epmdServer.compose_EPMD_PORT2_RESP(buf[3:n]))
return
case EPMD_NAMES_REQ:
c.Write(compose_EPMD_NAMES_RESP(port, buf[3:n]))
c.Write(epmdServer.compose_EPMD_NAMES_RESP(port, buf[3:n]))
return
default:
lib.Log("unknown EPMD request")

@@ -324,7 +320,7 @@ func Server(port uint16) error {
return nil
}

func compose_ALIVE2_RESP(req []byte) ([]byte, string) {
func (e *embeddedEPMDserver) compose_ALIVE2_RESP(req []byte) ([]byte, string) {

hidden := false //
if req[2] == 72 {

@@ -345,7 +341,7 @@ func compose_ALIVE2_RESP(req []byte) ([]byte, string) {
reply[0] = EPMD_ALIVE2_RESP

registered := ""
if epmdserver.Join(name, &info) {
if e.Join(name, &info) {
reply[1] = 0
registered = name
} else {

@@ -357,9 +353,9 @@ func compose_ALIVE2_RESP(req []byte) ([]byte, string) {
return reply, registered
}

func compose_EPMD_PORT2_RESP(req []byte) []byte {
func (e *embeddedEPMDserver) compose_EPMD_PORT2_RESP(req []byte) []byte {
name := string(req)
info := epmdserver.Get(name)
info := e.Get(name)

if info == nil {
// not found

@@ -391,14 +387,14 @@ func compose_EPMD_PORT2_RESP(req []byte) []byte {
return reply
}

func compose_EPMD_NAMES_RESP(port uint16, req []byte) []byte {
func (e *embeddedEPMDserver) compose_EPMD_NAMES_RESP(port uint16, req []byte) []byte {
// io:format("name ~ts at port ~p~n", [NodeName, Port]).
var str strings.Builder
var s string
var portbuf [4]byte
binary.BigEndian.PutUint32(portbuf[0:4], uint32(port))
str.WriteString(string(portbuf[0:]))
for h, p := range epmdserver.ListAll() {
for h, p := range e.ListAll() {
s = fmt.Sprintf("name %s at port %d\n", h, p)
str.WriteString(s)
}
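The embedded server above speaks the standard EPMD wire protocol: a 2-byte big-endian length prefix, then a one-byte request code and its payload. Below is a client-side sketch of the PORT_PLEASE2 request it serves; the numeric codes (122 for the request, 119 for the response) come from the EPMD protocol description and are assumptions here, since the constants block is elided in this diff.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// resolveNodePort sketches the client side of EPMD_PORT_PLEASE2_REQ as
// handled above: send [len][122][node name], expect a PORT2_RESP (119)
// with a result byte and the node's listen port in bytes 2..3.
func resolveNodePort(epmdAddr, node string) (uint16, error) {
	conn, err := net.Dial("tcp", epmdAddr)
	if err != nil {
		return 0, err
	}
	defer conn.Close()

	req := make([]byte, 3+len(node))
	binary.BigEndian.PutUint16(req[0:2], uint16(1+len(node)))
	req[2] = 122 // EPMD_PORT_PLEASE2_REQ (assumed value)
	copy(req[3:], node)
	if _, err := conn.Write(req); err != nil {
		return 0, err
	}

	resp := make([]byte, 512)
	n, err := conn.Read(resp)
	if err != nil || n < 4 {
		return 0, fmt.Errorf("short EPMD response: %v", err)
	}
	if resp[0] != 119 || resp[1] != 0 { // PORT2_RESP, result 0 = ok
		return 0, fmt.Errorf("node %q is not registered", node)
	}
	return binary.BigEndian.Uint16(resp[2:4]), nil
}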
591 ergonode.go

@@ -1,591 +0,0 @@
// Copyright 2012-2013 Metachord Ltd.
// All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package ergonode

import (
"errors"
"fmt"
"github.com/halturin/ergonode/dist"
"github.com/halturin/ergonode/etf"
"github.com/halturin/ergonode/lib"

"net"
"strconv"
"strings"
"sync"
"time"
)

type regReq struct {
replyTo chan etf.Pid
channels procChannels
}

type regNameReq struct {
name etf.Atom
pid etf.Pid
}

type unregNameReq struct {
name etf.Atom
}

type registryChan struct {
storeChan chan regReq
regNameChan chan regNameReq
unregNameChan chan unregNameReq
}

type nodeConn struct {
conn net.Conn
wchan chan []etf.Term
}

type systemProcs struct {
netKernel *netKernel
globalNameServer *globalNameServer
rpcRex *rpcRex
}

type Node struct {
dist.EPMD
epmdreply chan interface{}
Cookie string
registry *registryChan
channels map[etf.Pid]procChannels
registered map[etf.Atom]etf.Pid
connections map[etf.Atom]nodeConn
sysProcs systemProcs
monitors map[etf.Atom][]etf.Pid // node monitors
monitorsP map[etf.Pid][]etf.Pid // process monitors
procID uint32
lock sync.Mutex
}

type procChannels struct {
in chan etf.Term
inFrom chan etf.Tuple
init chan bool
}

// Behaviour interface contains methods you should implement to make your own process behaviour
type Behaviour interface {
ProcessLoop(pcs procChannels, pd Process, args ...interface{}) // method which implements control flow of process
}

// Process interface contains methods which should be implemented in each process
type Process interface {
Options() (options map[string]interface{}) // method returns process-related options
setNode(node *Node) // method sets pointer to Node structure
setPid(pid etf.Pid) // method sets pid of started process
}

// Create creates a new node context with the specified name and cookie string
func Create(name string, cookie string, ports ...uint16) (node *Node) {
var listenRangeBegin uint16 = 15000
var listenRangeEnd uint16 = 65000
var hidden bool = false
var portEPMD uint16 = 4369
var listenPort uint16 = 0
var listener net.Listener

lib.Log("Start with name '%s' and cookie '%s'", name, cookie)

switch len(ports) {
case 0:
// use defaults
case 1:
listenRangeBegin = ports[0]
case 2:
listenRangeBegin = ports[0]
listenRangeEnd = ports[1]
if listenRangeBegin-listenRangeEnd < 0 {
panic("Wrong port arguments")
}
case 3:
listenRangeBegin = ports[0]
listenRangeEnd = ports[1]
if listenRangeBegin-listenRangeEnd < 0 {
panic("Wrong port arguments")
}
portEPMD = ports[2]

default:
panic("Wrong port arguments")
}

lib.Log("Listening range: %d...%d", listenRangeBegin, listenRangeEnd)
if portEPMD != 4369 {
lib.Log("Using custom EPMD port: %d", portEPMD)
}

for p := listenRangeBegin; p <= listenRangeEnd; p++ {
l, err := net.Listen("tcp", net.JoinHostPort("", strconv.Itoa(int(p))))
if err != nil {
continue
}
listenPort = p
listener = l
break
}

if listenPort == 0 {
panic("Can't listen port")
}

registry := &registryChan{
storeChan: make(chan regReq),
regNameChan: make(chan regNameReq),
unregNameChan: make(chan unregNameReq),
}

epmd := dist.EPMD{}
epmd.Init(name, listenPort, portEPMD, hidden)

node = &Node{
EPMD: epmd,
Cookie: cookie,
registry: registry,
channels: make(map[etf.Pid]procChannels),
registered: make(map[etf.Atom]etf.Pid),
connections: make(map[etf.Atom]nodeConn),
monitors: make(map[etf.Atom][]etf.Pid),
monitorsP: make(map[etf.Pid][]etf.Pid),
procID: 1,
}

go func() {
for {
c, err := listener.Accept()
lib.Log("Accepted new connection from %s", c.RemoteAddr().String())
if err != nil {
lib.Log(err.Error())
} else {
node.run(c, false)
}
}
}()

go node.registrator()

node.sysProcs.netKernel = new(netKernel)
node.Spawn(node.sysProcs.netKernel)

node.sysProcs.globalNameServer = new(globalNameServer)
node.Spawn(node.sysProcs.globalNameServer)

node.sysProcs.rpcRex = new(rpcRex)
node.Spawn(node.sysProcs.rpcRex)

return node
}

// Spawn creates a new process and stores its identifier in the table at the current node
func (n *Node) Spawn(pd Process, args ...interface{}) (pid etf.Pid) {
options := pd.Options()
chanSize, ok := options["chan-size"].(int)
if !ok {
chanSize = 100
}

in := make(chan etf.Term, chanSize)
inFrom := make(chan etf.Tuple, chanSize)
initCh := make(chan bool)
pcs := procChannels{
in: in,
inFrom: inFrom,
init: initCh,
}
pid = n.storeProcess(pcs)
pd.setNode(n)
pd.setPid(pid)
go pd.(Behaviour).ProcessLoop(pcs, pd, args...)
<-initCh
return
}

// Register associates the name with pid
func (n *Node) Register(name etf.Atom, pid etf.Pid) {
r := regNameReq{name: name, pid: pid}
n.registry.regNameChan <- r
}

// Unregister removes the registered name
func (n *Node) Unregister(name etf.Atom) {
r := unregNameReq{name: name}
n.registry.unregNameChan <- r
}

// Registered returns a list of names which have been registered using Register
func (n *Node) Registered() (pids []etf.Atom) {
pids = make([]etf.Atom, len(n.registered))
i := 0
for p, _ := range n.registered {
pids[i] = p
i++
}
return
}

func (n *Node) registrator() {
for {
select {
case req := <-n.registry.storeChan:
var pid etf.Pid
pid.Node = etf.Atom(n.FullName)
pid.Id = n.getProcID()
pid.Serial = 1
pid.Creation = byte(n.Creation)

n.channels[pid] = req.channels
req.replyTo <- pid
case req := <-n.registry.regNameChan:
n.registered[req.name] = req.pid
case req := <-n.registry.unregNameChan:
delete(n.registered, req.name)
}
}
}

func (n *Node) storeProcess(chs procChannels) (pid etf.Pid) {
myChan := make(chan etf.Pid)
n.registry.storeChan <- regReq{replyTo: myChan, channels: chs}
pid = <-myChan
return pid
}

func (n *Node) getProcID() (s uint32) {

n.lock.Lock()
defer n.lock.Unlock()

s = n.procID
n.procID += 1
return
}

func (n *Node) run(c net.Conn, negotiate bool) {

var currNd *dist.NodeDesc

if negotiate {
currNd = dist.NewNodeDesc(n.FullName, n.Cookie, false, c)
} else {
currNd = dist.NewNodeDesc(n.FullName, n.Cookie, false, nil)
}

wchan := make(chan []etf.Term, 10)
// run writer routine
go func() {
for {
terms := <-wchan
err := currNd.WriteMessage(c, terms)
if err != nil {
lib.Log("Enode error (writing): %s", err.Error())
break
}
}
c.Close()
n.lock.Lock()
n.handle_monitors_node(currNd.GetRemoteName())
delete(n.connections, currNd.GetRemoteName())
n.lock.Unlock()
}()

go func() {
for {
terms, err := currNd.ReadMessage(c)
if err != nil {
lib.Log("Enode error (reading): %s", err.Error())
break
}
n.handleTerms(c, wchan, terms)
}
c.Close()
n.lock.Lock()
n.handle_monitors_node(currNd.GetRemoteName())
delete(n.connections, currNd.GetRemoteName())
n.lock.Unlock()
}()

<-currNd.Ready

return
}

func (n *Node) handleTerms(c net.Conn, wchan chan []etf.Term, terms []etf.Term) {
lib.Log("Node terms: %#v", terms)

if len(terms) == 0 {
return
}
switch t := terms[0].(type) {
case etf.Tuple:
if len(t) > 0 {
switch act := t.Element(1).(type) {
case int:
switch act {
case REG_SEND:
if len(terms) == 2 {
n.route(t.Element(2), t.Element(4), terms[1])
} else {
lib.Log("*** ERROR: bad REG_SEND: %#v", terms)
}
case SEND:
n.route(nil, t.Element(3), terms[1])

// Not implemented yet, just stubs. TODO.
case LINK:
lib.Log("LINK message (act %d): %#v", act, t)
case UNLINK:
lib.Log("UNLINK message (act %d): %#v", act, t)
case NODE_LINK:
lib.Log("NODE_LINK message (act %d): %#v", act, t)
case EXIT:
lib.Log("EXIT message (act %d): %#v", act, t)
case EXIT2:
lib.Log("EXIT2 message (act %d): %#v", act, t)
case MONITOR:
lib.Log("MONITOR message (act %d): %#v", act, t)
case DEMONITOR:
lib.Log("DEMONITOR message (act %d): %#v", act, t)
case MONITOR_EXIT:
lib.Log("MONITOR_EXIT message (act %d): %#v", act, t)

// {'DOWN',#Ref<0.0.13893633.237772>,process,<26194.4.1>,reason}
M := etf.Term(etf.Tuple{etf.Atom("DOWN"),
t.Element(3), etf.Atom("process"),
t.Element(2), t.Element(5)})

n.route(t.Element(2), t.Element(3), M)

default:
lib.Log("Unhandled node message (act %d): %#v", act, t)
}
case etf.Atom:
switch act {
case etf.Atom("$connection"):
lib.Log("SET NODE %#v", t)
n.lock.Lock()
n.connections[t[1].(etf.Atom)] = nodeConn{conn: c, wchan: wchan}
n.lock.Unlock()

// the currNd.Ready channel is waiting for registration of this connection
ready := (t[2]).(chan bool)
ready <- true
}
default:
lib.Log("UNHANDLED ACT: %#v", t.Element(1))
}
}
}
}

// route incoming message to a registered process (with sender 'from' value)
func (n *Node) route(from, to etf.Term, message etf.Term) {
var toPid etf.Pid
switch tp := to.(type) {
case etf.Pid:
toPid = tp
case etf.Atom:
toPid, _ = n.registered[tp]
}
pcs := n.channels[toPid]
if from == nil {
lib.Log("SEND: To: %#v, Message: %#v", to, message)
pcs.in <- message
} else {
lib.Log("REG_SEND: (%#v )From: %#v, To: %#v, Message: %#v", pcs.inFrom, from, to, message)
pcs.inFrom <- etf.Tuple{from, message}
}
}

// Send makes an outgoing message
func (n *Node) Send(from interface{}, to interface{}, message *etf.Term) (err error) {
defer func() {
if r := recover(); r != nil {
err = errors.New(fmt.Sprint(r))
}
}()

switch tto := to.(type) {
case etf.Pid:
n.sendbyPid(tto, message)
case etf.Tuple:
if len(tto) == 2 {
// causes panic if casting to etf.Atom goes wrong
if tto[0].(etf.Atom) == tto[1].(etf.Atom) {
// just stub.
}
n.sendbyTuple(from.(etf.Pid), tto, message)
}
}

return nil
}

func (n *Node) sendbyPid(to etf.Pid, message *etf.Term) {
var conn nodeConn
var exists bool
lib.Log("Send (via PID): %#v, %#v", to, message)
if string(to.Node) == n.FullName {
lib.Log("Send to local node")
pcs := n.channels[to]
pcs.in <- *message
} else {

lib.Log("Send to remote node: %#v, %#v", to, n.connections[to.Node])

if conn, exists = n.connections[to.Node]; !exists {
lib.Log("Send (via PID): create new connection (%s)", to.Node)
if err := connect(n, to.Node); err != nil {
panic(err.Error())
}
conn, _ = n.connections[to.Node]
}

msg := []etf.Term{etf.Tuple{SEND, etf.Atom(""), to}, *message}
conn.wchan <- msg
}
}

func (n *Node) sendbyTuple(from etf.Pid, to etf.Tuple, message *etf.Term) {
var conn nodeConn
var exists bool
lib.Log("Send (via NAME): %#v, %#v", to, message)

// to = {processname, 'nodename@hostname'}

if conn, exists = n.connections[to[1].(etf.Atom)]; !exists {
lib.Log("Send (via NAME): create new connection (%s)", to[1])
if err := connect(n, to[1].(etf.Atom)); err != nil {
panic(err.Error())
}
conn, _ = n.connections[to[1].(etf.Atom)]
}

msg := []etf.Term{etf.Tuple{REG_SEND, from, etf.Atom(""), to[0]}, *message}
conn.wchan <- msg

return
}

func (n *Node) Monitor(by etf.Pid, to etf.Pid) {
var conn nodeConn
var exists bool

if string(to.Node) == n.FullName {
lib.Log("Monitor local PID: %#v by %#v", to, by)

pcs := n.channels[to]
msg := []etf.Term{etf.Tuple{MONITOR, by, to, n.MakeRef()}}
pcs.in <- msg

return
}

lib.Log("Monitor remote PID: %#v by %#v", to, by)

if conn, exists = n.connections[to.Node]; !exists {
lib.Log("Send (via PID): create new connection (%s)", to.Node)
if err := connect(n, to.Node); err != nil {
panic(err.Error())
}
conn, _ = n.connections[to.Node]
}

msg := []etf.Term{etf.Tuple{MONITOR, by, to, n.MakeRef()}}
conn.wchan <- msg
}

func (n *Node) MonitorNode(by etf.Pid, node etf.Atom, flag bool) {
var exists bool
var monitors []etf.Pid

lib.Log("Monitor node: %#v by %#v", node, by)
if _, exists = n.connections[node]; !exists {
lib.Log("... connecting to %#v", node)
if err := connect(n, node); err != nil {
panic(err.Error())
}
}

monitors = n.monitors[node]

if !flag {
lib.Log("... removing monitor: %#v by %#v", node, by)
monitors = removePid(monitors, by)
} else {
lib.Log("... setting up monitor: %#v by %#v", node, by)
// DUE TO...
// http://erlang.org/doc/man/erlang.html#monitor_node-2
// Making several calls to monitor_node(Node, true) for the same Node is not an error;
// it results in as many independent monitoring instances.
// DO NOT CHECK whether this pid is already in the list, just add one more
monitors = append(monitors, by)
}

n.monitors[node] = monitors
lib.Log("Monitors for node (%#v): %#v", node, monitors)

}

func (n *Node) handle_monitors_node(node etf.Atom) {
lib.Log("Node (%#v) is down. Send it to %#v", node, n.monitors[node])
for _, pid := range n.monitors[node] {
pcs := n.channels[pid]
msg := etf.Term(etf.Tuple{etf.Atom("nodedown"), node})
pcs.in <- msg
}
}

func (n *Node) MakeRef() (ref etf.Ref) {
ref.Node = etf.Atom(n.FullName)
ref.Creation = 1

nt := time.Now().UnixNano()
id1 := uint32(uint64(nt) & ((2 << 17) - 1))
id2 := uint32(uint64(nt) >> 46)
ref.Id = []uint32{id1, id2, 0}

return
}

func connect(n *Node, to etf.Atom) error {

var port int
var err error

if port, err = n.ResolvePort(string(to)); port < 0 {
return fmt.Errorf("Can't resolve port: %s", err)
}
ns := strings.Split(string(to), "@")

c, err := net.Dial("tcp", net.JoinHostPort(ns[1], strconv.Itoa(int(port))))
if err != nil {
lib.Log("Error calling net.Dial : %s", err.Error())
return err
}

if tcp, ok := c.(*net.TCPConn); ok {
tcp.SetKeepAlive(true)
}

n.run(c, true)

return nil
}

func removePid(pids []etf.Pid, pid etf.Pid) []etf.Pid {
for i, p := range pids {
if p == pid {
return append(pids[:i], pids[i+1:]...)
}
}
return pids
}
@@ -0,0 +1,167 @@
package ergonode

// TODO: https://github.com/erlang/otp/blob/master/lib/runtime_tools-1.13.1/src/erlang_info.erl

import (
"github.com/halturin/ergonode/etf"
"github.com/halturin/ergonode/lib"
)

type erlang struct {
GenServer
}

type erlangState struct {
process *Process
}

// Init initializes process state using arbitrary arguments
// Init -> state
func (e *erlang) Init(p *Process, args ...interface{}) interface{} {
lib.Log("ERLANG: Init: %#v", args)
return erlangState{
process: p,
}
}

// HandleCast -> ("noreply", state) - noreply
//               ("stop", reason) - stop with reason
func (e *erlang) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
var newState erlangState = state.(erlangState)
lib.Log("ERLANG: HandleCast: %#v", message)
return "noreply", newState
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
//               ("noreply", _, state) - noreply
//               ("stop", reason, _) - normal stop
func (e *erlang) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
lib.Log("ERLANG: HandleCall: %#v, From: %#v", message, from)
var newState erlangState = state.(erlangState)

switch m := message.(type) {
case etf.Tuple:
switch m.Element(1) {
case etf.Atom("process_info"):
args := m.Element(2).(etf.List)
reply := processInfo(newState.process, args[0].(etf.Pid), args[1])
return "reply", reply, state
case etf.Atom("system_info"):
args := m.Element(2).(etf.List)
reply := systemInfo(newState.process, args[0].(etf.Atom))
return "reply", reply, state

case etf.Atom("function_exported"):
return "reply", true, state
}

}
return "reply", etf.Atom("ok"), state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
//               ("stop", reason) - normal stop
func (e *erlang) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
lib.Log("ERLANG: HandleInfo: %#v", message)
return "noreply", "normal"
}

// Terminate is called when the process dies
func (e *erlang) Terminate(reason string, state interface{}) {
lib.Log("ERLANG: Terminate: %#v", reason)
}

func processInfo(p *Process, pid etf.Pid, property etf.Term) etf.Term {
process := p.Node.GetProcessByPid(pid)
if process == nil {
return etf.Atom("undefined")
}

switch property {
case etf.Atom("registered_name"):
name := process.Name()
if name == "" {
return etf.List{}
}

return etf.Tuple{property, etf.Atom(name)}
case etf.Atom("messages"):
return etf.Tuple{property, etf.List{}}
case etf.Atom("dictionary"):
return etf.Tuple{property, etf.List{}}
case etf.Atom("current_stacktrace"):
return etf.Tuple{property, etf.List{}}
}

switch p := property.(type) {
case etf.List:
values := etf.List{}
info := process.Info()
for i := range p {
switch p[i] {
case etf.Atom("binary"):
values = append(values, etf.Tuple{p[i], etf.List{}})
case etf.Atom("catchlevel"):
// values = append(values, etf.Tuple{p[i], 0})
case etf.Atom("current_function"):
values = append(values, etf.Tuple{p[i], info.CurrentFunction})
case etf.Atom("error_handler"):
// values = append(values, etf.Tuple{p[i], })
case etf.Atom("garbage_collection"):
values = append(values, etf.Tuple{p[i], etf.List{}})
case etf.Atom("group_leader"):
values = append(values, etf.Tuple{p[i], info.GroupLeader})
case etf.Atom("heap_size"):
// values = append(values, etf.Tuple{p[i], etf.Tuple{etf.Atom("words"), 0}})
case etf.Atom("initial_call"):
values = append(values, etf.Tuple{p[i], "object:loop"})
case etf.Atom("last_calls"):
// values = append(values, etf.Tuple{p[i], })
case etf.Atom("links"):
values = append(values, etf.Tuple{p[i], info.Links})
case etf.Atom("memory"):
values = append(values, etf.Tuple{p[i], 0})
case etf.Atom("message_queue_len"):
values = append(values, etf.Tuple{p[i], info.MessageQueueLen})
case etf.Atom("monitored_by"):
values = append(values, etf.Tuple{p[i], info.MonitoredBy})
case etf.Atom("monitors"):
values = append(values, etf.Tuple{p[i], info.Monitors})
case etf.Atom("priority"):
// values = append(values, etf.Tuple{p[i], 0})
case etf.Atom("reductions"):
values = append(values, etf.Tuple{p[i], info.Reductions})
case etf.Atom("registered_name"):
values = append(values, etf.Tuple{p[i], process.Name()})
case etf.Atom("sequential_trace_token"):
// values = append(values, etf.Tuple{p[i], })
case etf.Atom("stack_size"):
// values = append(values, etf.Tuple{p[i], etf.Tuple{etf.Atom("words"), 0}})
case etf.Atom("status"):
values = append(values, etf.Tuple{p[i], info.Status})
case etf.Atom("suspending"):
// values = append(values, etf.Tuple{p[i], })
case etf.Atom("total_heap_size"):
// values = append(values, etf.Tuple{p[i], etf.Tuple{etf.Atom("words"), 0}})
case etf.Atom("trace"):
// values = append(values, etf.Tuple{p[i], 0})
case etf.Atom("trap_exit"):
values = append(values, etf.Tuple{p[i], info.TrapExit})

}

}
return values
}
return nil
}

func systemInfo(p *Process, name etf.Atom) etf.Term {
switch name {
case etf.Atom("dirty_cpu_schedulers"):
return 1
}
return etf.Atom("unknown")
}
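To make the dispatch above concrete: a process_info call arrives as a tuple whose second element is the argument list [Pid, Property]. A hypothetical helper showing the expected term shape (the helper name is illustrative, not part of this commit):

package ergonode

import "github.com/halturin/ergonode/etf"

// exampleProcessInfoCall builds the term shape HandleCall above matches
// for process_info: {process_info, [Pid, Property]}. Illustrative only.
func exampleProcessInfoCall(pid etf.Pid) etf.Tuple {
	return etf.Tuple{
		etf.Atom("process_info"),
		etf.List{pid, etf.Atom("registered_name")},
	}
}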
18 etf/etf.go

@@ -1,8 +1,8 @@
package etf

import (
"encoding/json"
"fmt"
"hash/fnv"
"reflect"
"strings"
)

@@ -63,6 +63,7 @@ type Function struct {

var (
MapType = reflect.TypeOf(Map{})
hasher = fnv.New32a()
)

func StringTerm(t Term) (s string, ok bool) {

@@ -159,7 +160,6 @@ var tagNames = map[byte]string{
ettString: "STRING_EXT",
}


func (m Map) Element(k Term) Term {
return m[k]
}

@@ -172,6 +172,11 @@ func (t Tuple) Element(i int) Term {
return t[i-1]
}

func (p Pid) Str() string {
hasher.Write([]byte(p.Node))
defer hasher.Reset()
return fmt.Sprintf("<%X.%d.%d>", hasher.Sum32(), p.Id, p.Serial)
}
func tagName(t byte) (name string) {
name = tagNames[t]
if name == "" {

@@ -407,12 +412,3 @@ type InvalidStructKeyError struct {
func (s *InvalidStructKeyError) Error() string {
return fmt.Sprintf("Cannot use %s as struct field name", reflect.TypeOf(s.Term).Name())
}

func (m Map) MarshalJSON() ([]byte, error) {
var v map[string]interface{}
v = make(map[string]interface{}, len(m))
for key, val := range m {
v[key.(string)] = val
}
return json.Marshal(v)
}
@@ -316,7 +316,7 @@ func TestReadInt(t *testing.T) {
t.Error(err)
} else if l := in.Len(); l != 0 {
t.Errorf("buffer len %d", l)
} else if exp := int(0); v != exp {
} else if exp := int64(0); v != exp {
t.Errorf("expected %v, got %v", exp, v)
}
}
10 etf/write.go

@@ -31,7 +31,7 @@ func (c *Context) Write(w io.Writer, term interface{}) (err error) {
case *big.Int:
err = c.writeBigInt(w, v)
case string:
err = c.writeBinary(w, []byte(v))
err = c.writeString(w, v)
case []byte:
err = c.writeBinary(w, v)
case float64:

@@ -230,19 +230,17 @@ func (c *Context) writePid(w io.Writer, p Pid) (err error) {
}

func (c *Context) writeString(w io.Writer, s string) (err error) {
switch size := len(s); {
case size <= math.MaxUint16:
if size := len(s); size <= math.MaxUint16 {
// $kLL…
_, err = w.Write([]byte{ettString, byte(size >> 8), byte(size)})
if err == nil {
_, err = w.Write([]byte(s))
}

default:
err = fmt.Errorf("string is too big (%d bytes)", size)
return
}

return
return c.writeList(w, []byte(s))
}

func (c *Context) writeList(w io.Writer, l interface{}) (err error) {
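With the rewrite above, strings up to 65535 bytes are written with the compact STRING_EXT framing (tag byte, 2-byte big-endian length, raw bytes) and longer strings fall back to list encoding instead of returning an error. A standalone sketch of the short-string framing; the tag value 107 ('k', matching the $kLL… comment above) is taken from the ETF specification, since the ettString constant's value is not visible in this diff:

package main

import (
	"bytes"
	"encoding/binary"
)

// encodeStringExt mirrors the STRING_EXT framing used by writeString
// above: tag, 2-byte big-endian length, then the raw bytes. Only valid
// for len(s) <= 65535; longer strings take the list path instead.
func encodeStringExt(s string) []byte {
	buf := new(bytes.Buffer)
	buf.WriteByte(107) // ettString ('k'); assumed value
	_ = binary.Write(buf, binary.BigEndian, uint16(len(s)))
	buf.WriteString(s)
	return buf.Bytes()
}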
@@ -176,26 +176,24 @@ func TestWritePid(t *testing.T) {

func TestWriteString(t *testing.T) {
c := new(Context)
test := func(in string, shouldFail bool) {
test := func(in string, identical bool) {
w := new(bytes.Buffer)
if err := c.writeString(w, in); err != nil {
if !shouldFail {
t.Error(in, err)
}
} else if shouldFail {
t.Errorf("err == nil (%v)", in)
t.Error(in, err)
} else if v, err := c.Read(w); err != nil {
t.Error(in, err)
} else if l := w.Len(); l != 0 {
t.Errorf("%v: buffer len %d", in, l)
} else if v != in {
t.Errorf("expected %v, got %v", in, v)
if identical {
t.Errorf("expected %v, got %v", in, v)
}
}
}

test(string(bytes.Repeat([]byte{'a'}, math.MaxUint16)), false)
test(string(bytes.Repeat([]byte{'a'}, math.MaxUint16)), true)
test("", false)
test(string(bytes.Repeat([]byte{'a'}, math.MaxUint16+1)), true)
test(string(bytes.Repeat([]byte{'a'}, math.MaxUint16+1)), false)
}

func TestWriteTerm(t *testing.T) {
@@ -0,0 +1,185 @@
package main

import (
"flag"
"fmt"

ergo "github.com/halturin/ergonode"
"github.com/halturin/ergonode/etf"
)

var (
NodeName string
Cookie string
err error
ListenRangeBegin int
ListenRangeEnd int = 35000
Listen string
ListenEPMD int

EnableRPC bool
)

type demoApp struct {
ergo.Application
}

func (da *demoApp) Load(args ...interface{}) (ergo.ApplicationSpec, error) {
return ergo.ApplicationSpec{
Name: "demoApp",
Description: "Demo Application",
Version: "v.1.0",
Environment: map[string]interface{}{
"envName1": 123,
"envName2": "Hello world",
},
Children: []ergo.ApplicationChildSpec{
ergo.ApplicationChildSpec{
Child: &demoSup{},
Name: "demoSup",
},
ergo.ApplicationChildSpec{
Child: &demoGenServ{},
Name: "justDemoGS",
},
},
}, nil
}

func (da *demoApp) Start(process *ergo.Process, args ...interface{}) {
fmt.Println("Application started!")
}

type demoSup struct {
ergo.Supervisor
}

func (ds *demoSup) Init(args ...interface{}) ergo.SupervisorSpec {
return ergo.SupervisorSpec{
Name: "demoAppSup",
Children: []ergo.SupervisorChildSpec{
ergo.SupervisorChildSpec{
Name: "demoServer01",
Child: &demoGenServ{},
Restart: ergo.SupervisorChildRestartTemporary,
// Restart: ergo.SupervisorChildRestartTransient,
// Restart: ergo.SupervisorChildRestartPermanent,
},
ergo.SupervisorChildSpec{
Name: "demoServer02",
Child: &demoGenServ{},
Restart: ergo.SupervisorChildRestartPermanent,
Args: []interface{}{12345},
},
ergo.SupervisorChildSpec{
Name: "demoServer03",
Child: &demoGenServ{},
Restart: ergo.SupervisorChildRestartPermanent,
Args: []interface{}{"abc", 67890},
},
},
Strategy: ergo.SupervisorStrategy{
Type: ergo.SupervisorStrategyOneForAll,
// Type: ergo.SupervisorStrategyRestForOne,
// Type: ergo.SupervisorStrategyOneForOne,
Intensity: 2,
Period: 5,
},
}
}

// GenServer implementation structure
type demoGenServ struct {
ergo.GenServer
process *ergo.Process
}

type state struct {
i int
}

// Init initializes process state using arbitrary arguments
// Init(...) -> state
func (dgs *demoGenServ) Init(p *ergo.Process, args ...interface{}) interface{} {
fmt.Printf("Init (%s): args %v \n", p.Name(), args)
dgs.process = p
return state{i: 12345}
}

// HandleCast serves incoming messages sent via gen_server:cast
// HandleCast -> ("noreply", state) - noreply
//               ("stop", reason) - stop with reason
func (dgs *demoGenServ) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleCast (%s): %#v\n", dgs.process.Name(), message)
switch message {
case etf.Atom("stop"):
return "stop", "they said"
}
return "noreply", state
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
//               ("noreply", _, state) - noreply
//               ("stop", reason, _) - normal stop
func (dgs *demoGenServ) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
fmt.Printf("HandleCall (%s): %#v, From: %#v\n", dgs.process.Name(), message, from)

reply := etf.Term(etf.Tuple{etf.Atom("error"), etf.Atom("unknown_request")})

switch message {
case etf.Atom("hello"):
reply = etf.Term(etf.Atom("hi"))
}
return "reply", reply, state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
//               ("stop", reason) - normal stop
func (dgs *demoGenServ) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleInfo (%s): %#v\n", dgs.process.Name(), message)
return "noreply", state
}

// Terminate is called when the process dies
func (dgs *demoGenServ) Terminate(reason string, state interface{}) {
fmt.Printf("Terminate (%s): %#v\n", dgs.process.Name(), reason)
}

func init() {
flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}

func main() {
flag.Parse()

opts := ergo.NodeOptions{
ListenRangeBegin: uint16(ListenRangeBegin),
ListenRangeEnd: uint16(ListenRangeEnd),
EPMDPort: uint16(ListenEPMD),
}

// Initialize new node with given name, cookie, listening port range and epmd port
node := ergo.CreateNode(NodeName, Cookie, opts)

// start application
if err := node.ApplicationLoad(&demoApp{}); err != nil {
panic(err)
}

process, _ := node.ApplicationStart("demoApp")
fmt.Println("Run erl shell:")
fmt.Printf("erl -name %s -setcookie %s\n", "erl-"+node.FullName, Cookie)

fmt.Println("-----Examples that can be tried from the 'erl' shell:")
fmt.Printf("gen_server:cast({%s,'%s'}, stop).\n", "demoServer01", NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, hello).\n", "demoServer01", NodeName)

process.Wait()
node.Stop()
}
@@ -0,0 +1,120 @@
package main

import (
"flag"
"fmt"

ergo "github.com/halturin/ergonode"
"github.com/halturin/ergonode/etf"
)

// GenServer implementation structure
type demoGenServ struct {
ergo.GenServer
process *ergo.Process
}

type state struct {
i int
}

var (
GenServerName string
NodeName string
Cookie string
err error
ListenRangeBegin int
ListenRangeEnd int = 35000
Listen string
ListenEPMD int

EnableRPC bool
)

// Init initializes process state using arbitrary arguments
// Init(...) -> state
func (dgs *demoGenServ) Init(p *ergo.Process, args ...interface{}) interface{} {
fmt.Printf("Init: args %v \n", args)
dgs.process = p
return state{i: 12345}
}

// HandleCast serves incoming messages sent via gen_server:cast
// HandleCast -> ("noreply", state) - noreply
//               ("stop", reason) - stop with reason
func (dgs *demoGenServ) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleCast: %#v\n", message)
switch message {
case etf.Atom("stop"):
return "stop", "they said"
}
return "noreply", state
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
//               ("noreply", _, state) - noreply
//               ("stop", reason, _) - normal stop
func (dgs *demoGenServ) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
fmt.Printf("HandleCall: %#v, From: %#v\n", message, from)

reply := etf.Term(etf.Tuple{etf.Atom("error"), etf.Atom("unknown_request")})
switch message {
case etf.Atom("hello"):
reply = etf.Term("hi")
}
return "reply", reply, state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
//               ("stop", reason) - normal stop
func (dgs *demoGenServ) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleInfo: %#v\n", message)
return "noreply", state
}

// Terminate is called when the process dies
func (dgs *demoGenServ) Terminate(reason string, state interface{}) {
fmt.Printf("Terminate: %#v\n", reason)
}

func init() {
flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&GenServerName, "gen_server_name", "example", "gen_server name")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}

func main() {
flag.Parse()

opts := ergo.NodeOptions{
ListenRangeBegin: uint16(ListenRangeBegin),
ListenRangeEnd: uint16(ListenRangeEnd),
EPMDPort: uint16(ListenEPMD),
}

// Initialize new node with given name, cookie, listening port range and epmd port
node := ergo.CreateNode(NodeName, Cookie, opts)

// Initialize new instance of demoGenServ structure which implements Process behaviour
demoGS := new(demoGenServ)

// Spawn process with one argument
process, _ := node.Spawn(GenServerName, ergo.ProcessOptions{}, demoGS)
fmt.Println("Run erl shell:")
fmt.Printf("erl -name %s -setcookie %s\n", "erl-"+node.FullName, Cookie)

fmt.Println("-----Examples that can be tried from the 'erl' shell:")
fmt.Printf("gen_server:cast({%s,'%s'}, stop).\n", GenServerName, NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, hello).\n", GenServerName, NodeName)

select {
case <-process.Context.Done():

}
node.Stop()
}
@ -1,233 +0,0 @@
package main

import (
"flag"
"fmt"
"github.com/halturin/ergonode"
"github.com/halturin/ergonode/etf"
"strconv"
"strings"
)

// GenServer implementation structure
type goGenServ struct {
ergonode.GenServer
completeChan chan bool
}

var (
SrvName string
NodeName string
Cookie string
err error
ListenRangeBegin uint16
ListenRangeEnd uint16 = 35000
Listen string
ListenEPMD int

EnableRPC bool
)

// Init initializes process state using arbitrary arguments
func (gs *goGenServ) Init(args ...interface{}) interface{} {
// Self-registration with name go_srv
gs.Node.Register(etf.Atom(SrvName), gs.Self)

// Store first argument as channel
gs.completeChan = args[0].(chan bool)

return nil
}

// HandleCast serves incoming messages sending via gen_server:cast
func (gs *goGenServ) HandleCast(message *etf.Term, state interface{}) (code int, stateout interface{}) {
fmt.Printf("HandleCast: %#v", *message)
stateout = state
code = 0
// Check type of message
switch req := (*message).(type) {
case etf.Tuple:
if len(req) == 2 {
switch act := req[0].(type) {
case etf.Atom:
if string(act) == "ping" {
var self_pid etf.Pid = gs.Self
rep := etf.Term(etf.Tuple{etf.Atom("pong"), etf.Pid(self_pid)})
gs.Send(req[1].(etf.Pid), &rep)

}
}
}
case etf.Atom:
// If message is atom 'stop', we should say it to main process
if string(req) == "stop" {
gs.completeChan <- true
}
}
return
}

// HandleCall serves incoming messages sending via gen_server:call
func (gs *goGenServ) HandleCall(from *etf.Tuple, message *etf.Term, state interface{}) (code int, reply *etf.Term, stateout interface{}) {
// fmt.Printf("HandleCall: %#v, From: %#v\n", *message, *from)

defer func() {
if r := recover(); r != nil {
fmt.Printf("Call recovered: %#v\n", r)
}
}()

stateout = state
code = 1
replyTerm := etf.Term(etf.Tuple{etf.Atom("error"), etf.Atom("unknown_request")})
reply = &replyTerm

switch req := (*message).(type) {
case etf.Atom:
// If message is atom 'stop', we should say it to main process
switch string(req) {
case "pid":
replyTerm = etf.Term(etf.Pid(gs.Self))
reply = &replyTerm
}
case etf.Tuple:
var cto, cmess etf.Term
// {testcall, { {name, node}, message }}
// {testcast, { {name, node}, message }}
if len(req) == 2 {
act := req[0].(etf.Atom)
c := req[1].(etf.Tuple)

switch c[0].(type) {
case etf.Tuple:
switch ct := c[0].(type) {
case etf.Tuple:
if ct[0].(etf.Atom) == ct[1].(etf.Atom) {
}
cto = etf.Term(c[0])
default:
return
}
case etf.Pid:
cto = etf.Term(c[0])
default:
return
}

cmess = c[1]

if string(act) == "testcall" {
fmt.Printf("!!!!!!!testcall... %#v : %#v\n", cto, cmess)
if reply, err = gs.Call(cto, &cmess); err != nil {
fmt.Println(err.Error())
}
} else if string(act) == "testcast" {
fmt.Println("testcast...")
gs.Cast(cto, &cmess)
fmt.Println("testcast...2222")
replyTerm = etf.Term(etf.Atom("ok"))
reply = &replyTerm
fmt.Println("testcast...3333")
} else {
return
}

}
}
return
}

// HandleInfo serves all another incoming messages (Pid ! message)
func (gs *goGenServ) HandleInfo(message *etf.Term, state interface{}) (code int, stateout interface{}) {
fmt.Printf("HandleInfo: %#v\n", *message)
stateout = state
code = 0
return
}

// Terminate called when process died
func (gs *goGenServ) Terminate(reason int, state interface{}) {
fmt.Printf("Terminate: %#v\n", reason)
}

func init() {
flag.StringVar(&Listen, "listen", "15151-20151", "listen port range")
flag.StringVar(&SrvName, "gen_server", "examplegs", "gen_server name")
flag.StringVar(&NodeName, "name", "examplenode@127.0.0.1", "node name")
flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
flag.BoolVar(&EnableRPC, "rpc", false, "enable RPC")

}

func main() {
flag.Parse()

// parse listen range port
l := strings.Split(Listen, "-")
switch len(l) {
case 1:
if i, err := strconv.ParseUint(l[0], 10, 16); err != nil {
panic(err)
} else {
ListenRangeBegin = uint16(i)
}
case 2:
if i, err := strconv.ParseUint(l[0], 10, 16); err != nil {
panic(err)
} else {
ListenRangeBegin = uint16(i)
}
if i, err := strconv.ParseUint(l[1], 10, 16); err != nil {
panic(err)
} else {
ListenRangeEnd = uint16(i)
}
default:
panic("wrong port range arg")
}

// Initialize new node with given name, cookie, listening port range and epmd port
n := ergonode.Create(NodeName, Cookie, uint16(ListenRangeBegin), uint16(ListenRangeEnd), uint16(ListenEPMD))

// listen from ListenRangeBegin ... 65000
// n := ergonode.Create(NodeName, Cookie, uint16(ListenRangeBegin))

// use default listen port range: 15000...65000
//n := ergonode.Create(NodeName, Cookie)

// Create channel to receive message when main process should be stopped
completeChan := make(chan bool)

// Initialize new instance of goGenServ structure which implements Process behaviour
gs := new(goGenServ)

// Spawn process with one arguments
n.Spawn(gs, completeChan)

// RPC
// Create closure
rpc := func(terms etf.List) (r etf.Term) {
r = etf.Term(etf.Tuple{etf.Atom(NodeName), etf.Atom("reply"), len(terms)})
return
}

// Provide it to call via RPC with `rpc:call(gonode@localhost, rpc, call, [as, qwe])`
err = n.RpcProvide("rpc", "call", rpc)
if err != nil {
fmt.Printf("Cannot provide function to RPC: %s\n", err)
}

fmt.Println("Allowed commands...")
fmt.Printf("gen_server:cast({%s,'%s'}, stop).\n", SrvName, NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, pid).\n", SrvName, NodeName)
fmt.Printf("gen_server:cast({%s,'%s'}, {ping, self()}), flush().\n", SrvName, NodeName)
fmt.Println("make remote call by golang node...")
fmt.Printf("gen_server:call({%s,'%s'}, {testcall, {Pid, Message}}).\n", SrvName, NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, {testcall, {{pname, remotenode}, Message}}).\n", SrvName, NodeName)

// Wait to stop
<-completeChan

return
}
@ -0,0 +1,143 @@
package main

import (
"context"
"fmt"
"sync"

ergo "github.com/halturin/ergonode"
"github.com/halturin/ergonode/etf"
)

// GenServer implementation structure
type demoGenServ struct {
ergo.GenServer
process *ergo.Process
wg *sync.WaitGroup
bridge chan interface{}
}

type state struct {
i int
}

var (
GenServerName string
)

// Init initializes process state using arbitrary arguments
// Init(...) -> state
func (dgs *demoGenServ) Init(p *ergo.Process, args ...interface{}) interface{} {
// fmt.Printf("Init: args %v \n", args)
dgs.process = p
dgs.wg = args[0].(*sync.WaitGroup)
dgs.bridge = args[2].(chan interface{})

go func() {
ctx, cancel := context.WithCancel(p.Context)
defer cancel()
for {
select {
case msg := <-args[1].(chan interface{}):
p.Send(p.Self(), etf.Tuple{"forwarded", msg})

case <-ctx.Done():
return
}
}
}()

return state{i: 12345}
}

// HandleCast serves incoming messages sent via gen_server:cast
// HandleCast -> ("noreply", state) - noreply
// ("stop", reason) - stop with reason
func (dgs *demoGenServ) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("[%s] HandleCast: %#v\n", dgs.process.Node.Name, message)
switch message {
case etf.Atom("stop"):
return "stop", "they said"
case etf.Atom("forward"):
dgs.bridge <- fmt.Sprintf("Hi from %v", dgs.process.Self())
}
return "noreply", state
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
// ("noreply", _, state) - noreply
// ("stop", reason, _) - normal stop
func (dgs *demoGenServ) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
fmt.Printf("[%s] HandleCall: %#v, From: %#v\n", dgs.process.Node.Name, message, from)

reply := etf.Term(etf.Tuple{etf.Atom("error"), etf.Atom("unknown_request")})

switch message {
case etf.Atom("hello"):
reply = etf.Term(etf.Atom("hi"))
}
return "reply", reply, state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
// ("stop", reason) - normal stop
func (dgs *demoGenServ) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("[%s] HandleInfo: %#v\n", dgs.process.Node.Name, message)
return "noreply", state
}

// Terminate is called when the process dies
func (dgs *demoGenServ) Terminate(reason string, state interface{}) {
fmt.Printf("[%s] Terminate: %#v\n", dgs.process.Node.Name, reason)
dgs.wg.Done()
}

func main() {
var wg sync.WaitGroup
// Initialize new node with EPMD port 7878
optsNode01 := ergo.NodeOptions{
EPMDPort: 7878,
}
node01 := ergo.CreateNode("demoNode7878@127.0.0.1", "cookie123", optsNode01)
fmt.Println("Started ergo node: demoNode7878@127.0.0.1 on port 7878")
optsNode02 := ergo.NodeOptions{
EPMDPort: 8787,
}
node02 := ergo.CreateNode("demoNode8787@127.0.0.1", "cookie456", optsNode02)
fmt.Println("Started ergo node: demoNode8787@127.0.0.1 on port 8787")

// Spawn processes with arguments
wg.Add(1)
bridgeToNode01 := make(chan interface{}, 10)
bridgeToNode02 := make(chan interface{}, 10)
p1, _ := node01.Spawn("example", ergo.ProcessOptions{}, &demoGenServ{}, &wg, bridgeToNode01, bridgeToNode02)
fmt.Println("Started 'example' GenServer at demoNode7878@127.0.0.1 with PID", p1.Self())

wg.Add(1)
p2, _ := node02.Spawn("example", ergo.ProcessOptions{}, &demoGenServ{}, &wg, bridgeToNode02, bridgeToNode01)
fmt.Println("Started 'example' GenServer at demoNode8787@127.0.0.1 with PID", p2.Self())

fmt.Println("")

fmt.Println("Run erl shell (cluster with cookie123):")
fmt.Printf("ERL_EPMD_PORT=7878 erl -name %s -setcookie cookie123\n", "erl-demoNode-cookie123@127.0.0.1")

fmt.Println("\n-----Examples that can be tried from 'erl'-shell")
fmt.Println("gen_server:cast({example,'demoNode7878@127.0.0.1'}, stop).")
fmt.Println("gen_server:call({example,'demoNode7878@127.0.0.1'}, hello).")
fmt.Println("gen_server:cast({example,'demoNode7878@127.0.0.1'}, forward).")
fmt.Println("")

fmt.Println("Run erl shell (cluster with cookie456):")
fmt.Printf("ERL_EPMD_PORT=8787 erl -name %s -setcookie cookie456\n", "erl-demoNode-cookie456@127.0.0.1")
fmt.Println("\n-----Examples that can be tried from 'erl'-shell")
fmt.Println("gen_server:cast({example,'demoNode8787@127.0.0.1'}, stop).")
fmt.Println("gen_server:call({example,'demoNode8787@127.0.0.1'}, hello).")
fmt.Println("gen_server:cast({example,'demoNode8787@127.0.0.1'}, forward).")

wg.Wait()
node01.Stop()
node02.Stop()
}
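Editor's note on the bridge wiring above: a cast of 'forward' to the server on one node pushes a greeting into the peer node's channel, and the peer's Init goroutine re-injects it into its own mailbox, where it surfaces in HandleInfo. The round trip, traced as a sketch:

// gen_server:cast({example,'demoNode7878@127.0.0.1'}, forward).
//   -> HandleCast on node01 pushes "Hi from <PID>" into bridgeToNode02
//   -> the Init goroutine on node02 runs p.Send(p.Self(), etf.Tuple{"forwarded", msg})
//   -> HandleInfo on node02 prints: [demoNode8787@127.0.0.1] HandleInfo: {"forwarded", "Hi from ..."}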
@ -0,0 +1,93 @@
package main

import (
"fmt"
"time"

ergo "github.com/halturin/ergonode"
"github.com/halturin/ergonode/etf"
)

// ExampleGenServer simple implementation of GenServer
type ExampleGenServer struct {
ergo.GenServer
process *ergo.Process
}

type State struct {
value int
}

// Init initializes process state using arbitrary arguments
// Init -> state
func (egs *ExampleGenServer) Init(p *ergo.Process, args ...interface{}) (state interface{}) {
fmt.Printf("Init: args %v \n", args)
egs.process = p
InitialState := &State{
value: args[0].(int), // 100
}
return InitialState
}

// HandleCast -> ("noreply", state) - noreply
// ("stop", reason) - stop with reason
func (egs *ExampleGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleCast: %#v (state value %d) \n", message, state.(*State).value)
time.Sleep(1 * time.Second)
state.(*State).value++

if state.(*State).value > 103 {
egs.process.Send(egs.process.Self(), "hello")
} else {
egs.process.Cast(egs.process.Self(), "hi")
}

return "noreply", state
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
// ("noreply", _, state) - noreply
// ("stop", reason, _) - normal stop
func (egs *ExampleGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
fmt.Printf("HandleCall: %#v, From: %#v\n", message, from)
return "reply", message, state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
// ("stop", reason) - normal stop
func (egs *ExampleGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleInfo: %#v (state value %d) \n", message, state.(*State).value)
time.Sleep(1 * time.Second)
state.(*State).value++
if state.(*State).value > 106 {
return "stop", "normal"
} else {
egs.process.Send(egs.process.Self(), "hello")
}
return "noreply", state
}

// Terminate is called when the process dies
func (egs *ExampleGenServer) Terminate(reason string, state interface{}) {
fmt.Printf("Terminate: %#v \n", reason)
}

func main() {
// create a new node
node := ergo.CreateNode("node@localhost", "cookies", ergo.NodeOptions{})
gs1 := &ExampleGenServer{}

// spawn a new process of the genserver
process, _ := node.Spawn("gs1", ergo.ProcessOptions{}, gs1, 100)

// self casting
process.Cast(process.Self(), "hey")

// waiting for the process termination.
select {
case <-process.Context.Done():
fmt.Println("exited")
}
}
@ -0,0 +1,160 @@
package main

import (
"flag"
"fmt"

ergo "github.com/halturin/ergonode"
"github.com/halturin/ergonode/etf"
)

var (
NodeName string
Cookie string
err error
ListenRangeBegin int
ListenRangeEnd int = 35000
Listen string
ListenEPMD int

EnableRPC bool
)

type demoSup struct {
ergo.Supervisor
}

func (ds *demoSup) Init(args ...interface{}) ergo.SupervisorSpec {
return ergo.SupervisorSpec{
Name: "demoSupervisorSup",
Children: []ergo.SupervisorChildSpec{
ergo.SupervisorChildSpec{
Name: "demoServer01",
Child: &demoGenServ{},
Restart: ergo.SupervisorChildRestartTemporary,
// Restart: ergo.SupervisorChildRestartTransient,
// Restart: ergo.SupervisorChildRestartPermanent,
},
ergo.SupervisorChildSpec{
Name: "demoServer02",
Child: &demoGenServ{},
Restart: ergo.SupervisorChildRestartPermanent,
Args: []interface{}{12345},
},
ergo.SupervisorChildSpec{
Name: "demoServer03",
Child: &demoGenServ{},
Restart: ergo.SupervisorChildRestartPermanent,
Args: []interface{}{"abc", 67890},
},
},
Strategy: ergo.SupervisorStrategy{
Type: ergo.SupervisorStrategyOneForAll,
// Type: ergo.SupervisorStrategyRestForOne,
// Type: ergo.SupervisorStrategyOneForOne,
Intensity: 2,
Period: 5,
},
}
}

// GenServer implementation structure
type demoGenServ struct {
ergo.GenServer
process *ergo.Process
}

type state struct {
i int
}

// Init initializes process state using arbitrary arguments
// Init(...) -> state
func (dgs *demoGenServ) Init(p *ergo.Process, args ...interface{}) interface{} {
fmt.Printf("Init (%s): args %v \n", p.Name(), args)
dgs.process = p
return state{i: 12345}
}

// HandleCast serves incoming messages sent via gen_server:cast
// HandleCast -> ("noreply", state) - noreply
// ("stop", reason) - stop with reason
func (dgs *demoGenServ) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleCast (%s): %#v\n", dgs.process.Name(), message)
switch message {
case etf.Atom("stop"):
return "stop", "they said"
}
return "noreply", state
}

// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
// ("noreply", _, state) - noreply
// ("stop", reason, _) - normal stop
func (dgs *demoGenServ) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
fmt.Printf("HandleCall (%s): %#v, From: %#v\n", dgs.process.Name(), message, from)

reply := etf.Term(etf.Tuple{etf.Atom("error"), etf.Atom("unknown_request")})

switch message {
case etf.Atom("hello"):
reply = etf.Term(etf.Atom("hi"))
}
return "reply", reply, state
}

// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
// ("stop", reason) - normal stop
func (dgs *demoGenServ) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
fmt.Printf("HandleInfo (%s): %#v\n", dgs.process.Name(), message)
return "noreply", state
}

// Terminate is called when the process dies
func (dgs *demoGenServ) Terminate(reason string, state interface{}) {
fmt.Printf("Terminate (%s): %#v\n", dgs.process.Name(), reason)
}

func init() {
flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}

func main() {
flag.Parse()

opts := ergo.NodeOptions{
ListenRangeBegin: uint16(ListenRangeBegin),
ListenRangeEnd: uint16(ListenRangeEnd),
EPMDPort: uint16(ListenEPMD),
}

// Initialize new node with given name, cookie, listening port range and epmd port
node := ergo.CreateNode(NodeName, Cookie, opts)

// Spawn supervisor process
process, _ := node.Spawn("demo_sup", ergo.ProcessOptions{}, &demoSup{})

fmt.Println("Run erl shell:")
fmt.Printf("erl -name %s -setcookie %s\n", "erl-"+node.FullName, Cookie)

fmt.Println("-----Examples that can be tried from 'erl'-shell")
fmt.Printf("gen_server:cast({%s,'%s'}, stop).\n", "demoServer01", NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, hello).\n", "demoServer01", NodeName)
fmt.Println("or...")
fmt.Printf("gen_server:cast({%s,'%s'}, stop).\n", "demoServer02", NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, hello).\n", "demoServer02", NodeName)
fmt.Println("or...")
fmt.Printf("gen_server:cast({%s,'%s'}, stop).\n", "demoServer03", NodeName)
fmt.Printf("gen_server:call({%s,'%s'}, hello).\n", "demoServer03", NodeName)
select {
case <-process.Context.Done():
}
node.Stop()
}
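Editor's note on the strategy above: as in OTP, Intensity and Period bound the restart rate; with Intensity: 2 and Period: 5 the supervisor gives up once more than two restarts occur within five seconds. A sketch of the same spec with the one_for_one flavour, which restarts only the crashed child instead of the whole set (names taken from this diff):

Strategy: ergo.SupervisorStrategy{
Type: ergo.SupervisorStrategyOneForOne, // restart only the failed child
Intensity: 2, // give up after more than 2 restarts ...
Period: 5, // ... within a 5-second window
},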
307
gen_server.go
307
gen_server.go
@ -1,235 +1,196 @@
package ergonode

import (
"errors"
"log"
"sync"
"time"

"github.com/halturin/ergonode/etf"
"github.com/halturin/ergonode/lib"
)

// GenServerInt interface
type GenServerInt interface {
const (
DefaultCallTimeout = 5
)

// GenServerBehavior interface
type GenServerBehavior interface {
// Init(...) -> state
Init(args ...interface{}) (state interface{})
// HandleCast -> (0, state) - noreply
// (-1, state) - normal stop
HandleCast(message *etf.Term, state interface{}) (int, interface{})
// HandleCall -> (1, reply, state) - reply
// (0, _, state) - noreply
// (-1, state) - normal stop
HandleCall(from *etf.Tuple, message *etf.Term, state interface{}) (int, *etf.Term, interface{})
// HandleInfo -> (0, state) - noreply
// (-1, state) - normal stop (-2, -3 .... custom reasons to stop)
HandleInfo(message *etf.Term, state interface{}) (int, interface{})
Terminate(reason int, state interface{})

// Making outgoing request
Call(to interface{}, message *etf.Term, options ...interface{}) (reply *etf.Term, err error)
Cast(to interface{}, message *etf.Term) (err error)

// Monitors
Monitor(to etf.Pid)
MonitorNode(to etf.Atom, flag bool)
Init(process *Process, args ...interface{}) (state interface{})
// HandleCast -> ("noreply", state) - noreply
// ("stop", reason) - stop with reason
HandleCast(message etf.Term, state interface{}) (string, interface{})
// HandleCall -> ("reply", message, state) - reply
// ("noreply", _, state) - noreply
// ("stop", reason, _) - normal stop
HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{})
// HandleInfo -> ("noreply", state) - noreply
// ("stop", reason) - normal stop
HandleInfo(message etf.Term, state interface{}) (string, interface{})
Terminate(reason string, state interface{})
}

// GenServer is implementation of GenServerInt interface
type GenServer struct {
Node *Node // current node of process
Self etf.Pid // Pid of process
state interface{}
lock sync.Mutex
chreply chan *etf.Tuple
}
// GenServer is implementation of ProcessBehavior interface for GenServer objects
type GenServer struct{}

// Options returns map of default process-related options
func (gs *GenServer) Options() map[string]interface{} {
return map[string]interface{}{
"chan-size": 100, // size of channel for regular messages
}
}
func (gs *GenServer) loop(p *Process, object interface{}, args ...interface{}) string {
p.state = object.(GenServerBehavior).Init(p, args...)
p.ready <- true

stop := make(chan string, 2)

p.currentFunction = "GenServer:loop"

// ProcessLoop executes during whole time of process life.
// It receives incoming messages from channels and handle it using methods of behaviour implementation
func (gs *GenServer) ProcessLoop(pcs procChannels, pd Process, args ...interface{}) {
state := pd.(GenServerInt).Init(args...)
gs.state = state
pcs.init <- true
var chstop chan int
chstop = make(chan int)
defer func() {
if r := recover(); r != nil {
log.Printf("GenServerInt recovered: %#v", r)
}
}()
for {
var message etf.Term
var fromPid etf.Pid
select {
case reason := <-chstop:
pd.(GenServerInt).Terminate(reason, gs.state)
case msg := <-pcs.in:
message = msg
case msgFrom := <-pcs.inFrom:
message = msgFrom[1]
fromPid = msgFrom[0].(etf.Pid)
var lockState = &sync.Mutex{}

select {
case ex := <-p.gracefulExit:
if p.trapExit {
message = etf.Tuple{
etf.Atom("EXIT"),
ex.from,
etf.Atom(ex.reason),
}
} else {
object.(GenServerBehavior).Terminate(ex.reason, p.state)
return ex.reason
}
case reason := <-stop:
object.(GenServerBehavior).Terminate(reason, p.state)
return reason
case msg := <-p.mailBox:
fromPid = msg.Element(1).(etf.Pid)
message = msg.Element(2)
case <-p.Context.Done():
return "kill"
case direct := <-p.direct:
gs.handleDirect(direct)
continue
}
lib.Log("[%#v]. Message from %#v\n", gs.Self, fromPid)

lib.Log("[%s]. %v got message from %#v\n", p.Node.FullName, p.self, fromPid)

p.reductions++

switch m := message.(type) {
case etf.Tuple:
switch mtag := m[0].(type) {
switch mtag := m.Element(1).(type) {
case etf.Atom:
gs.lock.Lock()
switch mtag {
case etf.Atom("$gen_call"):

// We need to wrap it in a goroutine in order to serve
// sync-requests (like 'process.Call') within callback execution
// since reply (etf.Ref) comes through the same mailBox channel
go func() {
fromTuple := m[1].(etf.Tuple)
code, reply, state1 := pd.(GenServerInt).HandleCall(&fromTuple, &m[2], gs.state)
fromTuple := m.Element(2).(etf.Tuple)
lockState.Lock()

gs.state = state1
gs.lock.Unlock()
if code < 0 {
chstop <- code
cf := p.currentFunction
p.currentFunction = "GenServer:HandleCall"
code, reply, state := object.(GenServerBehavior).HandleCall(fromTuple, m.Element(3), p.state)
p.currentFunction = cf

if code == "stop" {
stop <- reply.(string)
// do not unlock; we have to keep this state unchanged for the Terminate handler
return
}
if reply != nil && code == 1 {
pid := fromTuple[0].(etf.Pid)
ref := fromTuple[1]
rep := etf.Term(etf.Tuple{ref, *reply})
gs.Send(pid, &rep)

p.state = state
lockState.Unlock()

if reply != nil && code == "reply" {
pid := fromTuple.Element(1).(etf.Pid)
ref := fromTuple.Element(2)
rep := etf.Term(etf.Tuple{ref, reply})
p.Send(pid, rep)
}
}()

case etf.Atom("$gen_cast"):
go func() {
code, state1 := pd.(GenServerInt).HandleCast(&m[1], gs.state)
gs.state = state1
gs.lock.Unlock()
if code < 0 {
chstop <- code
lockState.Lock()

cf := p.currentFunction
p.currentFunction = "GenServer:HandleCast"
code, state := object.(GenServerBehavior).HandleCast(m.Element(2), p.state)
p.currentFunction = cf

if code == "stop" {
stop <- state.(string)
return
}
p.state = state
lockState.Unlock()
}()

default:
go func() {
code, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)
gs.state = state1
gs.lock.Unlock()
if code < 0 {
chstop <- code
lockState.Lock()

cf := p.currentFunction
p.currentFunction = "GenServer:HandleInfo"
code, state := object.(GenServerBehavior).HandleInfo(message, p.state)
p.currentFunction = cf

if code == "stop" {
stop <- state.(string)
return
}
p.state = state
lockState.Unlock()
}()

}

case etf.Ref:
lib.Log("got reply: %#v\n%#v", mtag, message)
gs.chreply <- &m
p.reply <- m

default:
lib.Log("mtag: %#v", mtag)
gs.lock.Lock()
go func() {
code, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)
gs.state = state1
gs.lock.Unlock()
if code < 0 {
chstop <- code
return
lockState.Lock()

cf := p.currentFunction
p.currentFunction = "GenServer:HandleInfo"
code, state := object.(GenServerBehavior).HandleInfo(message, p.state)
p.currentFunction = cf

if code == "stop" {
stop <- state.(string)
}
p.state = state
lockState.Unlock()
}()
}

default:
lib.Log("m: %#v", m)
gs.lock.Lock()
go func() {
code, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)
gs.state = state1
gs.lock.Unlock()
if code < 0 {
chstop <- code
lockState.Lock()

cf := p.currentFunction
p.currentFunction = "GenServer:HandleInfo"
code, state := object.(GenServerBehavior).HandleInfo(message, p.state)
p.currentFunction = cf

if code == "stop" {
stop <- state.(string)
return
}
p.state = state
lockState.Unlock()
}()
}
}
}

func (gs *GenServer) setNode(node *Node) {
gs.Node = node
}
func (gs *GenServer) handleDirect(m directMessage) {

func (gs *GenServer) setPid(pid etf.Pid) {
gs.Self = pid
}

func (gs *GenServer) Call(to interface{}, message *etf.Term, options ...interface{}) (reply *etf.Term, err error) {
var (
option_timeout int = 5
)

gs.chreply = make(chan *etf.Tuple)
defer close(gs.chreply)

ref := gs.Node.MakeRef()
from := etf.Tuple{gs.Self, ref}
msg := etf.Term(etf.Tuple{etf.Atom("$gen_call"), from, *message})
if err := gs.Node.Send(gs.Self, to, &msg); err != nil {
return nil, err
if m.reply != nil {
m.err = ErrUnsupportedRequest
m.reply <- m
}

switch len(options) {
case 1:
switch options[0].(type) {
case int:
if options[0].(int) > 0 {
option_timeout = options[0].(int)
}
}

}

for {
select {
case m := <-gs.chreply:
retmsg := *m
ref1 := retmsg[0].(etf.Ref)
val := retmsg[1].(etf.Term)

//check by id
if ref.Id[0] == ref1.Id[0] && ref.Id[1] == ref1.Id[1] && ref.Id[2] == ref1.Id[2] {
reply = &val
goto out
}
case <-time.After(time.Second * time.Duration(option_timeout)):
err = errors.New("timeout")
goto out
}
}
out:
gs.chreply = nil

return
}

func (gs *GenServer) Cast(to interface{}, message *etf.Term) error {
msg := etf.Term(etf.Tuple{etf.Atom("$gen_cast"), *message})
if err := gs.Node.Send(gs.Self, to, &msg); err != nil {
return err
}

return nil
}

func (gs *GenServer) Send(to etf.Pid, reply *etf.Term) {
gs.Node.Send(nil, to, reply)
}

func (gs *GenServer) Monitor(to etf.Pid) {
gs.Node.Monitor(gs.Self, to)
}

func (gs *GenServer) MonitorNode(to etf.Atom, flag bool) {
gs.Node.MonitorNode(gs.Self, to, flag)
}
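Editor's note on the new loop above: when HandleCall returns "stop", the reply term itself becomes the termination reason (stop <- reply.(string)), so it must be a string and the caller receives no reply. A minimal sketch of a server using that path (myServer is hypothetical):

// Returning "stop" from HandleCall: the second return value is the
// Terminate reason, not a reply delivered to the caller.
func (s *myServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
if message == etf.Atom("shutdown") {
return "stop", "normal", state // Terminate("normal", state) will run
}
return "reply", etf.Atom("ok"), state
}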
@ -0,0 +1,206 @@
package ergonode

import (
"fmt"
"reflect"
"testing"
"time"

"github.com/halturin/ergonode/etf"
)

// This test is checking the cases below:
//
// initiation:
// - starting 2 nodes (node1, node2)
// - starting 4 GenServers
// * 2 on node1 - gs1, gs2
// * 2 on node2 - gs3, gs4
//
// checking:
// - local sending
// * send: node1 (gs1) -> node1 (gs2). in fashion of erlang sending `erlang:send`
// * cast: node1 (gs1) -> node1 (gs2). like `gen_server:cast` does
// * call: node1 (gs1) -> node1 (gs2). like `gen_server:call` does
//
// - remote sending
// * send: node1 (gs1) -> node2 (gs3)
// * cast: node1 (gs1) -> node2 (gs3)
// * call: node1 (gs1) -> node2 (gs3)

type testGenServer struct {
GenServer
err chan error
}

func (tgs *testGenServer) Init(p *Process, args ...interface{}) (state interface{}) {
tgs.err <- nil
return nil
}
func (tgs *testGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
// fmt.Printf("testGenServer ({%s, %s}): HandleCast: %#v\n", tgs.process.name, tgs.process.Node.FullName, message)
tgs.err <- nil
return "noreply", state
}
func (tgs *testGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
// fmt.Printf("testGenServer ({%s, %s}): HandleCall: %#v, From: %#v\n", tgs.process.name, tgs.process.Node.FullName, message, from)
return "reply", message, state
}
func (tgs *testGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
// fmt.Printf("testGenServer ({%s, %s}): HandleInfo: %#v\n", tgs.process.name, tgs.process.Node.FullName, message)
tgs.err <- nil
return "noreply", state
}
func (tgs *testGenServer) Terminate(reason string, state interface{}) {
// fmt.Printf("testGenServer ({%s, %s}): Terminate: %#v\n", tgs.process.name, tgs.process.Node.FullName, reason)
tgs.err <- nil
}

func TestGenServer(t *testing.T) {
fmt.Printf("\n=== Test GenServer\n")
fmt.Printf("Starting nodes: nodeGS1@localhost, nodeGS2@localhost: ")
node1 := CreateNode("nodeGS1@localhost", "cookies", NodeOptions{})
node2 := CreateNode("nodeGS2@localhost", "cookies", NodeOptions{})
if node1 == nil || node2 == nil {
t.Fatal("can't start nodes")
} else {
fmt.Println("OK")
}

gs1 := &testGenServer{
err: make(chan error, 2),
}
gs2 := &testGenServer{
err: make(chan error, 2),
}
gs3 := &testGenServer{
err: make(chan error, 2),
}

fmt.Printf(" wait for start of gs1 on %#v: ", node1.FullName)
node1gs1, _ := node1.Spawn("gs1", ProcessOptions{}, gs1, nil)
waitForResult(t, gs1.err)

fmt.Printf(" wait for start of gs2 on %#v: ", node1.FullName)
node1gs2, _ := node1.Spawn("gs2", ProcessOptions{}, gs2, nil)
waitForResult(t, gs2.err)

fmt.Printf(" wait for start of gs3 on %#v: ", node2.FullName)
node2gs3, _ := node2.Spawn("gs3", ProcessOptions{}, gs3, nil)
waitForResult(t, gs3.err)

fmt.Println("Testing GenServer process:")

fmt.Printf(" process.Send (by Pid) local (gs1) -> local (gs2) : ")
node1gs1.Send(node1gs2.Self(), etf.Atom("hi"))
waitForResult(t, gs2.err)

node1gs1.Cast(node1gs2.Self(), etf.Atom("hi cast"))
fmt.Printf(" process.Cast (by Pid) local (gs1) -> local (gs2) : ")
waitForResult(t, gs2.err)

fmt.Printf(" process.Call (by Pid) local (gs1) -> local (gs2): ")
v := etf.Atom("hi call")
if v1, err := node1gs1.Call(node1gs2.Self(), v); err != nil {
t.Fatal(err)
} else {
if v == v1 {
fmt.Println("OK")
} else {
e := fmt.Errorf("expected: %#v , got: %#v", v, v1)
t.Fatal(e)
}
}

fmt.Printf(" process.Send (by Name) local (gs1) -> local (gs2) : ")
node1gs1.Send(etf.Atom("gs2"), etf.Atom("hi"))
waitForResult(t, gs2.err)

node1gs1.Cast(etf.Atom("gs2"), etf.Atom("hi cast"))
fmt.Printf(" process.Cast (by Name) local (gs1) -> local (gs2) : ")
waitForResult(t, gs2.err)

fmt.Printf(" process.Call (by Name) local (gs1) -> local (gs2): ")
if v1, err := node1gs1.Call(etf.Atom("gs2"), v); err != nil {
t.Fatal(err)
} else {
if v == v1 {
fmt.Println("OK")
} else {
e := fmt.Errorf("expected: %#v , got: %#v", v, v1)
t.Fatal(e)
}
}

fmt.Printf(" process.Send (by Pid) local (gs1) -> remote (gs3) : ")
node1gs1.Send(node2gs3.Self(), etf.Atom("hi"))
waitForResult(t, gs3.err)

node1gs1.Cast(node2gs3.Self(), etf.Atom("hi cast"))
fmt.Printf(" process.Cast (by Pid) local (gs1) -> remote (gs3) : ")
waitForResult(t, gs3.err)

fmt.Printf(" process.Call (by Pid) local (gs1) -> remote (gs3): ")
if v1, err := node1gs1.Call(node2gs3.Self(), v); err != nil {
t.Fatal(err)
} else {
if v == v1 {
fmt.Println("OK")
} else {
e := fmt.Errorf("expected: %#v , got: %#v", v, v1)
t.Fatal(e)
}
}

fmt.Printf(" process.Send (by Name) local (gs1) -> remote (gs3) : ")
processName := etf.Tuple{"gs3", node2.FullName}
node1gs1.Send(processName, etf.Atom("hi"))
waitForResult(t, gs3.err)

node1gs1.Cast(processName, etf.Atom("hi cast"))
fmt.Printf(" process.Cast (by Name) local (gs1) -> remote (gs3) : ")
waitForResult(t, gs3.err)

fmt.Printf(" process.Call (by Name) local (gs1) -> remote (gs3): ")
if v1, err := node1gs1.Call(processName, v); err != nil {
t.Fatal(err)
} else {
if v == v1 {
fmt.Println("OK")
} else {
e := fmt.Errorf("expected: %#v , got: %#v", v, v1)
t.Fatal(e)
}
}

fmt.Printf("Stopping nodes: %v, %v\n", node1.FullName, node2.FullName)
node1.Stop()
node2.Stop()
}

func waitForResult(t *testing.T, w chan error) {
select {
case e := <-w:
if e == nil {
fmt.Println("OK")
}

case <-time.After(time.Second * time.Duration(1)):
t.Fatal("result timeout")
}
}

func waitForResultWithValue(t *testing.T, w chan interface{}, value interface{}) {
select {
case v := <-w:
if reflect.DeepEqual(v, value) {
fmt.Println("OK")
} else {
e := fmt.Errorf("expected: %#v , got: %#v", value, v)
t.Fatal(e)
}

case <-time.After(time.Second * time.Duration(2)):
t.Fatal("result timeout")
}
}
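Editor's note: the test above exercises the three addressing forms accepted by Send, Cast and Call; side by side they are:

p.Send(pid, msg) // by Pid
p.Send(etf.Atom("gs2"), msg) // by locally registered name
p.Send(etf.Tuple{"gs3", node2.FullName}, msg) // by {Name, Node} on a remote node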
@ -1,5 +1,7 @@
package ergonode

// TODO: https://github.com/erlang/otp/blob/master/lib/kernel/src/global.erl

import (
"github.com/halturin/ergonode/etf"
"github.com/halturin/ergonode/lib"

@ -7,37 +9,47 @@ import (

type globalNameServer struct {
GenServer
process *Process
}

func (gns *globalNameServer) Init(args ...interface{}) (state interface{}) {
type state struct {
}

// Init initializes process state using arbitrary arguments
// Init -> state
func (ns *globalNameServer) Init(p *Process, args ...interface{}) interface{} {
lib.Log("GLOBAL_NAME_SERVER: Init: %#v", args)
gns.Node.Register(etf.Atom("global_name_server"), gns.Self)
return nil
ns.process = p

return state{}
}

func (gns *globalNameServer) HandleCast(message *etf.Term, state interface{}) (code int, stateout interface{}) {
lib.Log("GLOBAL_NAME_SERVER: HandleCast: %#v", *message)
stateout = state
code = 0
return
// HandleCast -> ("noreply", state) - noreply
// ("stop", reason) - stop with reason
func (ns *globalNameServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
lib.Log("GLOBAL_NAME_SERVER: HandleCast: %#v", message)
return "noreply", state
}

func (gns *globalNameServer) HandleCall(from *etf.Tuple, message *etf.Term, state interface{}) (code int, reply *etf.Term, stateout interface{}) {
lib.Log("GLOBAL_NAME_SERVER: HandleCall: %#v, From: %#v", *message, *from)
stateout = state
code = 1
replyTerm := etf.Term(etf.Atom("reply"))
reply = &replyTerm
return
// HandleCall serves incoming messages sent via gen_server:call
// HandleCall -> ("reply", message, state) - reply
// ("noreply", _, state) - noreply
// ("stop", reason, _) - normal stop
func (ns *globalNameServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
lib.Log("GLOBAL_NAME_SERVER: HandleCall: %#v, From: %#v", message, from)
reply := etf.Term(etf.Atom("reply"))
return "reply", reply, state
}

func (gns *globalNameServer) HandleInfo(message *etf.Term, state interface{}) (code int, stateout interface{}) {
lib.Log("GLOBAL_NAME_SERVER: HandleInfo: %#v", *message)
stateout = state
code = 0
return
// HandleInfo serves all other incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
// ("stop", reason) - normal stop
func (ns *globalNameServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
lib.Log("GLOBAL_NAME_SERVER: HandleInfo: %#v", message)
return "noreply", state
}

func (gns *globalNameServer) Terminate(reason int, state interface{}) {
// Terminate is called when the process dies
func (ns *globalNameServer) Terminate(reason string, state interface{}) {
lib.Log("GLOBAL_NAME_SERVER: Terminate: %#v", reason)
}
@ -0,0 +1,578 @@
|
|||
package ergonode
|
||||
|
||||
// http://erlang.org/doc/reference_manual/processes.html
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/halturin/ergonode/etf"
|
||||
"github.com/halturin/ergonode/lib"
|
||||
)
|
||||
|
||||
type monitorProcessRequest struct {
|
||||
process etf.Pid
|
||||
by etf.Pid
|
||||
ref etf.Ref
|
||||
}
|
||||
|
||||
type monitorNodeRequest struct {
|
||||
node string
|
||||
by etf.Pid
|
||||
ref etf.Ref
|
||||
}
|
||||
|
||||
type processTerminatedRequest struct {
|
||||
process etf.Pid
|
||||
name etf.Atom
|
||||
reason string
|
||||
}
|
||||
|
||||
type monitorChannels struct {
|
||||
monitorProcess chan monitorProcessRequest
|
||||
demonitorProcess chan monitorProcessRequest
|
||||
link chan linkProcessRequest
|
||||
unlink chan linkProcessRequest
|
||||
node chan monitorNodeRequest
|
||||
demonitorName chan monitorNodeRequest
|
||||
|
||||
request chan Request
|
||||
|
||||
nodeDown chan string
|
||||
processTerminated chan processTerminatedRequest
|
||||
}
|
||||
|
||||
type monitorItem struct {
|
||||
pid etf.Pid
|
||||
ref etf.Ref
|
||||
refs string
|
||||
}
|
||||
|
||||
type linkProcessRequest struct {
|
||||
pidA etf.Pid
|
||||
pidB etf.Pid
|
||||
}
|
||||
|
||||
type Request struct {
|
||||
name string
|
||||
pid etf.Pid
|
||||
reply chan []etf.Pid
|
||||
}
|
||||
|
||||
type monitor struct {
|
||||
processes map[etf.Pid][]monitorItem
|
||||
links map[etf.Pid][]etf.Pid
|
||||
nodes map[string][]monitorItem
|
||||
ref2pid map[string]etf.Pid
|
||||
ref2node map[string]string
|
||||
|
||||
channels monitorChannels
|
||||
|
||||
node *Node
|
||||
}
|
||||
|
||||
func createMonitor(node *Node) *monitor {
|
||||
m := &monitor{
|
||||
processes: make(map[etf.Pid][]monitorItem),
|
||||
links: make(map[etf.Pid][]etf.Pid),
|
||||
nodes: make(map[string][]monitorItem),
|
||||
|
||||
ref2pid: make(map[string]etf.Pid),
|
||||
ref2node: make(map[string]string),
|
||||
|
||||
channels: monitorChannels{
|
||||
monitorProcess: make(chan monitorProcessRequest, 10),
|
||||
demonitorProcess: make(chan monitorProcessRequest, 10),
|
||||
link: make(chan linkProcessRequest, 10),
|
||||
unlink: make(chan linkProcessRequest, 10),
|
||||
node: make(chan monitorNodeRequest, 10),
|
||||
demonitorName: make(chan monitorNodeRequest, 10),
|
||||
|
||||
request: make(chan Request),
|
||||
|
||||
nodeDown: make(chan string, 10),
|
||||
processTerminated: make(chan processTerminatedRequest, 10),
|
||||
},
|
||||
node: node,
|
||||
}
|
||||
|
||||
go m.run()
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *monitor) run() {
|
||||
for {
|
||||
select {
|
||||
case p := <-m.channels.monitorProcess:
|
||||
lib.Log("[%s] MONITOR process: %v => %v", m.node.FullName, p.by, p.process)
|
||||
// http://erlang.org/doc/reference_manual/processes.html#monitors
|
||||
// FIXME: If Pid does not exist, the 'DOWN' message should be
|
||||
// send immediately with Reason set to noproc.
|
||||
l := m.processes[p.process]
|
||||
key := ref2key(p.ref)
|
||||
item := monitorItem{
|
||||
pid: p.by,
|
||||
ref: p.ref,
|
||||
refs: key,
|
||||
}
|
||||
m.processes[p.process] = append(l, item)
|
||||
m.ref2pid[key] = p.process
|
||||
|
||||
if !isFakePid(p.process) && string(p.process.Node) != m.node.FullName { // request monitor remote process
|
||||
message := etf.Tuple{distProtoMONITOR, p.by, p.process, p.ref}
|
||||
m.node.registrar.routeRaw(p.process.Node, message)
|
||||
}
|
||||
|
||||
case dp := <-m.channels.demonitorProcess:
|
||||
key := ref2key(dp.ref)
|
||||
if pid, ok := m.ref2pid[key]; ok {
|
||||
dp.process = pid
|
||||
} else {
|
||||
// unknown monitor reference
|
||||
continue
|
||||
}
|
||||
|
||||
if !isFakePid(dp.process) && string(dp.process.Node) != m.node.FullName { // request demonitor remote process
|
||||
message := etf.Tuple{distProtoDEMONITOR, dp.by, dp.process, dp.ref}
|
||||
m.node.registrar.routeRaw(dp.process.Node, message)
|
||||
}
|
||||
|
||||
l := m.processes[dp.process]
|
||||
|
||||
// remove PID from monitoring processes list
|
||||
for i := range l {
|
||||
if l[i].refs == key {
|
||||
l[i] = l[0]
|
||||
l = l[1:]
|
||||
delete(m.ref2pid, key)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(l) == 0 {
|
||||
delete(m.processes, dp.process)
|
||||
} else {
|
||||
m.processes[dp.process] = l
|
||||
}
|
||||
|
||||
case l := <-m.channels.link:
|
||||
lib.Log("[%s] LINK process: %v => %v", m.node.FullName, l.pidA, l.pidB)
|
||||
|
||||
// http://erlang.org/doc/reference_manual/processes.html#links
|
||||
// Links are bidirectional and there can only be one link between
|
||||
// two processes. Repeated calls to link(Pid) have no effect.
|
||||
|
||||
var linksA, linksB []etf.Pid
|
||||
|
||||
// remote makes link to local
|
||||
if l.pidA.Node != etf.Atom(m.node.FullName) {
|
||||
goto doneAl
|
||||
}
|
||||
|
||||
linksA = m.links[l.pidA]
|
||||
for i := range linksA {
|
||||
if linksA[i] == l.pidB {
|
||||
goto doneAl
|
||||
}
|
||||
}
|
||||
|
||||
linksA = append(linksA, l.pidB)
|
||||
m.links[l.pidA] = linksA
|
||||
|
||||
doneAl:
|
||||
// local makes link to remote
|
||||
if l.pidB.Node != etf.Atom(m.node.FullName) {
|
||||
message := etf.Tuple{distProtoLINK, l.pidA, l.pidB}
|
||||
m.node.registrar.routeRaw(l.pidB.Node, message)
|
||||
|
||||
// goto doneBl
|
||||
// we do not jump to doneBl in order to be able to handle
|
||||
// 'nodedown' event and notify that kind of links
|
||||
// with 'EXIT' messages and 'noconnection' as a reason
|
||||
}
|
||||
|
||||
linksB = m.links[l.pidB]
|
||||
for i := range linksB {
|
||||
if linksB[i] == l.pidA {
|
||||
goto doneBl
|
||||
}
|
||||
}
|
||||
|
||||
linksB = append(linksB, l.pidA)
|
||||
m.links[l.pidB] = linksB
|
||||
|
||||
doneBl:
|
||||
continue
|
||||
|
||||
case ul := <-m.channels.unlink:
|
||||
if ul.pidB.Node != etf.Atom(m.node.FullName) {
|
||||
message := etf.Tuple{distProtoUNLINK, ul.pidA, ul.pidB}
|
||||
m.node.registrar.routeRaw(ul.pidB.Node, message)
|
||||
}
|
||||
|
||||
linksA := m.links[ul.pidA]
|
||||
for i := range linksA {
|
||||
if linksA[i] == ul.pidB {
|
||||
linksA[i] = linksA[0]
|
||||
linksA = linksA[1:]
|
||||
m.links[ul.pidA] = linksA
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
linksB := m.links[ul.pidB]
|
||||
for i := range linksB {
|
||||
if linksB[i] == ul.pidA {
|
||||
linksB[i] = linksB[0]
|
||||
linksB = linksB[1:]
|
||||
m.links[ul.pidB] = linksB
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case n := <-m.channels.node:
|
||||
lib.Log("[%s] MONITOR NODE : %v => %s", m.node.FullName, n.by, n.node)
|
||||
|
||||
l := m.nodes[n.node]
|
||||
key := ref2key(n.ref)
|
||||
item := monitorItem{
|
||||
pid: n.by,
|
||||
ref: n.ref,
|
||||
refs: key,
|
||||
}
|
||||
m.nodes[n.node] = append(l, item)
|
||||
m.ref2node[key] = n.node
|
||||
|
||||
case dn := <-m.channels.demonitorName:
|
||||
key := ref2key(dn.ref)
|
||||
if name, ok := m.ref2node[key]; ok {
|
||||
dn.node = name
|
||||
} else {
|
||||
// unknown monitor reference
|
||||
continue
|
||||
}
|
||||
|
||||
l := m.nodes[dn.node]
|
||||
|
||||
// remove PID from monitoring processes list
|
||||
for i := range l {
|
||||
if l[i].pid == dn.by && l[i].refs == key {
|
||||
l[i] = l[0]
|
||||
l = l[1:]
|
||||
delete(m.ref2pid, key)
|
||||
break
|
||||
}
|
||||
}
|
||||
m.nodes[dn.node] = l
|
||||
|
||||
case nd := <-m.channels.nodeDown:
|
||||
lib.Log("[%s] MONITOR NODE down: %v", m.node.FullName, nd)
|
||||
|
||||
if pids, ok := m.nodes[nd]; ok {
|
||||
for i := range pids {
|
||||
lib.Log("[%s] MONITOR node down: %v. send notify to: %v", m.node.FullName, nd, pids[i])
|
||||
m.notifyNodeDown(pids[i].pid, nd)
|
||||
delete(m.nodes, nd)
|
||||
}
|
||||
}
|
||||
|
||||
// notify process monitors
|
||||
for pid, ps := range m.processes {
|
||||
if pid.Node == etf.Atom(nd) {
|
||||
for i := range ps {
|
||||
m.notifyProcessTerminated(ps[i].ref, ps[i].pid, pid, "noconnection")
|
||||
}
|
||||
delete(m.processes, pid)
|
||||
}
|
||||
}
|
||||
|
||||
// notify linked processes
|
||||
for link, pids := range m.links {
|
||||
if link.Node == etf.Atom(nd) {
|
||||
for i := range pids {
|
||||
m.notifyProcessExit(pids[i], link, "noconnection")
|
||||
}
|
||||
delete(m.links, link)
|
||||
}
|
||||
}
|
||||
|
||||
case pt := <-m.channels.processTerminated:
|
||||
lib.Log("[%s] MONITOR process terminated: %v", m.node.FullName, pt)
|
||||
|
||||
if pids, ok := m.processes[pt.process]; ok {
|
||||
for i := range pids {
|
||||
lib.Log("[%s] MONITOR process terminated: %v send notify to: %v", m.node.FullName, pt, pids[i].pid)
|
||||
m.notifyProcessTerminated(pids[i].ref, pids[i].pid, pt.process, pt.reason)
|
||||
}
|
||||
delete(m.processes, pt.process)
|
||||
}
|
||||
|
||||
if pidLinks, ok := m.links[pt.process]; ok {
|
||||
for i := range pidLinks {
|
||||
lib.Log("[%s] LINK process exited: %v send notify to: %v", m.node.FullName, pt, pidLinks[i])
|
||||
m.notifyProcessExit(pidLinks[i], pt.process, pt.reason)
|
||||
|
||||
// remove A link
|
||||
if pids, ok := m.links[pidLinks[i]]; ok {
|
||||
for k := range pids {
|
||||
if pids[k] == pt.process {
|
||||
pids[k] = pids[0]
|
||||
pids = pids[1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(pids) > 0 {
|
||||
m.links[pidLinks[i]] = pids
|
||||
} else {
|
||||
delete(m.links, pidLinks[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
// remove link
|
||||
delete(m.links, pt.process)
|
||||
}
|
||||
|
||||
// handling termination monitors that have setted up by name.
|
||||
if pt.name != "" {
|
||||
fakePid := fakeMonitorPidFromName(string(pt.name))
|
||||
m.ProcessTerminated(fakePid, "", pt.reason)
|
||||
}
|
||||
|
||||
case r := <-m.channels.request:
|
||||
r.reply <- m.handleRequest(r.name, r.pid)
|
||||
case <-m.node.context.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *monitor) MonitorProcess(by etf.Pid, process interface{}) etf.Ref {
|
||||
ref := m.node.MakeRef()
|
||||
m.MonitorProcessWithRef(by, process, ref)
|
||||
return ref
|
||||
}
|
||||
|
||||
func (m *monitor) MonitorProcessWithRef(by etf.Pid, process interface{}, ref etf.Ref) {
|
||||
switch t := process.(type) {
|
||||
case etf.Atom: // requesting monitor of local process
|
||||
fakePid := fakeMonitorPidFromName(string(t))
|
||||
p := monitorProcessRequest{
|
||||
process: fakePid,
|
||||
by: by,
|
||||
ref: ref,
|
||||
}
|
||||
m.channels.monitorProcess <- p
|
||||
|
||||
case etf.Tuple:
|
||||
// requesting monitor of remote process by the local one using registered process name
|
||||
nodeName := t.Element(2).(etf.Atom)
|
||||
if nodeName != etf.Atom(m.node.FullName) {
|
||||
message := etf.Tuple{distProtoMONITOR, by, t, ref}
|
||||
m.node.registrar.routeRaw(nodeName, message)
|
||||
// FIXME:
|
||||
// make fake pid with remote nodename and keep it
|
||||
// in order to handle 'nodedown' event
|
||||
// fakePid := fakeMonitorPidFromName(string(nodeName))
|
||||
// p := monitorProcessRequest{
|
||||
// process: fakePid,
|
||||
// by: by,
|
||||
// ref: ref,
|
||||
// }
|
||||
// m.channels.process <- p
|
||||
// return
|
||||
}
|
||||
|
||||
// registering monitor of local process
|
||||
local := t.Element(1).(etf.Atom)
|
||||
message := etf.Tuple{distProtoMONITOR, by, local, ref}
|
||||
m.node.registrar.route(by, local, message)
|
||||
|
||||
case etf.Pid:
|
||||
p := monitorProcessRequest{
|
||||
process: t,
|
||||
by: by,
|
||||
ref: ref,
|
||||
}
|
||||
m.channels.monitorProcess <- p
|
||||
}
|
||||
}
|
||||
|
||||
func (m *monitor) DemonitorProcess(ref etf.Ref) {
|
||||
p := monitorProcessRequest{
|
||||
ref: ref,
|
||||
}
|
||||
m.channels.demonitorProcess <- p
|
||||
}
|
||||
|
||||
func (m *monitor) Link(pidA, pidB etf.Pid) {
|
||||
p := linkProcessRequest{
|
||||
pidA: pidA,
|
||||
pidB: pidB,
|
||||
}
|
||||
m.channels.link <- p
|
||||
}
|
||||
|
||||
func (m *monitor) Unink(pidA, pidB etf.Pid) {
|
||||
p := linkProcessRequest{
|
||||
pidA: pidA,
|
||||
pidB: pidB,
|
||||
}
|
||||
m.channels.unlink <- p
|
||||
}
|
||||
|
||||
func (m *monitor) MonitorNode(by etf.Pid, node string) etf.Ref {
|
||||
ref := m.node.MakeRef()
|
||||
n := monitorNodeRequest{
|
||||
node: node,
|
||||
by: by,
|
||||
ref: ref,
|
||||
}
|
||||
|
||||
m.channels.node <- n
|
||||
return ref
|
||||
}
|
||||
|
||||
func (m *monitor) DemonitorNode(ref etf.Ref) {
|
||||
n := monitorNodeRequest{
|
||||
ref: ref,
|
||||
}
|
||||
|
||||
m.channels.node <- n
|
||||
}
|
||||
|
||||
func (m *monitor) NodeDown(node string) {
|
||||
m.channels.nodeDown <- node
|
||||
}
|
||||
|
||||
func (m *monitor) ProcessTerminated(process etf.Pid, name etf.Atom, reason string) {
|
||||
p := processTerminatedRequest{
|
||||
process: process,
|
||||
name: name,
|
		reason:  reason,
	}
	m.channels.processTerminated <- p
}

func (m *monitor) GetLinks(process etf.Pid) []etf.Pid {
	reply := make(chan []etf.Pid)
	r := Request{
		name:  "getLinks",
		pid:   process,
		reply: reply,
	}
	m.channels.request <- r

	return <-reply
}

func (m *monitor) GetMonitors(process etf.Pid) []etf.Pid {
	reply := make(chan []etf.Pid)
	r := Request{
		name:  "getMonitors",
		pid:   process,
		reply: reply,
	}
	m.channels.request <- r
	return <-reply
}

func (m *monitor) GetMonitoredBy(process etf.Pid) []etf.Pid {
	reply := make(chan []etf.Pid)
	r := Request{
		name:  "getMonitoredBy",
		pid:   process,
		reply: reply,
	}
	m.channels.request <- r
	return <-reply
}

func (m *monitor) notifyNodeDown(to etf.Pid, node string) {
	message := etf.Term(etf.Tuple{etf.Atom("nodedown"), node})
	m.node.registrar.route(etf.Pid{}, to, message)
}

func (m *monitor) notifyProcessTerminated(ref etf.Ref, to etf.Pid, terminated etf.Pid, reason string) {
	// for remote {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process
	if to.Node != etf.Atom(m.node.FullName) {
		message := etf.Tuple{distProtoMONITOR_EXIT, terminated, to, ref, etf.Atom(reason)}
		m.node.registrar.routeRaw(to.Node, message)
		return
	}

	// {'DOWN', Ref, process, Pid, Reason}
	// {'DOWN',#Ref<0.0.13893633.237772>,process,<26194.4.1>,reason}
	fakePid := fakeMonitorPidFromName(string(terminated.Node))
	if terminated == fakePid {
		p := etf.Tuple{terminated.Node, m.node.FullName}
		message := etf.Term(etf.Tuple{etf.Atom("DOWN"), ref, etf.Atom("process"), p, etf.Atom(reason)})
		m.node.registrar.route(terminated, to, message)
		return
	}

	message := etf.Term(etf.Tuple{etf.Atom("DOWN"), ref, etf.Atom("process"), terminated, etf.Atom(reason)})
	m.node.registrar.route(terminated, to, message)
}

func (m *monitor) notifyProcessExit(to etf.Pid, terminated etf.Pid, reason string) {
	// for remote: {3, FromPid, ToPid, Reason}
	if to.Node != etf.Atom(m.node.FullName) {
		message := etf.Tuple{distProtoEXIT, terminated, to, etf.Atom(reason)}
		m.node.registrar.routeRaw(to.Node, message)
		return
	}
	message := etf.Term(etf.Tuple{etf.Atom("EXIT"), terminated, etf.Atom(reason)})
	m.node.registrar.route(terminated, to, message)
}

func (m *monitor) handleRequest(name string, pid etf.Pid) []etf.Pid {
	switch name {
	case "getLinks":
		if links, ok := m.links[pid]; ok {
			return links
		}
	case "getMonitors":
		monitors := []etf.Pid{}
		for p, by := range m.processes {
			for b := range by {
				if by[b].pid == pid {
					monitors = append(monitors, p)
				}
			}
		}
		return monitors

	case "getMonitoredBy":
		if m, ok := m.processes[pid]; ok {
			monitors := []etf.Pid{}
			for i := range m {
				monitors = append(monitors, m[i].pid)
			}
			return monitors
		}
	}
	return []etf.Pid{}
}

func ref2key(ref etf.Ref) string {
	return fmt.Sprintf("%v", ref)
}

func fakeMonitorPidFromName(name string) etf.Pid {
	fakePid := etf.Pid{}
	fakePid.Node = etf.Atom(name) // registered process name
	fakePid.Id = 4294967295       // 2^32 - 1
	fakePid.Serial = 4294967295   // 2^32 - 1
	fakePid.Creation = 255        // 2^8 - 1
	return fakePid
}

func isFakePid(pid etf.Pid) bool {
	if pid.Id == 4294967295 && pid.Serial == 4294967295 && pid.Creation == 255 {
		return true
	}
	return false
}
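// Editor's sketch (not part of the commit): GetLinks/GetMonitors/GetMonitoredBy
// above all follow the same pattern — the monitor's state is owned by a single
// goroutine, and readers pass a reply channel through a request channel instead
// of taking a lock. A minimal, runnable illustration of that pattern (the types
// and data here are illustrative, not the monitor's actual ones):

package main

import "fmt"

type request struct {
	key   string
	reply chan []string
}

func main() {
	requests := make(chan request)
	// owner goroutine: the only place that touches the map
	go func() {
		state := map[string][]string{"gs1": {"gs2", "gs3"}}
		for r := range requests {
			r.reply <- state[r.key]
		}
	}()

	reply := make(chan []string)
	requests <- request{key: "gs1", reply: reply}
	fmt.Println(<-reply) // [gs2 gs3]
}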
monitor_test.go
@@ -0,0 +1,233 @@
package ergonode

import (
	"fmt"
	"testing"
	"time"

	"github.com/halturin/ergonode/etf"
)

// This test is checking the cases below:
//
// initiation:
// - starting 2 nodes (node1, node2)
// - starting 5 GenServers
//   * 3 on node1 - gs1, gs2, gs3
//   * 2 on node2 - gs4, gs5
//
// checking:
// - monitor/link processes by Pid (and monitor node test)
//   * node1.gs1 (by Pid) monitor -> gs2
//   * node1.gs2 (by Pid) link -> gs3
//   * call gs3.Stop (gs2 should receive 'exit' message)
//   * call gs2.Stop (gs1 should receive 'down' message)
//   ... testing remote processes ...
//   * node1.gs1 (by Pid) monitor -> node2.gs4
//   * node1.gs1 (by Pid) link -> node2.gs5
//   * call gs5.Stop (node1.gs1 should receive 'exit')
//   * call gs4.Stop (node1.gs1 should receive 'down')
//   ... start gs2 on node1 and gs4, gs5 on node2 again ...
//   * node1.gs1 (by Pid) monitor -> node2.gs4
//   * node1.gs1 (by Pid) link -> node2.gs5
//   ... add monitor node ...
//   * node1.gs2 monitor node -> node2
//   ...
//   * call node2.Stop
//     node1.gs1 should receive 'down' message for gs4 with 'noconnection' as reason
//     node1.gs1 should receive 'exit' message for gs5 with 'noconnection' as reason
//     node1.gs2 should receive 'nodedown' message
//
// - monitor/link processes by Name
//   * node1.gs1 (by Name) monitor -> gs2
//   * node1.gs2 (by Name) link -> gs3
//   * call gs3.Stop (gs2 should receive 'exit' message)
//   * call gs2.Stop (gs1 should receive 'down' message)
//   ... testing remote processes ...
//   * node1.gs1 (by Name) monitor -> node2.gs4
//   * node1.gs1 (by Name) link -> node2.gs5
//   * call gs5.Stop (node1.gs1 should receive 'exit')
//   * call gs4.Stop (node1.gs1 should receive 'down')
//   ... start gs3 on node1 and gs4, gs5 on node2 again ...
//   * node1.gs1 (by Name) monitor -> node2.gs4
//   * node1.gs1 (by Name) link -> node2.gs5
//   * node1.gs2 monitor node -> node2
//   * call node2.Stop
//     node1.gs1 should receive 'down' message for gs4 with 'noconnection' as reason
//     node1.gs1 should receive 'exit' message for gs5 with 'noconnection' as reason

type testMonitorGenServer struct {
	GenServer
	process *Process
	v       chan interface{}
}

func (tgs *testMonitorGenServer) Init(p *Process, args ...interface{}) (state interface{}) {
	tgs.v <- p.Self()
	tgs.process = p
	return nil
}
func (tgs *testMonitorGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
	// fmt.Printf("testMonitorGenServer ({%s, %s}): HandleCast: %#v\n", tgs.process.name, tgs.process.Node.FullName, message)
	tgs.v <- message
	return "noreply", state
}
func (tgs *testMonitorGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
	// fmt.Printf("testMonitorGenServer ({%s, %s}): HandleCall: %#v, From: %#v\n", tgs.process.name, tgs.process.Node.FullName, message, from)
	return "reply", message, state
}
func (tgs *testMonitorGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
	// fmt.Printf("testMonitorGenServer ({%s, %s}): HandleInfo: %#v\n", tgs.process.name, tgs.process.Node.FullName, message)
	tgs.v <- message
	return "noreply", state
}
func (tgs *testMonitorGenServer) Terminate(reason string, state interface{}) {
	// fmt.Printf("\ntestMonitorGenServer ({%s, %s}): Terminate: %#v\n", tgs.process.name, tgs.process.Node.FullName, reason)
}

func TestMonitor(t *testing.T) {
	fmt.Printf("\n=== Test Monitor/Link\n")
	fmt.Printf("Starting nodes: nodeM1@localhost, nodeM2@localhost: ")
	node1 := CreateNode("nodeM1@localhost", "cookies", NodeOptions{})
	node2 := CreateNode("nodeM2@localhost", "cookies", NodeOptions{})
	if node1 == nil || node2 == nil {
		t.Fatal("can't start nodes")
	} else {
		fmt.Println("OK")
	}

	gs1 := &testMonitorGenServer{
		v: make(chan interface{}, 2),
	}
	gs2 := &testMonitorGenServer{
		v: make(chan interface{}, 2),
	}
	gs3 := &testMonitorGenServer{
		v: make(chan interface{}, 2),
	}
	gs4 := &testMonitorGenServer{
		v: make(chan interface{}, 2),
	}
	gs5 := &testMonitorGenServer{
		v: make(chan interface{}, 2),
	}

	// starting gen servers
	fmt.Printf(" wait for start of gs1 on %#v: ", node1.FullName)
	node1gs1, _ := node1.Spawn("gs1", ProcessOptions{}, gs1, nil)
	waitForResultWithValue(t, gs1.v, node1gs1.Self())

	fmt.Printf(" wait for start of gs2 on %#v: ", node1.FullName)
	node1gs2, _ := node1.Spawn("gs2", ProcessOptions{}, gs2, nil)
	waitForResultWithValue(t, gs2.v, node1gs2.Self())

	fmt.Printf(" wait for start of gs3 on %#v: ", node1.FullName)
	node1gs3, _ := node1.Spawn("gs3", ProcessOptions{}, gs3, nil)
	waitForResultWithValue(t, gs3.v, node1gs3.Self())

	fmt.Printf(" wait for start of gs4 on %#v: ", node2.FullName)
	node2gs4, _ := node2.Spawn("gs4", ProcessOptions{}, gs4, nil)
	waitForResultWithValue(t, gs4.v, node2gs4.Self())

	fmt.Printf(" wait for start of gs5 on %#v: ", node2.FullName)
	node2gs5, _ := node2.Spawn("gs5", ProcessOptions{}, gs5, nil)
	waitForResultWithValue(t, gs5.v, node2gs5.Self())

	// start testing
	fmt.Println("Testing Monitor/Link process by Pid:")

	fmt.Println("... gs1 -local monitor-> gs2")
	ref := node1gs1.MonitorProcess(node1gs2.Self())
	fmt.Println("... stop gs2")
	node1gs2.Exit(etf.Pid{}, "normal")
	fmt.Printf(" wait for 'DOWN' message of gs2 by gs1: ")
	waitFor := etf.Tuple{etf.Atom("DOWN"), ref, etf.Atom("process"), node1gs2.Self(), etf.Atom("normal")}
	waitForResultWithValue(t, gs1.v, waitFor)

	fmt.Println("... gs1 -local link-> gs3")
	node1gs1.Link(node1gs3.Self())
	fmt.Println("... stop gs3")
	node1gs3.Exit(etf.Pid{}, "normal")
	fmt.Printf(" wait for 'EXIT' message of gs3 by gs1: ")
	waitFor = etf.Tuple{etf.Atom("EXIT"), node1gs3.Self(), etf.Atom("normal")}
	waitForResultWithValue(t, gs1.v, waitFor)

	fmt.Println("... restarting gs2:")

	fmt.Printf(" wait for start of gs2 on %#v: ", node1.FullName)
	node1gs2, _ = node1.Spawn("gs2", ProcessOptions{}, gs2, nil)
	waitForResultWithValue(t, gs2.v, node1gs2.Self())

	fmt.Println("... gs2 -local link-> gs1")
	node1gs2.Link(node1gs1.Self())
	fmt.Println("... stop gs2")
	node1gs2.Exit(etf.Pid{}, "normal")
	fmt.Printf(" wait for 'EXIT' message of gs2 by gs1: ")
	waitFor = etf.Tuple{etf.Atom("EXIT"), node1gs2.Self(), etf.Atom("normal")}
	waitForResultWithValue(t, gs1.v, waitFor)

	fmt.Println("... gs1 -remote monitor-> gs4")
	ref = node1gs1.MonitorProcess(node2gs4.Self())
	// MonitorProcess is asynchronous for remote processes, so sleep briefly
	// to make sure the monitor is set up before the process is stopped
	time.Sleep(100 * time.Millisecond)
	fmt.Println("... stop gs4")
	node2gs4.Exit(etf.Pid{}, "normal")
	fmt.Printf(" wait for 'DOWN' message of node2.gs4 by gs1: ")
	waitFor = etf.Tuple{etf.Atom("DOWN"), ref, etf.Atom("process"), node2gs4.Self(), etf.Atom("normal")}
	waitForResultWithValue(t, gs1.v, waitFor)

	fmt.Println("... gs1 -remote link-> gs5")
	node1gs1.Link(node2gs5.Self())
	time.Sleep(100 * time.Millisecond)
	fmt.Println("... stop gs5")
	node2gs5.Exit(etf.Pid{}, "normal")
	fmt.Printf(" wait for 'EXIT' message of node2.gs5 by gs1: ")
	waitFor = etf.Tuple{etf.Atom("EXIT"), node2gs5.Self(), etf.Atom("normal")}
	waitForResultWithValue(t, gs1.v, waitFor)

	// starting gs2, gs3, gs4, gs5 again
	fmt.Println("... restarting gs2, gs3, gs4, gs5:")

	fmt.Printf(" wait for start of gs2 on %#v: ", node1.FullName)
	node1gs2, _ = node1.Spawn("gs2", ProcessOptions{}, gs2, nil)
	waitForResultWithValue(t, gs2.v, node1gs2.Self())

	fmt.Printf(" wait for start of gs3 on %#v: ", node1.FullName)
	node1gs3, _ = node1.Spawn("gs3", ProcessOptions{}, gs3, nil)
	waitForResultWithValue(t, gs3.v, node1gs3.Self())

	fmt.Printf(" wait for start of gs4 on %#v: ", node2.FullName)
	node2gs4, _ = node2.Spawn("gs4", ProcessOptions{}, gs4, nil)
	waitForResultWithValue(t, gs4.v, node2gs4.Self())

	fmt.Printf(" wait for start of gs5 on %#v: ", node2.FullName)
	node2gs5, _ = node2.Spawn("gs5", ProcessOptions{}, gs5, nil)
	waitForResultWithValue(t, gs5.v, node2gs5.Self())

	fmt.Println("... gs1 -remote monitor-> gs4")
	fmt.Println("... gs3 -remote link-> gs5")
	fmt.Println("... gs2 -monitor node-> node2")

	ref = node1gs1.MonitorProcess(node2gs4.Self())
	node1gs3.Link(node2gs5.Self())
	node1gs2.MonitorNode(node2.FullName)

	fmt.Println("... stop node2")
	node2.Stop()

	waitFor = etf.Tuple{etf.Atom("DOWN"), ref, etf.Atom("process"), node2gs4.Self(), etf.Atom("noconnection")}
	fmt.Printf(" wait for 'DOWN' with reason 'noconnection' by gs1: ")
	waitForResultWithValue(t, gs1.v, waitFor)
	waitFor = etf.Tuple{etf.Atom("EXIT"), node2gs5.Self(), etf.Atom("noconnection")}
	fmt.Printf(" wait for 'EXIT' with reason 'noconnection' by gs3: ")
	waitForResultWithValue(t, gs3.v, waitFor)
	waitFor = etf.Tuple{etf.Atom("nodedown"), node2.FullName}
	fmt.Printf(" wait for 'nodedown' by gs2: ")
	waitForResultWithValue(t, gs2.v, waitFor)

	fmt.Printf("Stopping nodes: %v, %v\n", node1.FullName, node2.FullName)
	node1.Stop()
	node2.Stop()
}
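// Editor's sketch (not part of the commit): the monitor/link API exercised by
// the test above, reduced to its essentials. The two processes are assumed to
// be already spawned; names here are illustrative.
func monitorSketch(watcher, watched *Process) {
	// a {'DOWN', Ref, process, Pid, Reason} tuple is delivered to watcher's
	// HandleInfo once watched terminates
	ref := watcher.MonitorProcess(watched.Self())
	_ = ref

	// a link is bidirectional: either side terminating sends
	// {'EXIT', Pid, Reason} to the other
	watcher.Link(watched.Self())

	watched.Exit(etf.Pid{}, "normal")
}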
nconst.go
@@ -1,22 +0,0 @@
package ergonode

// Distributed operations codes (http://www.erlang.org/doc/apps/erts/erl_dist_protocol.html)
const (
	LINK           = 1
	SEND           = 2
	EXIT           = 3
	UNLINK         = 4
	NODE_LINK      = 5
	REG_SEND       = 6
	GROUP_LEADER   = 7
	EXIT2          = 8
	SEND_TT        = 12
	EXIT_TT        = 13
	REG_SEND_TT    = 16
	EXIT2_TT       = 18
	MONITOR        = 19
	DEMONITOR      = 20
	MONITOR_EXIT   = 21
	SEND_SENDER    = 22
	SEND_SENDER_TT = 23
)
net_kernel.go
@@ -1,54 +1,242 @@
 package ergonode

 // https://github.com/erlang/otp/blob/master/lib/kernel/src/net_kernel.erl

 import (
+	"context"
+	"fmt"
+	"runtime"
+	"syscall"
+	"time"

 	"github.com/halturin/ergonode/etf"
 	"github.com/halturin/ergonode/lib"
 )

-type netKernel struct {
-	GenServer
+type netKernelSup struct {
+	Supervisor
 }

-func (nk *netKernel) Init(args ...interface{}) (state interface{}) {
+func (nks *netKernelSup) Init(args ...interface{}) SupervisorSpec {
+	return SupervisorSpec{
+		Children: []SupervisorChildSpec{
+			SupervisorChildSpec{
+				Name:    "net_kernel",
+				Child:   &netKernel{},
+				Restart: SupervisorChildRestartPermanent,
+			},
+			SupervisorChildSpec{
+				Name:    "global_name_server",
+				Child:   &globalNameServer{},
+				Restart: SupervisorChildRestartPermanent,
+			},
+			SupervisorChildSpec{
+				Name:    "rex",
+				Child:   &rex{},
+				Restart: SupervisorChildRestartPermanent,
+			},
+			SupervisorChildSpec{
+				Name:    "observer_backend",
+				Child:   &observerBackend{},
+				Restart: SupervisorChildRestartPermanent,
+			},
+			SupervisorChildSpec{
+				Name:    "erlang",
+				Child:   &erlang{},
+				Restart: SupervisorChildRestartPermanent,
+			},
+		},
+		Strategy: SupervisorStrategy{
+			Type:      SupervisorStrategyOneForOne,
+			Intensity: 10,
+			Period:    5,
+		},
+	}
+}
+
+type netKernel struct {
+	GenServer
+	process     *Process
+	routinesCtx map[etf.Pid]context.CancelFunc
+}
+
+// Init initializes process state using arbitrary arguments
+// Init(...) -> state
+func (nk *netKernel) Init(p *Process, args ...interface{}) (state interface{}) {
 	lib.Log("NET_KERNEL: Init: %#v", args)
-	nk.Node.Register(etf.Atom("net_kernel"), nk.Self)
+	nk.process = p
+	nk.routinesCtx = make(map[etf.Pid]context.CancelFunc)
 	return nil
 }

-func (nk *netKernel) HandleCast(message *etf.Term, state interface{}) (code int, stateout interface{}) {
-	lib.Log("NET_KERNEL: HandleCast: %#v", *message)
-	stateout = state
-	code = 0
-	return
+// HandleCast -> ("noreply", state) - noreply
+//               ("stop", reason) - stop with reason
+func (nk *netKernel) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
+	lib.Log("NET_KERNEL: HandleCast: %#v", message)
+	return "noreply", state
 }

-func (nk *netKernel) HandleCall(from *etf.Tuple, message *etf.Term, state interface{}) (code int, reply *etf.Term, stateout interface{}) {
-	lib.Log("NET_KERNEL: HandleCall: %#v, From: %#v", *message, *from)
+// HandleCall serves incoming messages sending via gen_server:call
+// HandleCall -> ("reply", message, state) - reply
+//               ("noreply", _, state) - noreply
+//               ("stop", reason, _) - normal stop
+func (nk *netKernel) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (code string, reply etf.Term, stateout interface{}) {
+	lib.Log("NET_KERNEL: HandleCall: %#v, From: %#v", message, from)
 	stateout = state
-	code = 1
-	switch t := (*message).(type) {
+	code = "reply"
+
+	switch t := (message).(type) {
 	case etf.Tuple:
 		if len(t) == 2 {
 			switch tag := t[0].(type) {
 			case etf.Atom:
 				if string(tag) == "is_auth" {
 					lib.Log("NET_KERNEL: is_auth: %#v", t[1])
-					replyTerm := etf.Term(etf.Atom("yes"))
-					reply = &replyTerm
+					reply = etf.Atom("yes")
 				}
 			}
 		}
+		if len(t) == 5 {
+			switch t.Element(3) {
+			case etf.Atom("procs_info"):
+				// etf.Tuple{"spawn_link", "observer_backend", "procs_info", etf.List{etf.Pid{}}, etf.Pid{}}
+				sendTo := t.Element(4).(etf.List).Element(1).(etf.Pid)
+				go sendProcInfo(nk.process, sendTo)
+				reply = nk.process.Self()
+			case etf.Atom("fetch_stats"):
+				// etf.Tuple{"spawn_link", "observer_backend", "fetch_stats", etf.List{etf.Pid{}, 500}, etf.Pid{}}
+				sendTo := t.Element(4).(etf.List).Element(1).(etf.Pid)
+				period := t.Element(4).(etf.List).Element(2).(int)
+				if _, ok := nk.routinesCtx[sendTo]; ok {
+					reply = etf.Atom("error")
+					return
+				}
+
+				nk.process.MonitorProcess(sendTo)
+				ctx, cancel := context.WithCancel(nk.process.Context)
+				nk.routinesCtx[sendTo] = cancel
+				go sendStats(ctx, nk.process, sendTo, period, cancel)
+				reply = nk.process.Self()
+			}
+		}
 	}
 	return
 }

-func (nk *netKernel) HandleInfo(message *etf.Term, state interface{}) (code int, stateout interface{}) {
-	lib.Log("NET_KERNEL: HandleInfo: %#v", *message)
-	stateout = state
-	code = 0
-	return
+// HandleInfo serves all another incoming messages (Pid ! message)
+// HandleInfo -> ("noreply", state) - noreply
+//               ("stop", reason) - normal stop
+func (nk *netKernel) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
+	lib.Log("NET_KERNEL: HandleInfo: %#v", message)
+	// {"DOWN", etf.Ref{Node:"demo@127.0.0.1", Creation:0x1, Id:[]uint32{0x27715, 0x5762, 0x0}}, "process",
+	//  etf.Pid{Node:"erl-demo@127.0.0.1", Id:0x460, Serial:0x0, Creation:0x1}, "normal"}
+	switch m := message.(type) {
+	case etf.Tuple:
+		if m.Element(1) == etf.Atom("DOWN") {
+			pid := m.Element(4).(etf.Pid)
+			if cancel, ok := nk.routinesCtx[pid]; ok {
+				cancel()
+				delete(nk.routinesCtx, pid)
+			}
+		}
+	}
+	return "noreply", state
 }

-func (nk *netKernel) Terminate(reason int, state interface{}) {
+// Terminate called when process died
+func (nk *netKernel) Terminate(reason string, state interface{}) {
 	lib.Log("NET_KERNEL: Terminate: %#v", reason)
 }
+
+func sendProcInfo(p *Process, to etf.Pid) {
+	list := p.Node.GetProcessList()
+	procsInfoList := etf.List{}
+	for i := range list {
+		info := list[i].Info()
+		// {procs_info, self(), etop_collect(Pids, [])}
+		procsInfoList = append(procsInfoList,
+			etf.Tuple{
+				etf.Atom("etop_proc_info"), // record name #etop_proc_info
+				list[i].Self(),             // pid
+				0,                          // mem
+				info.Reductions,            // reds
+				etf.Atom(list[i].Name()),   // etf.Tuple{etf.Atom("ergo"), etf.Atom(list[i].Name()), 0}, // name
+				0,                          // runtime
+				info.CurrentFunction,       // etf.Tuple{etf.Atom("ergo"), etf.Atom(info.CurrentFunction), 0}, // cf
+				info.MessageQueueLen,       // mq
+			},
+		)
+	}
+
+	procsInfo := etf.Tuple{
+		etf.Atom("procs_info"),
+		p.Self(),
+		procsInfoList,
+	}
+	p.Send(to, procsInfo)
+	// observer waits for the EXIT message since this function was executed via spawn
+	p.Send(to, etf.Tuple{etf.Atom("EXIT"), p.Self(), etf.Atom("normal")})
+}
+
+func sendStats(ctx context.Context, p *Process, to etf.Pid, period int, cancel context.CancelFunc) {
+	var usage syscall.Rusage
+	var utime, utimetotal, stime, stimetotal int64
+	defer cancel()
+	for {
+		select {
+		case <-time.After(time.Duration(period) * time.Millisecond):
+			runtime.ReadMemStats(&m)
+
+			total := etf.Tuple{etf.Atom("total"), m.TotalAlloc}
+			system := etf.Tuple{etf.Atom("system"), m.HeapSys}
+			processes := etf.Tuple{etf.Atom("processes"), m.Alloc}
+			processesUsed := etf.Tuple{etf.Atom("processes_used"), m.HeapInuse}
+			atom := etf.Tuple{etf.Atom("atom"), 0}
+			atomUsed := etf.Tuple{etf.Atom("atom_used"), 0}
+			binary := etf.Tuple{etf.Atom("binary"), 0}
+			code := etf.Tuple{etf.Atom("code"), 0}
+			ets := etf.Tuple{etf.Atom("ets"), 0}
+
+			if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil {
+				fmt.Println("cannot get rusage for", syscall.Getpid(), err)
+				continue
+			}
+			utime = usage.Utime.Sec*1000000000 + usage.Utime.Nano()
+			stime = usage.Stime.Sec*1000000000 + usage.Stime.Nano()
+			utimetotal += utime
+			stimetotal += stime
+			stats := etf.Tuple{
+				etf.Atom("stats"),
+				1,
+				etf.List{
+					etf.Tuple{1, utime, utimetotal},
+					etf.Tuple{2, stime, stimetotal},
+				},
+				etf.Tuple{
+					etf.Tuple{etf.Atom("input"), 0},
+					etf.Tuple{etf.Atom("output"), 0},
+				},
+				etf.List{
+					total,
+					system,
+					processes,
+					processesUsed,
+					atom,
+					atomUsed,
+					binary,
+					code,
+					ets,
+				},
+			}
+			p.Send(to, stats)
+		case <-ctx.Done():
+			return
+		}
+	}
+}
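// Editor's sketch (not part of the commit): the shape of sendStats above — a
// periodic, cancellable sender driven by a context. Runnable standalone; the
// 500ms period is an illustrative assumption.

package main

import (
	"context"
	"fmt"
	"time"
)

func sendEvery(ctx context.Context, period time.Duration) {
	for {
		select {
		case <-time.After(period):
			fmt.Println("tick") // here netKernel sends the 'stats' tuple
		case <-ctx.Done():
			return // the 'DOWN' handler in HandleInfo cancels this context
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go sendEvery(ctx, 500*time.Millisecond)
	time.Sleep(2 * time.Second)
	cancel()
}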
node.go
@@ -0,0 +1,692 @@
package ergonode

import (
	"context"
	"fmt"
	"log"
	"runtime"
	"sync/atomic"
	"syscall"

	"github.com/halturin/ergonode/dist"
	"github.com/halturin/ergonode/etf"
	"github.com/halturin/ergonode/lib"

	"net"
	"strconv"
	"strings"
	"time"
)

// Node instance of created node using CreateNode
type Node struct {
	dist.EPMD
	listener net.Listener
	Cookie   string

	registrar *registrar
	monitor   *monitor
	context   context.Context
	Stop      context.CancelFunc

	StartedAt time.Time
	uniqID    int64
}

// NodeOptions struct with bootstrapping options for CreateNode
type NodeOptions struct {
	ListenRangeBegin  uint16
	ListenRangeEnd    uint16
	Hidden            bool
	EPMDPort          uint16
	DisableEPMDServer bool
}

const (
	defaultListenRangeBegin uint16 = 15000
	defaultListenRangeEnd   uint16 = 65000
	defaultEPMDPort         uint16 = 4369

	versionOTP        int = 21
	versionERTSprefix     = "ergo"
	version               = "1.0.0"
)

// CreateNode creates a new node with the given name and cookie string
func CreateNode(name string, cookie string, opts NodeOptions) *Node {
	return CreateNodeWithContext(context.Background(), name, cookie, opts)
}

// CreateNodeWithContext creates a new node with the specified context, name and cookie string
func CreateNodeWithContext(ctx context.Context, name string, cookie string, opts NodeOptions) *Node {
	lib.Log("Start with name '%s' and cookie '%s'", name, cookie)
	nodectx, nodestop := context.WithCancel(ctx)

	node := Node{
		Cookie:    cookie,
		context:   nodectx,
		Stop:      nodestop,
		StartedAt: time.Now(),
		uniqID:    time.Now().UnixNano(),
	}

	// start networking if name is defined
	if name != "" {
		// set defaults
		if opts.ListenRangeBegin == 0 {
			opts.ListenRangeBegin = defaultListenRangeBegin
		}
		if opts.ListenRangeEnd == 0 {
			opts.ListenRangeEnd = defaultListenRangeEnd
		}
		lib.Log("Listening range: %d...%d", opts.ListenRangeBegin, opts.ListenRangeEnd)

		if opts.EPMDPort == 0 {
			opts.EPMDPort = defaultEPMDPort
		}
		if opts.EPMDPort != 4369 {
			lib.Log("Using custom EPMD port: %d", opts.EPMDPort)
		}

		if opts.Hidden {
			lib.Log("Running as hidden node")
		}
		ns := strings.Split(name, "@")
		if len(ns) != 2 {
			panic("FQDN for node name is required (example: node@hostname)")
		}

		if listenPort := node.listen(ns[1], opts.ListenRangeBegin, opts.ListenRangeEnd); listenPort == 0 {
			panic("Can't listen on any port within the given range")
		} else {
			// start EPMD
			node.EPMD.Init(nodectx, name, listenPort, opts.EPMDPort, opts.Hidden, opts.DisableEPMDServer)
		}
	}

	node.registrar = createRegistrar(&node)
	node.monitor = createMonitor(&node)

	netKernelSup := &netKernelSup{}
	node.Spawn("net_kernel_sup", ProcessOptions{}, netKernelSup)

	return &node
}
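// Editor's sketch (not part of the commit): minimal CreateNode usage. The node
// name, cookie and port range below are illustrative assumptions.
func createNodeSketch() {
	opts := NodeOptions{
		ListenRangeBegin: 15000,
		ListenRangeEnd:   15010,
	}
	node := CreateNode("demo@localhost", "cookie123", opts)
	if node == nil {
		panic("can't start node")
	}
	defer node.Stop()
	// the node is now registered in EPMD and accepts dist connections
}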

// Spawn creates a new process
func (n *Node) Spawn(name string, opts ProcessOptions, object interface{}, args ...interface{}) (*Process, error) {
	process, err := n.registrar.RegisterProcessExt(name, object, opts)
	if err != nil {
		return nil, err
	}

	go func() {
		pid := process.Self()

		defer func() {
			if r := recover(); r != nil {
				fmt.Printf("Warning: recovered process: %v %#v\n", process.self, r)
				n.registrar.UnregisterProcess(pid)
				n.monitor.ProcessTerminated(pid, etf.Atom(name), "panic")
				process.Kill()
			}
			close(process.ready)
		}()

		reason := object.(ProcessBehaviour).loop(process, object, args...)
		n.registrar.UnregisterProcess(pid)
		n.monitor.ProcessTerminated(pid, etf.Atom(name), reason)
		if reason != "kill" {
			process.Kill()
		}
	}()

	<-process.ready

	return process, nil
}

// Register associates the name with the given pid
func (n *Node) Register(name string, pid etf.Pid) error {
	return n.registrar.RegisterName(name, pid)
}

// Unregister removes the association between the name and a pid
func (n *Node) Unregister(name string) {
	n.registrar.UnregisterName(name)
}

// IsProcessAlive returns true if the process with given pid is alive
func (n *Node) IsProcessAlive(pid etf.Pid) bool {
	if pid.Node != etf.Atom(n.FullName) {
		return false
	}

	p := n.registrar.GetProcessByPid(pid)
	if p == nil {
		return false
	}

	return p.IsAlive()
}

// IsAlive returns true if node is running
func (n *Node) IsAlive() bool {
	return n.context.Err() == nil
}

// Wait waits until node stopped
func (n *Node) Wait() {
	<-n.context.Done()
}

// WaitWithTimeout waits until node stopped. Returns ErrTimeout
// if the given timeout is exceeded
func (n *Node) WaitWithTimeout(d time.Duration) error {
	timer := time.NewTimer(d)
	defer timer.Stop()

	select {
	case <-timer.C:
		return ErrTimeout
	case <-n.context.Done():
		return nil
	}
}
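// Editor's sketch (not part of the commit): shutting a node down from another
// goroutine and waiting for it with a bounded timeout.
func waitSketch(node *Node) {
	go node.Stop()
	if err := node.WaitWithTimeout(5 * time.Second); err == ErrTimeout {
		fmt.Println("node is still running after 5s")
	}
}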

// ProcessInfo returns the details about given Pid
func (n *Node) ProcessInfo(pid etf.Pid) (ProcessInfo, error) {
	p := n.registrar.GetProcessByPid(pid)
	if p == nil {
		return ProcessInfo{}, fmt.Errorf("undefined")
	}

	return p.Info(), nil
}

func (n *Node) serve(c net.Conn, negotiate bool) error {
	var nodeDesc *dist.NodeDesc

	if negotiate {
		nodeDesc = dist.NewNodeDesc(n.FullName, n.Cookie, false, c)
	} else {
		nodeDesc = dist.NewNodeDesc(n.FullName, n.Cookie, false, nil)
	}

	send := make(chan []etf.Term, 10)
	stop := make(chan bool)
	// run writer routine
	go func() {
		defer c.Close()
		defer func() { n.registrar.UnregisterPeer(nodeDesc.GetRemoteName()) }()

		for {
			select {
			case terms := <-send:
				err := nodeDesc.WriteMessage(c, terms)
				if err != nil {
					lib.Log("node error (writing): %s", err.Error())
					return
				}
			case <-n.context.Done():
				return
			case <-stop:
				return
			}
		}
	}()

	// run reader routine
	go func() {
		defer c.Close()
		defer func() { n.registrar.UnregisterPeer(nodeDesc.GetRemoteName()) }()
		for {
			terms, err := nodeDesc.ReadMessage(c)
			if err != nil {
				lib.Log("node error (reading): %s", err.Error())
				break
			}
			n.handleTerms(terms)
		}
	}()

	p := peer{
		conn: c,
		send: send,
	}

	// waiting for the handshake to finish
	err := <-nodeDesc.HandshakeError
	if err != nil {
		stop <- true
		return err
	}

	// close this connection if we can't register this node for some reason (duplicate?)
	if err := n.registrar.RegisterPeer(nodeDesc.GetRemoteName(), p); err != nil {
		stop <- true
		return err
	}

	return nil
}

// LoadedApplications returns a list with information about the
// applications, which are loaded using ApplicationLoad
func (n *Node) LoadedApplications() []ApplicationInfo {
	info := []ApplicationInfo{}
	for _, a := range n.registrar.ApplicationList() {
		appInfo := ApplicationInfo{
			Name:        a.Name,
			Description: a.Description,
			Version:     a.Version,
		}
		info = append(info, appInfo)
	}
	return info
}

// WhichApplications returns a list with information about the applications that are currently running.
func (n *Node) WhichApplications() []ApplicationInfo {
	info := []ApplicationInfo{}
	for _, a := range n.registrar.ApplicationList() {
		if a.process == nil {
			// list only started apps
			continue
		}
		appInfo := ApplicationInfo{
			Name:        a.Name,
			Description: a.Description,
			Version:     a.Version,
			PID:         a.process.self,
		}
		info = append(info, appInfo)
	}
	return info
}

// GetApplicationInfo returns information about application
func (n *Node) GetApplicationInfo(name string) (ApplicationInfo, error) {
	spec := n.registrar.GetApplicationSpecByName(name)
	if spec == nil {
		return ApplicationInfo{}, ErrAppUnknown
	}

	pid := etf.Pid{}
	if spec.process != nil {
		pid = spec.process.self
	}

	return ApplicationInfo{
		Name:        name,
		Description: spec.Description,
		Version:     spec.Version,
		PID:         pid,
	}, nil
}

// ApplicationLoad loads the application specification for an application
// into the node. It also loads the application specifications for any included applications
func (n *Node) ApplicationLoad(app interface{}, args ...interface{}) error {
	spec, err := app.(ApplicationBehavior).Load(args...)
	if err != nil {
		return err
	}
	spec.app = app.(ApplicationBehavior)
	for i := range spec.Applications {
		if e := n.ApplicationLoad(spec.Applications[i], args...); e != nil && e != ErrAppAlreadyLoaded {
			return e
		}
	}

	return n.registrar.RegisterApp(spec.Name, &spec)
}

// ApplicationUnload unloads the application specification for Application from the
// node. It also unloads the application specifications for any included applications.
func (n *Node) ApplicationUnload(appName string) error {
	spec := n.registrar.GetApplicationSpecByName(appName)
	if spec == nil {
		return ErrAppUnknown
	}
	if spec.process != nil {
		return ErrAppAlreadyStarted
	}

	n.registrar.UnregisterApp(appName)
	return nil
}

// ApplicationStartPermanent starts Application with start type ApplicationStartPermanent.
// If this application terminates, all other applications and the entire node are also
// terminated
func (n *Node) ApplicationStartPermanent(appName string, args ...interface{}) (*Process, error) {
	return n.applicationStart(ApplicationStartPermanent, appName, args...)
}

// ApplicationStartTransient starts Application with start type ApplicationStartTransient.
// If a transient application terminates with reason 'normal', this is reported and no
// other applications are terminated. Otherwise, all other applications and the node
// are terminated
func (n *Node) ApplicationStartTransient(appName string, args ...interface{}) (*Process, error) {
	return n.applicationStart(ApplicationStartTransient, appName, args...)
}

// ApplicationStart starts Application with start type ApplicationStartTemporary.
// If the application terminates, this is reported but no other applications
// are terminated
func (n *Node) ApplicationStart(appName string, args ...interface{}) (*Process, error) {
	return n.applicationStart(ApplicationStartTemporary, appName, args...)
}

func (n *Node) applicationStart(startType, appName string, args ...interface{}) (*Process, error) {
	spec := n.registrar.GetApplicationSpecByName(appName)
	if spec == nil {
		return nil, ErrAppUnknown
	}

	spec.startType = startType

	// to prevent a race condition on starting the application we should
	// make sure that nobody else is starting it
	spec.mutex.Lock()
	defer spec.mutex.Unlock()

	if spec.process != nil {
		return nil, ErrAppAlreadyStarted
	}

	for _, depAppName := range spec.Applications {
		if _, e := n.ApplicationStart(depAppName); e != nil && e != ErrAppAlreadyStarted {
			return nil, e
		}
	}

	// passing 'spec' to the process loop in order to handle children's startup.
	args = append([]interface{}{spec}, args...)
	appProcess, e := n.Spawn("", ProcessOptions{}, spec.app, args...)
	if e != nil {
		return nil, e
	}

	spec.process = appProcess
	return appProcess, nil
}

// ApplicationStop stops a running application
func (n *Node) ApplicationStop(name string) error {
	spec := n.registrar.GetApplicationSpecByName(name)
	if spec == nil {
		return ErrAppUnknown
	}

	if spec.process == nil {
		return ErrAppIsNotRunning
	}

	spec.process.Exit(spec.process.Self(), "normal")
	return nil
}
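// Editor's sketch (not part of the commit): the application lifecycle exposed
// above. "app" is any value implementing ApplicationBehavior; the name
// "demo_app" is an illustrative assumption and must match the loaded spec.
func appLifecycleSketch(node *Node, app ApplicationBehavior) error {
	if err := node.ApplicationLoad(app); err != nil {
		return err
	}
	if _, err := node.ApplicationStartPermanent("demo_app"); err != nil {
		return err
	}
	return node.ApplicationStop("demo_app")
}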

func (n *Node) handleTerms(terms []etf.Term) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("Warning: recovered node.handleTerms: %s\n", r)
		}
	}()

	if len(terms) == 0 {
		// keep alive
		return
	}

	lib.Log("Node terms: %#v", terms)

	switch t := terms[0].(type) {
	case etf.Tuple:
		switch act := t.Element(1).(type) {
		case int:
			switch act {
			case distProtoREG_SEND:
				// {6, FromPid, Unused, ToName}
				if len(terms) == 2 {
					n.registrar.route(t.Element(2).(etf.Pid), t.Element(4), terms[1])
				} else {
					lib.Log("*** ERROR: bad REG_SEND: %#v", terms)
				}

			case distProtoSEND:
				// {2, Unused, ToPid}
				// SEND has no sender pid
				n.registrar.route(etf.Pid{}, t.Element(3), terms[1])

			case distProtoLINK:
				// {1, FromPid, ToPid}
				lib.Log("LINK message (act %d): %#v", act, t)
				n.monitor.Link(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))

			case distProtoUNLINK:
				// {4, FromPid, ToPid}
				lib.Log("UNLINK message (act %d): %#v", act, t)
				n.monitor.Unink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))

			case distProtoNODE_LINK:
				lib.Log("NODE_LINK message (act %d): %#v", act, t)

			case distProtoEXIT:
				// {3, FromPid, ToPid, Reason}
				lib.Log("EXIT message (act %d): %#v", act, t)
				terminated := t.Element(2).(etf.Pid)
				reason := fmt.Sprint(t.Element(4))
				n.monitor.ProcessTerminated(terminated, etf.Atom(""), string(reason))

			case distProtoEXIT2:
				lib.Log("EXIT2 message (act %d): %#v", act, t)

			case distProtoMONITOR:
				// {19, FromPid, ToProc, Ref}, where FromPid = monitoring process
				// and ToProc = monitored process pid or name (atom)
				lib.Log("MONITOR message (act %d): %#v", act, t)
				n.monitor.MonitorProcessWithRef(t.Element(2).(etf.Pid), t.Element(3), t.Element(4).(etf.Ref))

			case distProtoDEMONITOR:
				// {20, FromPid, ToProc, Ref}, where FromPid = monitoring process
				// and ToProc = monitored process pid or name (atom)
				lib.Log("DEMONITOR message (act %d): %#v", act, t)
				n.monitor.DemonitorProcess(t.Element(4).(etf.Ref))

			case distProtoMONITOR_EXIT:
				// {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process
				// pid or name (atom), ToPid = monitoring process, and Reason = exit reason for the monitored process
				lib.Log("MONITOR_EXIT message (act %d): %#v", act, t)
				terminated := t.Element(2).(etf.Pid)
				reason := fmt.Sprint(t.Element(5))
				// FIXME: we must handle case when 'terminated' is atom
				n.monitor.ProcessTerminated(terminated, etf.Atom(""), string(reason))

			// Not implemented yet, just stubs. TODO.
			case distProtoSEND_SENDER:
				lib.Log("SEND_SENDER message (act %d): %#v", act, t)
			case distProtoSEND_SENDER_TT:
				lib.Log("SEND_SENDER_TT message (act %d): %#v", act, t)
			case distProtoPAYLOAD_EXIT:
				lib.Log("PAYLOAD_EXIT message (act %d): %#v", act, t)
			case distProtoPAYLOAD_EXIT_TT:
				lib.Log("PAYLOAD_EXIT_TT message (act %d): %#v", act, t)
			case distProtoPAYLOAD_EXIT2:
				lib.Log("PAYLOAD_EXIT2 message (act %d): %#v", act, t)
			case distProtoPAYLOAD_EXIT2_TT:
				lib.Log("PAYLOAD_EXIT2_TT message (act %d): %#v", act, t)
			case distProtoPAYLOAD_MONITOR_P_EXIT:
				lib.Log("PAYLOAD_MONITOR_P_EXIT message (act %d): %#v", act, t)

			default:
				lib.Log("Unhandled node message (act %d): %#v", act, t)
			}
		case etf.Atom:
			switch act {
			case etf.Atom("$connection"):
				// the ready channel is waiting for registration of this connection
				ready := (t[2]).(chan error)
				ready <- nil
			}
		default:
			lib.Log("UNHANDLED ACT: %#v", t.Element(1))
		}
	}
}

// ProvideRPC registers the given module/function as an RPC method
func (n *Node) ProvideRPC(module string, function string, fun rpcFunction) error {
	lib.Log("RPC provide: %s:%s %#v", module, function, fun)
	message := etf.Tuple{
		etf.Atom("$provide"),
		etf.Atom(module),
		etf.Atom(function),
		fun,
	}
	rex := n.registrar.GetProcessByName("rex")
	if rex == nil {
		return fmt.Errorf("RPC module is disabled")
	}

	if v, err := rex.Call(rex.Self(), message); v != etf.Atom("ok") || err != nil {
		return fmt.Errorf("value: %s err: %s", v, err)
	}

	return nil
}

// RevokeRPC unregisters the given module/function
func (n *Node) RevokeRPC(module, function string) error {
	lib.Log("RPC revoke: %s:%s", module, function)

	rex := n.registrar.GetProcessByName("rex")
	if rex == nil {
		return fmt.Errorf("RPC module is disabled")
	}

	message := etf.Tuple{
		etf.Atom("$revoke"),
		etf.Atom(module),
		etf.Atom(function),
	}

	if v, err := rex.Call(rex.Self(), message); v != etf.Atom("ok") || err != nil {
		return fmt.Errorf("value: %s err: %s", v, err)
	}

	return nil
}
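// Editor's sketch (not part of the commit): providing an RPC handler that an
// Erlang peer can reach via rpc:call(Node, demo, echo, Args). The module and
// function names "demo"/"echo" are illustrative assumptions.
func provideEchoSketch(node *Node) error {
	echo := func(args ...etf.Term) etf.Term {
		// return the arguments unchanged
		return etf.List(args)
	}
	return node.ProvideRPC("demo", "echo", echo)
}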

// GetProcessByName returns Process associated with given name
func (n *Node) GetProcessByName(name string) *Process {
	return n.registrar.GetProcessByName(name)
}

// GetProcessByPid returns Process by given pid
func (n *Node) GetProcessByPid(pid etf.Pid) *Process {
	return n.registrar.GetProcessByPid(pid)
}

// GetProcessList returns a list of the running processes
func (n *Node) GetProcessList() []*Process {
	return n.registrar.ProcessList()
}

// MakeRef returns atomic reference etf.Ref within this node
func (n *Node) MakeRef() (ref etf.Ref) {
	ref.Node = etf.Atom(n.FullName)
	ref.Creation = 1
	nt := atomic.AddInt64(&n.uniqID, 1)
	// split the counter across two 32-bit words; the first word of an
	// Erlang reference carries only 18 significant bits, hence the masking
	id1 := uint32(uint64(nt) & ((2 << 17) - 1))
	id2 := uint32(uint64(nt) >> 46)
	ref.Id = []uint32{id1, id2, 0}

	return
}

// VersionERTS returns the ERTS version string this node reports
func (n *Node) VersionERTS() string {
	return fmt.Sprintf("%s-%s-%s", versionERTSprefix, version, runtime.Version())
}

// VersionOTP returns the OTP version this node is compatible with
func (n *Node) VersionOTP() int {
	return versionOTP
}

func (n *Node) connect(to etf.Atom) error {
	var port int
	var err error
	var dialer = net.Dialer{
		Control: setSocketOptions,
	}
	if port, err = n.ResolvePort(string(to)); port < 0 {
		return fmt.Errorf("Can't resolve port: %s", err)
	}
	ns := strings.Split(string(to), "@")

	c, err := dialer.DialContext(n.context, "tcp", net.JoinHostPort(ns[1], strconv.Itoa(int(port))))
	if err != nil {
		lib.Log("Error calling net.Dialer.DialContext : %s", err.Error())
		return err
	}

	if err := n.serve(c, true); err != nil {
		c.Close()
		return err
	}
	return nil
}

func (n *Node) listen(name string, listenRangeBegin, listenRangeEnd uint16) uint16 {
	lc := net.ListenConfig{Control: setSocketOptions}

	for p := listenRangeBegin; p <= listenRangeEnd; p++ {
		l, err := lc.Listen(n.context, "tcp", net.JoinHostPort(name, strconv.Itoa(int(p))))
		if err != nil {
			continue
		}
		go func() {
			for {
				// check the Accept error before touching the connection
				c, err := l.Accept()
				if err != nil {
					lib.Log(err.Error())
					continue
				}
				lib.Log("Accepted new connection from %s", c.RemoteAddr().String())
				if err := n.serve(c, false); err != nil {
					lib.Log("Can't serve connection due to: %s", err)
					c.Close()
				}
			}
		}()
		return p
	}

	return 0
}

func setSocketOptions(network string, address string, c syscall.RawConn) error {
	var fn = func(s uintptr) {
		var setErr error
		setErr = syscall.SetsockoptInt(int(s), syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 5)
		if setErr != nil {
			log.Fatal(setErr)
		}
	}
	if err := c.Control(fn); err != nil {
		return err
	}

	return nil
}
node_test.go
@@ -0,0 +1,60 @@
package ergonode

import (
	"fmt"
	"net"
	"testing"
)

func TestNode(t *testing.T) {
	opts := NodeOptions{
		ListenRangeBegin: 25001,
		ListenRangeEnd:   25001,
		EPMDPort:         24999,
	}

	node := CreateNode("node@localhost", "cookies", opts)

	if conn, err := net.Dial("tcp", ":25001"); err != nil {
		fmt.Println("Connect to the node's listening port FAILED")
		t.Fatal(err)
	} else {
		defer conn.Close()
	}

	if conn, err := net.Dial("tcp", ":24999"); err != nil {
		fmt.Println("Connect to the node's EPMD port FAILED")
		t.Fatal(err)
	} else {
		defer conn.Close()
	}

	p, e := node.Spawn("", ProcessOptions{}, &GenServer{})
	if e != nil {
		t.Fatal(e)
	}
	// an empty GenServer{} should die immediately (via panic and recovering)
	if node.IsProcessAlive(p.Self()) {
		t.Fatal("IsProcessAlive: expect 'false', but got 'true'")
	}

	gs1 := &testGenServer{
		err: make(chan error, 2),
	}
	p, e = node.Spawn("", ProcessOptions{}, gs1)
	if e != nil {
		t.Fatal(e)
	}

	if !node.IsProcessAlive(p.Self()) {
		t.Fatal("IsProcessAlive: expect 'true', but got 'false'")
	}

	_, ee := node.ProcessInfo(p.Self())
	if ee != nil {
		t.Fatal(ee)
	}

	node.Stop()
}
observer.go
@@ -0,0 +1,204 @@
package ergonode

// https://github.com/erlang/otp/blob/master/lib/observer/src/observer_procinfo.erl

import (
	"runtime"
	"time"
	"unsafe"

	"github.com/halturin/ergonode/etf"
	"github.com/halturin/ergonode/lib"
)

var m runtime.MemStats

type observerBackend struct {
	GenServer
	process *Process
}

// Init initializes process state using arbitrary arguments
// Init(...) -> state
func (o *observerBackend) Init(p *Process, args ...interface{}) (state interface{}) {
	lib.Log("OBSERVER: Init: %#v", args)
	o.process = p

	funProcLibInitialCall := func(a ...etf.Term) etf.Term {
		return etf.Tuple{etf.Atom("proc_lib"), etf.Atom("init_p"), 5}
	}
	p.Node.ProvideRPC("proc_lib", "translate_initial_call", funProcLibInitialCall)

	funAppmonInfo := func(a ...etf.Term) etf.Term {
		from := a[0] // pid
		am, e := p.Node.Spawn("", ProcessOptions{}, &appMon{}, from)
		if e != nil {
			return etf.Tuple{etf.Atom("error")}
		}
		return etf.Tuple{etf.Atom("ok"), am.Self()}
	}
	p.Node.ProvideRPC("appmon_info", "start_link2", funAppmonInfo)

	return nil
}

// HandleCast -> ("noreply", state) - noreply
//               ("stop", reason) - stop with reason
func (o *observerBackend) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
	lib.Log("OBSERVER: HandleCast: %#v", message)
	return "noreply", state
}

// HandleCall serves incoming messages sending via gen_server:call
// HandleCall -> ("reply", message, state) - reply
//               ("noreply", _, state) - noreply
//               ("stop", reason, _) - normal stop
func (o *observerBackend) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
	lib.Log("OBSERVER: HandleCall: %v, From: %#v", message, from)
	function := message.(etf.Tuple).Element(1).(etf.Atom)
	// args := message.(etf.Tuple).Element(2).(etf.List)
	switch function {
	case etf.Atom("sys_info"):
		// etf.Tuple{"call", "observer_backend", "sys_info",
		//   etf.List{}, etf.Pid{Node:"erl-examplenode@127.0.0.1", Id:0x46, Serial:0x0, Creation:0x2}}
		reply := etf.Term(o.sysInfo())
		return "reply", reply, state
	case etf.Atom("get_table_list"):
		// TODO: add an implementation here if we decide to support ETS tables
		// args should be like:
		// etf.List{"ets", etf.List{etf.Tuple{"sys_hidden", "true"}, etf.Tuple{"unread_hidden", "true"}}}
		reply := etf.Term(etf.List{})
		return "reply", reply, state
	case etf.Atom("get_port_list"):
		reply := etf.Term(etf.List{})
		return "reply", reply, state
	}

	reply := etf.Term("ok")
	return "reply", reply, state
}

// HandleInfo serves all another incoming messages (Pid ! message)
// HandleInfo -> ("noreply", state) - noreply
//               ("stop", reason) - normal stop
func (o *observerBackend) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
	lib.Log("OBSERVER: HandleInfo: %#v", message)
	return "noreply", state
}

// Terminate called when process died
func (o *observerBackend) Terminate(reason string, state interface{}) {
	lib.Log("OBSERVER: Terminate: %#v", reason)
}

func (o *observerBackend) sysInfo() etf.List {
	// observer_backend:sys_info()
	processCount := etf.Tuple{etf.Atom("process_count"), len(o.process.Node.GetProcessList())}
	processLimit := etf.Tuple{etf.Atom("process_limit"), 262144}
	atomCount := etf.Tuple{etf.Atom("atom_count"), 0}
	atomLimit := etf.Tuple{etf.Atom("atom_limit"), 1}
	etsCount := etf.Tuple{etf.Atom("ets_count"), 0}
	etsLimit := etf.Tuple{etf.Atom("ets_limit"), 1}
	portCount := etf.Tuple{etf.Atom("port_count"), 0}
	portLimit := etf.Tuple{etf.Atom("port_limit"), 1}
	ut := time.Now().Unix() - o.process.Node.StartedAt.Unix()
	uptime := etf.Tuple{etf.Atom("uptime"), ut * 1000}
	runQueue := etf.Tuple{etf.Atom("run_queue"), 0}
	ioInput := etf.Tuple{etf.Atom("io_input"), 0}
	ioOutput := etf.Tuple{etf.Atom("io_output"), 0}
	logicalProcessors := etf.Tuple{etf.Atom("logical_processors"), runtime.NumCPU()}
	logicalProcessorsOnline := etf.Tuple{etf.Atom("logical_processors_online"), runtime.NumCPU()}
	logicalProcessorsAvailable := etf.Tuple{etf.Atom("logical_processors_available"), runtime.NumCPU()}
	schedulers := etf.Tuple{etf.Atom("schedulers"), 1}
	schedulersOnline := etf.Tuple{etf.Atom("schedulers_online"), 1}
	schedulersAvailable := etf.Tuple{etf.Atom("schedulers_available"), 1}
	otpRelease := etf.Tuple{etf.Atom("otp_release"), o.process.Node.VersionOTP()}
	version := etf.Tuple{etf.Atom("version"), etf.Atom(o.process.Node.VersionERTS())}
	systemArchitecture := etf.Tuple{etf.Atom("system_architecture"), etf.Atom(runtime.GOARCH)}
	kernelPoll := etf.Tuple{etf.Atom("kernel_poll"), true}
	smpSupport := etf.Tuple{etf.Atom("smp_support"), true}
	threads := etf.Tuple{etf.Atom("threads"), true}
	threadsPoolSize := etf.Tuple{etf.Atom("threads_pool_size"), 1}
	i := int(1)
	wordsizeInternal := etf.Tuple{etf.Atom("wordsize_internal"), unsafe.Sizeof(i)}
	wordsizeExternal := etf.Tuple{etf.Atom("wordsize_external"), unsafe.Sizeof(i)}
	tmp := etf.Tuple{etf.Atom("instance"), 0,
		etf.List{
			etf.Tuple{etf.Atom("mbcs"), etf.List{
				etf.Tuple{etf.Atom("blocks_size"), 1, 1, 1},
				etf.Tuple{etf.Atom("carriers_size"), 1, 1, 1},
			}},
			etf.Tuple{etf.Atom("sbcs"), etf.List{
				etf.Tuple{etf.Atom("blocks_size"), 0, 0, 0},
				etf.Tuple{etf.Atom("carriers_size"), 0, 0, 0},
			}},
		}}

	allocInfo := etf.Tuple{etf.Atom("alloc_info"), etf.List{
		etf.Tuple{etf.Atom("temp_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("sl_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("std_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("ll_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("eheap_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("ets_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("fix_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("literal_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("binary_alloc"), etf.List{tmp}},
		etf.Tuple{etf.Atom("driver_alloc"), etf.List{tmp}},
	}}

	// Meminfo = erlang:memory().
	runtime.ReadMemStats(&m)

	total := etf.Tuple{etf.Atom("total"), m.HeapAlloc}
	system := etf.Tuple{etf.Atom("system"), m.HeapAlloc}
	processes := etf.Tuple{etf.Atom("processes"), m.HeapAlloc}
	processesUsed := etf.Tuple{etf.Atom("processes_used"), m.HeapAlloc}
	atom := etf.Tuple{etf.Atom("atom"), 0}
	atomUsed := etf.Tuple{etf.Atom("atom_used"), 0}
	binary := etf.Tuple{etf.Atom("binary"), 0}
	code := etf.Tuple{etf.Atom("code"), 0}
	ets := etf.Tuple{etf.Atom("ets"), 0}

	info := etf.List{
		processCount,
		processLimit,
		atomCount,
		atomLimit,
		etsCount,
		etsLimit,
		portCount,
		portLimit,
		uptime,
		runQueue,
		ioInput,
		ioOutput,
		logicalProcessors,
		logicalProcessorsOnline,
		logicalProcessorsAvailable,
		schedulers,
		schedulersOnline,
		schedulersAvailable,
		otpRelease,
		version,
		systemArchitecture,
		kernelPoll,
		smpSupport,
		threads,
		threadsPoolSize,
		wordsizeInternal,
		wordsizeExternal,
		allocInfo,
		// Meminfo
		total,
		system,
		processes,
		processesUsed,
		atom,
		atomUsed,
		binary,
		code,
		ets,
	}
	return info
}
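// Editor's sketch (not part of the commit): querying the observer backend the
// same way Erlang's observer does. The assumption here is that the backend is
// reachable by its registered name from a local process; the message shape
// mirrors what HandleCall above expects.
func fetchSysInfoSketch(p *Process) (etf.Term, error) {
	request := etf.Tuple{etf.Atom("sys_info"), etf.List{}}
	return p.Call(etf.Atom("observer_backend"), request)
}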
process.go
@@ -0,0 +1,377 @@
package ergonode

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/halturin/ergonode/etf"
	"github.com/halturin/ergonode/lib"
)

type ProcessType = string

const (
	DefaultProcessMailboxSize = 100
)

type Process struct {
	sync.RWMutex

	mailBox      chan etf.Tuple
	ready        chan bool
	gracefulExit chan gracefulExitRequest
	direct       chan directMessage
	self         etf.Pid
	groupLeader  *Process
	Context      context.Context
	Kill         context.CancelFunc
	Exit         ProcessExitFunc
	name         string
	Node         *Node

	object interface{}
	state  interface{}
	reply  chan etf.Tuple

	env map[string]interface{}

	parent          *Process
	reductions      uint64 // total number of messages processed from the mailBox
	currentFunction string

	trapExit bool
}

type directMessage struct {
	id      string
	message interface{}
	err     error
	reply   chan directMessage
}

type gracefulExitRequest struct {
	from   etf.Pid
	reason string
}

// ProcessInfo struct with process details
type ProcessInfo struct {
	PID             etf.Pid
	Name            string
	CurrentFunction string
	Status          string
	MessageQueueLen int
	Links           []etf.Pid
	Monitors        []etf.Pid
	MonitoredBy     []etf.Pid
	Dictionary      etf.Map
	TrapExit        bool
	GroupLeader     etf.Pid
	Reductions      uint64
}

type ProcessOptions struct {
	MailboxSize uint16
	GroupLeader *Process
	parent      *Process
}

// ProcessExitFunc initiates a graceful stopping of a process
type ProcessExitFunc func(from etf.Pid, reason string)

// ProcessBehaviour interface contains methods you should implement to make your own process behaviour
type ProcessBehaviour interface {
	loop(*Process, interface{}, ...interface{}) string // method which implements control flow of process
}

// Self returns self Pid
func (p *Process) Self() etf.Pid {
	return p.self
}

// Name returns registered name of the process
func (p *Process) Name() string {
	return p.name
}

// Info returns detailed information about the process
func (p *Process) Info() ProcessInfo {
	gl := p.self
	if p.groupLeader != nil {
		gl = p.groupLeader.Self()
	}
	links := p.Node.monitor.GetLinks(p.self)
	monitors := p.Node.monitor.GetMonitors(p.self)
	monitoredBy := p.Node.monitor.GetMonitoredBy(p.self)
	return ProcessInfo{
		PID:             p.self,
		Name:            p.name,
		CurrentFunction: p.currentFunction,
		GroupLeader:     gl,
		Links:           links,
		Monitors:        monitors,
		MonitoredBy:     monitoredBy,
		Status:          "running",
		MessageQueueLen: len(p.mailBox),
		TrapExit:        p.trapExit,
		Reductions:      p.reductions,
	}
}

// Call makes outgoing sync request in fashion of 'gen_call'.
// 'to' can be Pid, registered local name or a tuple {RegisteredName, NodeName}
func (p *Process) Call(to interface{}, message etf.Term) (etf.Term, error) {
	return p.CallWithTimeout(to, message, DefaultCallTimeout)
}

// CallWithTimeout makes outgoing sync request in fashion of 'gen_call' with given timeout
func (p *Process) CallWithTimeout(to interface{}, message etf.Term, timeout int) (etf.Term, error) {
	ref := p.Node.MakeRef()
	from := etf.Tuple{p.self, ref}
	msg := etf.Term(etf.Tuple{etf.Atom("$gen_call"), from, message})
	p.Send(to, msg)

	// use an explicit timer to prevent leaks: a timer is not GC'ed until it fires
	timer := time.NewTimer(time.Second * time.Duration(timeout))
	defer timer.Stop()

	for {
		select {
		case m := <-p.reply:
			ref1 := m[0].(etf.Ref)
			val := m[1].(etf.Term)
			// check message Ref
			if len(ref.Id) == 3 && ref.Id[0] == ref1.Id[0] && ref.Id[1] == ref1.Id[1] && ref.Id[2] == ref1.Id[2] {
				return val, nil
			}
			// ignore this message. waiting for the next one
		case <-timer.C:
			return nil, fmt.Errorf("timeout")
		case <-p.Context.Done():
			return nil, fmt.Errorf("stopped")
		}
	}
}
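// Editor's sketch (not part of the commit): a synchronous call to a registered
// GenServer with a 5-second timeout. The server name "gs1" is an illustrative
// assumption; the error covers both timeout and termination of the caller.
func callSketch(p *Process) {
	answer, err := p.CallWithTimeout(etf.Atom("gs1"), etf.Atom("ping"), 5)
	if err != nil {
		fmt.Println("call failed:", err)
		return
	}
	fmt.Printf("got reply: %#v\n", answer)
}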
||||
|
||||
// CallRPC evaluate rpc call with given node/MFA
|
||||
func (p *Process) CallRPC(node, module, function string, args ...etf.Term) (etf.Term, error) {
|
||||
return p.CallRPCWithTimeout(DefaultCallTimeout, node, module, function, args...)
|
||||
}
|
||||
|
||||
// CallRPCWithTimeout evaluate rpc call with given node/MFA and timeout
|
||||
func (p *Process) CallRPCWithTimeout(timeout int, node, module, function string, args ...etf.Term) (etf.Term, error) {
|
||||
lib.Log("[%s] RPC calling: %s:%s:%s", p.Node.FullName, node, module, function)
|
||||
message := etf.Tuple{
|
||||
etf.Atom("call"),
|
||||
etf.Atom(module),
|
||||
etf.Atom(function),
|
||||
etf.List(args),
|
||||
}
|
||||
to := etf.Tuple{etf.Atom("rex"), etf.Atom(node)}
|
||||
return p.CallWithTimeout(to, message, timeout)
|
||||
}
|
||||
|
||||
// CastRPC evaluates an RPC cast with the given node/MFA
|
||||
func (p *Process) CastRPC(node, module, function string, args ...etf.Term) {
|
||||
lib.Log("[%s] RPC casting: %s:%s:%s", p.Node.FullName, node, module, function)
|
||||
message := etf.Tuple{
|
||||
etf.Atom("cast"),
|
||||
etf.Atom(module),
|
||||
etf.Atom(function),
|
||||
etf.List(args),
|
||||
}
|
||||
to := etf.Tuple{etf.Atom("rex"), etf.Atom(node)}
|
||||
p.Cast(to, message)
|
||||
}
|
||||
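A sketch of the two RPC helpers above, mirroring the rpc_test.go flow further down in this diff (node and method names are the test's):

```go
// Synchronous RPC: delivered to the remote 'rex' process as {call, M, F, A}.
v, err := p.CallRPC("nodeRPC@localhost", "testMod", "testFun", 12345)

// Fire-and-forget RPC: delivered to 'rex' as a '$gen_cast'.
p.CastRPC("nodeRPC@localhost", "testMod", "testFun", 12345)
_, _ = v, err
```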
|
||||
// Send sends a message. 'to' can be a Pid, registered local name
|
||||
// or a tuple {RegisteredName, NodeName}
|
||||
func (p *Process) Send(to interface{}, message etf.Term) {
|
||||
p.Node.registrar.route(p.self, to, message)
|
||||
}
|
||||
|
||||
// SendAfter starts a timer. When the timer expires, the message is sent to the process identified by 'to'.
|
||||
// 'to' can be a Pid, registered local name or a tuple {RegisteredName, NodeName}.
|
||||
// Returns a cancel function that discards sending the message
|
||||
func (p *Process) SendAfter(to interface{}, message etf.Term, after time.Duration) context.CancelFunc {
|
||||
// TODO: should we limit the number of timers/goroutines created this way?
|
||||
ctx, cancel := context.WithCancel(p.Context)
|
||||
go func() {
|
||||
// use an explicitly stopped timer to prevent leaks: a timer is not GCed until it fires
|
||||
timer := time.NewTimer(after)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-timer.C:
|
||||
p.Node.registrar.route(p.self, to, message)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}()
|
||||
return cancel
|
||||
}
|
||||
|
||||
// CastAfter is a simple wrapper around SendAfter that sends a '$gen_cast' message
|
||||
func (p *Process) CastAfter(to interface{}, message etf.Term, after time.Duration) context.CancelFunc {
|
||||
msg := etf.Term(etf.Tuple{etf.Atom("$gen_cast"), message})
|
||||
return p.SendAfter(to, msg, after)
|
||||
}
|
||||
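A sketch of the delayed-send API, assuming a process `p`; the message and delay are illustrative:

```go
// Schedule a message to ourselves in 10 seconds...
cancel := p.SendAfter(p.Self(), etf.Atom("tick"), 10*time.Second)
// ...and discard it before it fires; the goroutine above exits via ctx.Done().
cancel()
```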
|
||||
// Cast sends a message in fashion of 'gen_cast'.
|
||||
// 'to' can be a Pid, registered local name
|
||||
// or a tuple {RegisteredName, NodeName}
|
||||
func (p *Process) Cast(to interface{}, message etf.Term) {
|
||||
msg := etf.Term(etf.Tuple{etf.Atom("$gen_cast"), message})
|
||||
p.Node.registrar.route(p.self, to, msg)
|
||||
}
|
||||
|
||||
// MonitorProcess creates a monitor between the processes. When a process monitor
// is triggered, a 'DOWN' message is sent that has the following
|
||||
// pattern: {'DOWN', MonitorRef, Type, Object, Info}
|
||||
func (p *Process) MonitorProcess(to etf.Pid) etf.Ref {
|
||||
return p.Node.monitor.MonitorProcess(p.self, to)
|
||||
}
|
||||
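An illustrative sketch of the monitor flow, assuming processes `p` and `other`; the exact element types of the 'DOWN' tuple are an assumption based on the comment above:

```go
ref := p.MonitorProcess(other.Self())
// If `other` terminates, p's mailbox receives a message shaped like:
//   {'DOWN', ref, "process", other.Self(), reason}
// Stop watching when no longer interested:
p.DemonitorProcess(ref)
```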
|
||||
// Link creates a link between the calling process and another process
|
||||
func (p *Process) Link(with etf.Pid) {
|
||||
p.Node.monitor.Link(p.self, with)
|
||||
}
|
||||
|
||||
// Unlink removes the link, if there is one, between the calling process and the process referred to by Pid.
|
||||
func (p *Process) Unlink(with etf.Pid) {
|
||||
p.Node.monitor.Unlink(p.self, with)
|
||||
}
|
||||
|
||||
// MonitorNode creates monitor between the current process and node. If Node fails or does not exist,
|
||||
// the message {nodedown, Node} is delivered to the process.
|
||||
func (p *Process) MonitorNode(name string) etf.Ref {
|
||||
return p.Node.monitor.MonitorNode(p.self, name)
|
||||
}
|
||||
|
||||
// DemonitorProcess removes the monitor created by MonitorProcess
|
||||
func (p *Process) DemonitorProcess(ref etf.Ref) {
|
||||
p.Node.monitor.DemonitorProcess(ref)
|
||||
}
|
||||
|
||||
// DemonitorNode removes the monitor created by MonitorNode
|
||||
func (p *Process) DemonitorNode(ref etf.Ref) {
|
||||
p.Node.monitor.DemonitorNode(ref)
|
||||
}
|
||||
|
||||
// ListEnv returns map of configured environment variables.
|
||||
// Process' environment is also inherited from environment variables
|
||||
// of groupLeader (if it's started as a child of an Application/Supervisor)
|
||||
func (p *Process) ListEnv() map[string]interface{} {
|
||||
var env map[string]interface{}
|
||||
if p.groupLeader == nil {
|
||||
env = make(map[string]interface{})
|
||||
} else {
|
||||
env = p.groupLeader.ListEnv()
|
||||
}
|
||||
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
for key, value := range p.env {
|
||||
env[key] = value
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
// SetEnv sets an environment variable with the given name
|
||||
func (p *Process) SetEnv(name string, value interface{}) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if p.env == nil {
|
||||
p.env = make(map[string]interface{})
|
||||
}
|
||||
p.env[name] = value
|
||||
}
|
||||
|
||||
// GetEnv returns the value associated with the given environment name.
|
||||
func (p *Process) GetEnv(name string) interface{} {
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
|
||||
if value, ok := p.env[name]; ok {
|
||||
return value
|
||||
}
|
||||
|
||||
if p.groupLeader != nil {
|
||||
return p.groupLeader.GetEnv(name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
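A sketch of the environment inheritance described above; `sup`, `child`, and the key/value are hypothetical, and the child is assumed to have been spawned with `sup` as its GroupLeader:

```go
sup.SetEnv("dbURL", "postgres://127.0.0.1:5432/demo")
// The lookup misses the child's own env map and falls back to the group leader:
url := child.GetEnv("dbURL") // == "postgres://127.0.0.1:5432/demo"
_ = url
```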
|
||||
// Wait waits until the process has stopped
|
||||
func (p *Process) Wait() {
|
||||
<-p.Context.Done() // closed once context canceled
|
||||
}
|
||||
|
||||
// WaitWithTimeout waits until the process has stopped. Returns ErrTimeout
// if the given timeout is exceeded
|
||||
func (p *Process) WaitWithTimeout(d time.Duration) error {
|
||||
timer := time.NewTimer(d)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-timer.C:
|
||||
return ErrTimeout
|
||||
case <-p.Context.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// IsAlive returns whether the process is alive
|
||||
func (p *Process) IsAlive() bool {
|
||||
return p.Context.Err() == nil
|
||||
}
|
||||
|
||||
// GetChildren returns the list of child Pids (Application, Supervisor)
|
||||
func (p *Process) GetChildren() []etf.Pid {
|
||||
c, err := p.directRequest("getChildren", nil)
|
||||
if err == nil {
|
||||
return c.([]etf.Pid)
|
||||
}
|
||||
return []etf.Pid{}
|
||||
}
|
||||
|
||||
// GetState returns a string representation of the process state (GenServer)
|
||||
func (p *Process) GetState() string {
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
return fmt.Sprintf("%#v", p.state)
|
||||
}
|
||||
|
||||
func (p *Process) directRequest(id string, request interface{}) (interface{}, error) {
|
||||
reply := make(chan directMessage)
|
||||
t := time.Second * time.Duration(5)
|
||||
m := directMessage{
|
||||
id: id,
|
||||
message: request,
|
||||
reply: reply,
|
||||
}
|
||||
timer := time.NewTimer(t)
|
||||
defer timer.Stop()
|
||||
// sending request
|
||||
select {
|
||||
case p.direct <- m:
|
||||
timer.Reset(t)
|
||||
case <-timer.C:
|
||||
return nil, ErrProcessBusy
|
||||
}
|
||||
|
||||
// receiving response
|
||||
select {
|
||||
case response := <-reply:
|
||||
if response.err != nil {
|
||||
return nil, response.err
|
||||
}
|
||||
|
||||
return response.message, nil
|
||||
case <-timer.C:
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
}
|
|
@ -0,0 +1,610 @@
|
|||
package ergonode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/halturin/ergonode/etf"
|
||||
"github.com/halturin/ergonode/lib"
|
||||
)
|
||||
|
||||
const (
|
||||
startPID = 1000
|
||||
)
|
||||
|
||||
type registerProcessRequest struct {
|
||||
name string
|
||||
process *Process
|
||||
err chan error
|
||||
}
|
||||
|
||||
type registerNameRequest struct {
|
||||
name string
|
||||
pid etf.Pid
|
||||
err chan error
|
||||
}
|
||||
|
||||
type registerPeerRequest struct {
|
||||
name string
|
||||
peer peer
|
||||
err chan error
|
||||
}
|
||||
|
||||
type registerAppRequest struct {
|
||||
name string
|
||||
spec *ApplicationSpec
|
||||
err chan error
|
||||
}
|
||||
|
||||
type routeByPidRequest struct {
|
||||
from etf.Pid
|
||||
pid etf.Pid
|
||||
message etf.Term
|
||||
retries int
|
||||
}
|
||||
|
||||
type routeByNameRequest struct {
|
||||
from etf.Pid
|
||||
name string
|
||||
message etf.Term
|
||||
retries int
|
||||
}
|
||||
|
||||
type routeByTupleRequest struct {
|
||||
from etf.Pid
|
||||
tuple etf.Tuple
|
||||
message etf.Term
|
||||
retries int
|
||||
}
|
||||
|
||||
type routeRawRequest struct {
|
||||
nodename string
|
||||
message etf.Term
|
||||
retries int
|
||||
}
|
||||
|
||||
type requestProcessDetails struct {
|
||||
name string
|
||||
pid etf.Pid
|
||||
reply chan *Process
|
||||
}
|
||||
|
||||
type requestApplicationSpec struct {
|
||||
name string
|
||||
reply chan *ApplicationSpec
|
||||
}
|
||||
|
||||
type requestProcessList struct {
|
||||
reply chan []*Process
|
||||
}
|
||||
|
||||
type requestApplicationList struct {
|
||||
reply chan []*ApplicationSpec
|
||||
}
|
||||
|
||||
type registrarChannels struct {
|
||||
process chan registerProcessRequest
|
||||
unregisterProcess chan etf.Pid
|
||||
name chan registerNameRequest
|
||||
unregisterName chan string
|
||||
peer chan registerPeerRequest
|
||||
unregisterPeer chan string
|
||||
app chan registerAppRequest
|
||||
unregisterApp chan string
|
||||
|
||||
routeByPid chan routeByPidRequest
|
||||
routeByName chan routeByNameRequest
|
||||
routeByTuple chan routeByTupleRequest
|
||||
routeRaw chan routeRawRequest
|
||||
|
||||
commands chan interface{}
|
||||
}
|
||||
|
||||
type registrar struct {
|
||||
nextPID uint32
|
||||
nodeName string
|
||||
creation byte
|
||||
|
||||
node *Node
|
||||
|
||||
channels registrarChannels
|
||||
|
||||
names map[string]etf.Pid
|
||||
processes map[etf.Pid]*Process
|
||||
peers map[string]peer
|
||||
apps map[string]*ApplicationSpec
|
||||
}
|
||||
|
||||
func createRegistrar(node *Node) *registrar {
|
||||
r := registrar{
|
||||
nextPID: startPID,
|
||||
nodeName: node.FullName,
|
||||
creation: byte(1),
|
||||
node: node,
|
||||
channels: registrarChannels{
|
||||
process: make(chan registerProcessRequest, 10),
|
||||
unregisterProcess: make(chan etf.Pid, 10),
|
||||
name: make(chan registerNameRequest, 10),
|
||||
unregisterName: make(chan string, 10),
|
||||
peer: make(chan registerPeerRequest, 10),
|
||||
unregisterPeer: make(chan string, 10),
|
||||
app: make(chan registerAppRequest, 10),
|
||||
unregisterApp: make(chan string, 10),
|
||||
|
||||
routeByPid: make(chan routeByPidRequest, 100),
|
||||
routeByName: make(chan routeByNameRequest, 100),
|
||||
routeByTuple: make(chan routeByTupleRequest, 100),
|
||||
routeRaw: make(chan routeRawRequest, 100),
|
||||
|
||||
commands: make(chan interface{}, 100),
|
||||
},
|
||||
|
||||
names: make(map[string]etf.Pid),
|
||||
processes: make(map[etf.Pid]*Process),
|
||||
peers: make(map[string]peer),
|
||||
apps: make(map[string]*ApplicationSpec),
|
||||
}
|
||||
go r.run()
|
||||
return &r
|
||||
}
|
||||
|
||||
func (r *registrar) createNewPID() etf.Pid {
|
||||
// http://erlang.org/doc/apps/erts/erl_ext_dist.html#pid_ext
|
||||
// https://stackoverflow.com/questions/243363/can-someone-explain-the-structure-of-a-pid-in-erlang
|
||||
i := atomic.AddUint32(&r.nextPID, 1)
|
||||
return etf.Pid{
|
||||
Node: etf.Atom(r.nodeName),
|
||||
Id: i,
|
||||
Serial: 1,
|
||||
Creation: byte(r.creation),
|
||||
}
|
||||
|
||||
}
|
||||
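A small sketch of the allocation behaviour, assuming a registrar `r`; the registrar test near the end of this diff relies on exactly this consecutive-Id property:

```go
p1 := r.createNewPID() // etf.Pid{Node: ..., Id: n, Serial: 1, Creation: 1}
p2 := r.createNewPID() // Id == n+1; safe under concurrent callers via atomic.AddUint32
_, _ = p1, p2
```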
|
||||
func (r *registrar) run() {
|
||||
for {
|
||||
select {
|
||||
case p := <-r.channels.process:
|
||||
if p.name != "" {
|
||||
if _, exist := r.names[p.name]; exist {
|
||||
p.err <- ErrNameIsTaken
|
||||
continue
|
||||
}
|
||||
r.names[p.name] = p.process.self
|
||||
}
|
||||
|
||||
r.processes[p.process.self] = p.process
|
||||
p.err <- nil
|
||||
|
||||
case up := <-r.channels.unregisterProcess:
|
||||
if p, ok := r.processes[up]; ok {
|
||||
lib.Log("[%s] REGISTRAR unregistering process: %v", r.node.FullName, p.self)
|
||||
delete(r.processes, up)
|
||||
if (p.name) != "" {
|
||||
lib.Log("[%s] REGISTRAR unregistering name (%v): %s", r.node.FullName, p.self, p.name)
|
||||
delete(r.names, p.name)
|
||||
}
|
||||
|
||||
// delete names registered with this pid
|
||||
for name, pid := range r.names {
|
||||
if p.self == pid {
|
||||
delete(r.names, name)
|
||||
}
|
||||
}
|
||||
|
||||
// detach this process from any app spec that references it
|
||||
for _, spec := range r.apps {
|
||||
if spec.process != nil && spec.process.self == p.self {
|
||||
spec.process = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case n := <-r.channels.name:
|
||||
lib.Log("[%s] registering name %v", r.node.FullName, n)
|
||||
if _, ok := r.names[n.name]; ok {
|
||||
// already registered
|
||||
n.err <- ErrNameIsTaken
|
||||
continue
|
||||
}
|
||||
r.names[n.name] = n.pid
|
||||
n.err <- nil
|
||||
|
||||
case un := <-r.channels.unregisterName:
|
||||
lib.Log("[%s] unregistering name %v", r.node.FullName, un)
|
||||
delete(r.names, un)
|
||||
|
||||
case p := <-r.channels.peer:
|
||||
lib.Log("[%s] registering peer %v", r.node.FullName, p)
|
||||
if _, ok := r.peers[p.name]; ok {
|
||||
// already registered
|
||||
p.err <- ErrNameIsTaken
|
||||
continue
|
||||
}
|
||||
r.peers[p.name] = p.peer
|
||||
p.err <- nil
|
||||
|
||||
case up := <-r.channels.unregisterPeer:
|
||||
lib.Log("[%s] unregistering peer %v", r.node.FullName, up)
|
||||
if _, ok := r.peers[up]; ok {
|
||||
r.node.monitor.NodeDown(up)
|
||||
delete(r.peers, up)
|
||||
}
|
||||
|
||||
case a := <-r.channels.app:
|
||||
lib.Log("[%s] registering app %v", r.node.FullName, a)
|
||||
if _, ok := r.apps[a.name]; ok {
|
||||
// already loaded
|
||||
a.err <- ErrAppAlreadyLoaded
|
||||
continue
|
||||
}
|
||||
r.apps[a.name] = a.spec
|
||||
a.err <- nil
|
||||
|
||||
case ua := <-r.channels.unregisterApp:
|
||||
lib.Log("[%s] unregistering app %v", r.node.FullName, ua)
|
||||
delete(r.apps, ua)
|
||||
|
||||
case <-r.node.context.Done():
|
||||
lib.Log("[%s] Finalizing (KILL) registrar (total number of processes: %d)", r.node.FullName, len(r.processes))
|
||||
for _, p := range r.processes {
|
||||
p.Kill()
|
||||
}
|
||||
return
|
||||
|
||||
case bp := <-r.channels.routeByPid:
|
||||
lib.Log("[%s] sending message by pid %v", r.node.FullName, bp.pid)
|
||||
if bp.retries > 2 {
|
||||
// drop the message after 3 failed delivery attempts
|
||||
continue
|
||||
}
|
||||
|
||||
if string(bp.pid.Node) == r.nodeName {
|
||||
// local route
|
||||
if p, ok := r.processes[bp.pid]; ok {
|
||||
p.mailBox <- etf.Tuple{bp.from, bp.message}
|
||||
}
|
||||
continue
|
||||
}
|
||||
peer, ok := r.peers[string(bp.pid.Node)]
|
||||
if !ok {
|
||||
// initiate connection and make yet another attempt to deliver this message
|
||||
go func() {
|
||||
if err := r.node.connect(bp.pid.Node); err != nil {
|
||||
lib.Log("[%s] can't connect to %v: %s", r.node.FullName, bp.pid.Node, err)
|
||||
}
|
||||
|
||||
bp.retries++
|
||||
r.channels.routeByPid <- bp
|
||||
}()
|
||||
continue
|
||||
}
|
||||
// peer.send <- []etf.Term{etf.Tuple{REG_SEND, bp.from, etf.Atom(""), bp.pid}, bp.message}
|
||||
peer.send <- []etf.Term{etf.Tuple{distProtoSEND, etf.Atom(""), bp.pid}, bp.message}
|
||||
|
||||
case bn := <-r.channels.routeByName:
|
||||
lib.Log("[%s] sending message by name %v", r.node.FullName, bn.name)
|
||||
if pid, ok := r.names[bn.name]; ok {
|
||||
r.route(bn.from, pid, bn.message)
|
||||
}
|
||||
|
||||
case bt := <-r.channels.routeByTuple:
|
||||
lib.Log("[%s] sending message by tuple %v", r.node.FullName, bt.tuple)
|
||||
if bt.retries > 2 {
|
||||
// drop the message after 3 failed delivery attempts
|
||||
continue
|
||||
}
|
||||
|
||||
toNode := etf.Atom("")
|
||||
switch x := bt.tuple.Element(2).(type) {
|
||||
case etf.Atom:
|
||||
toNode = x
|
||||
default:
|
||||
toNode = etf.Atom(bt.tuple.Element(2).(string))
|
||||
}
|
||||
|
||||
toProcessName := bt.tuple.Element(1)
|
||||
if toNode == etf.Atom(r.nodeName) {
|
||||
r.route(bt.from, toProcessName, bt.message)
|
||||
continue
|
||||
}
|
||||
|
||||
peer, ok := r.peers[string(toNode)]
|
||||
if !ok {
|
||||
// initiate connection and make yet another attempt to deliver this message
|
||||
go func() {
|
||||
r.node.connect(toNode)
|
||||
bt.retries++
|
||||
r.channels.routeByTuple <- bt
|
||||
}()
|
||||
|
||||
continue
|
||||
}
|
||||
peer.send <- []etf.Term{etf.Tuple{distProtoREG_SEND, bt.from, etf.Atom(""), toProcessName}, bt.message}
|
||||
|
||||
case rw := <-r.channels.routeRaw:
|
||||
if rw.retries > 2 {
|
||||
// drop the message after 3 failed delivery attempts
|
||||
continue
|
||||
}
|
||||
peer, ok := r.peers[rw.nodename]
|
||||
if !ok {
|
||||
// initiate connection and make yet another attempt to deliver this message
|
||||
go func() {
|
||||
if err := r.node.connect(etf.Atom(rw.nodename)); err != nil {
|
||||
lib.Log("[%s] can't connect to %v: %s", r.node.FullName, rw.nodename, err)
|
||||
}
|
||||
|
||||
rw.retries++
|
||||
r.channels.routeRaw <- rw
|
||||
}()
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
peer.send <- []etf.Term{rw.message}
|
||||
case cmd := <-r.channels.commands:
|
||||
r.handleCommand(cmd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *registrar) RegisterProcess(object interface{}) (*Process, error) {
|
||||
opts := ProcessOptions{
|
||||
MailboxSize: DefaultProcessMailboxSize, // size of channel for regular messages
|
||||
}
|
||||
return r.RegisterProcessExt("", object, opts)
|
||||
}
|
||||
|
||||
func (r *registrar) RegisterProcessExt(name string, object interface{}, opts ProcessOptions) (*Process, error) {
|
||||
|
||||
mailboxSize := DefaultProcessMailboxSize
|
||||
if opts.MailboxSize > 0 {
|
||||
mailboxSize = int(opts.MailboxSize)
|
||||
}
|
||||
|
||||
parentContext := r.node.context
|
||||
if opts.parent != nil {
|
||||
parentContext = opts.parent.Context
|
||||
}
|
||||
ctx, kill := context.WithCancel(parentContext)
|
||||
|
||||
pid := r.createNewPID()
|
||||
|
||||
exitChannel := make(chan gracefulExitRequest)
|
||||
exit := func(from etf.Pid, reason string) {
|
||||
lib.Log("[%s] EXIT: %#v with reason: %s", r.node.FullName, pid, reason)
|
||||
ex := gracefulExitRequest{
|
||||
from: from,
|
||||
reason: reason,
|
||||
}
|
||||
exitChannel <- ex
|
||||
}
|
||||
|
||||
process := &Process{
|
||||
mailBox: make(chan etf.Tuple, mailboxSize),
|
||||
ready: make(chan bool),
|
||||
gracefulExit: exitChannel,
|
||||
direct: make(chan directMessage),
|
||||
self: pid,
|
||||
groupLeader: opts.GroupLeader,
|
||||
Context: ctx,
|
||||
Kill: kill,
|
||||
Exit: exit,
|
||||
name: name,
|
||||
Node: r.node,
|
||||
reply: make(chan etf.Tuple, 2),
|
||||
object: object,
|
||||
}
|
||||
|
||||
req := registerProcessRequest{
|
||||
name: name,
|
||||
process: process,
|
||||
err: make(chan error),
|
||||
}
|
||||
|
||||
r.channels.process <- req
|
||||
if err := <-req.err; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return process, nil
|
||||
}
|
||||
|
||||
// UnregisterProcess unregisters a process by Pid
|
||||
func (r *registrar) UnregisterProcess(pid etf.Pid) {
|
||||
r.channels.unregisterProcess <- pid
|
||||
}
|
||||
|
||||
// RegisterName associates the given name with the pid
|
||||
func (r *registrar) RegisterName(name string, pid etf.Pid) error {
|
||||
req := registerNameRequest{
|
||||
name: name,
|
||||
pid: pid,
|
||||
err: make(chan error),
|
||||
}
|
||||
defer close(req.err)
|
||||
r.channels.name <- req
|
||||
return <-req.err
|
||||
}
|
||||
|
||||
// UnregisterName unregisters the named process
|
||||
func (r *registrar) UnregisterName(name string) {
|
||||
r.channels.unregisterName <- name
|
||||
}
|
||||
|
||||
func (r *registrar) RegisterPeer(name string, p peer) error {
|
||||
req := registerPeerRequest{
|
||||
name: name,
|
||||
peer: p,
|
||||
err: make(chan error),
|
||||
}
|
||||
defer close(req.err)
|
||||
r.channels.peer <- req
|
||||
return <-req.err
|
||||
}
|
||||
|
||||
func (r *registrar) UnregisterPeer(name string) {
|
||||
r.channels.unregisterPeer <- name
|
||||
}
|
||||
|
||||
func (r *registrar) RegisterApp(name string, spec *ApplicationSpec) error {
|
||||
req := registerAppRequest{
|
||||
name: name,
|
||||
spec: spec,
|
||||
err: make(chan error),
|
||||
}
|
||||
defer close(req.err)
|
||||
r.channels.app <- req
|
||||
return <-req.err
|
||||
}
|
||||
|
||||
func (r *registrar) UnregisterApp(name string) {
|
||||
r.channels.unregisterApp <- name
|
||||
}
|
||||
|
||||
func (r *registrar) GetApplicationSpecByName(name string) *ApplicationSpec {
|
||||
reply := make(chan *ApplicationSpec)
|
||||
req := requestApplicationSpec{
|
||||
name: name,
|
||||
reply: reply,
|
||||
}
|
||||
r.channels.commands <- req
|
||||
return <-reply
|
||||
}
|
||||
|
||||
// GetProcessByPid returns Process struct for the given Pid. Returns nil if it doesn't exist (not found)
|
||||
func (r *registrar) GetProcessByPid(pid etf.Pid) *Process {
|
||||
reply := make(chan *Process)
|
||||
req := requestProcessDetails{
|
||||
pid: pid,
|
||||
reply: reply,
|
||||
}
|
||||
r.channels.commands <- req
|
||||
if p := <-reply; p != nil {
|
||||
return p
|
||||
}
|
||||
// unknown process
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetProcessByName returns Process struct for the given name. Returns nil if it doesn't exist (not found)
|
||||
func (r *registrar) GetProcessByName(name string) *Process {
|
||||
reply := make(chan *Process)
|
||||
req := requestProcessDetails{
|
||||
name: name,
|
||||
reply: reply,
|
||||
}
|
||||
r.channels.commands <- req
|
||||
if p := <-reply; p != nil {
|
||||
return p
|
||||
}
|
||||
// unknown process
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *registrar) ProcessList() []*Process {
|
||||
req := requestProcessList{
|
||||
reply: make(chan []*Process),
|
||||
}
|
||||
r.channels.commands <- req
|
||||
return <-req.reply
|
||||
}
|
||||
|
||||
func (r *registrar) ApplicationList() []*ApplicationSpec {
|
||||
req := requestApplicationList{
|
||||
reply: make(chan []*ApplicationSpec),
|
||||
}
|
||||
r.channels.commands <- req
|
||||
return <-req.reply
|
||||
}
|
||||
|
||||
// route routes a message to a local/remote process
|
||||
func (r *registrar) route(from etf.Pid, to etf.Term, message etf.Term) {
|
||||
switch tto := to.(type) {
|
||||
case etf.Pid:
|
||||
req := routeByPidRequest{
|
||||
from: from,
|
||||
pid: tto,
|
||||
message: message,
|
||||
}
|
||||
r.channels.routeByPid <- req
|
||||
|
||||
case etf.Tuple:
|
||||
if len(tto) == 2 {
|
||||
req := routeByTupleRequest{
|
||||
from: from,
|
||||
tuple: tto,
|
||||
message: message,
|
||||
}
|
||||
r.channels.routeByTuple <- req
|
||||
}
|
||||
|
||||
case string:
|
||||
req := routeByNameRequest{
|
||||
from: from,
|
||||
name: tto,
|
||||
message: message,
|
||||
}
|
||||
r.channels.routeByName <- req
|
||||
|
||||
case etf.Atom:
|
||||
req := routeByNameRequest{
|
||||
from: from,
|
||||
name: string(tto),
|
||||
message: message,
|
||||
}
|
||||
r.channels.routeByName <- req
|
||||
default:
|
||||
lib.Log("[%s] unknow sender type %#v", r.node.FullName, tto)
|
||||
}
|
||||
}
|
||||
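The three addressing forms accepted by `route` correspond to what Process.Send takes as 'to'; a sketch with hypothetical names:

```go
var somePid etf.Pid      // hypothetical destination pid
msg := etf.Atom("hello") // hypothetical payload
p.Send(somePid, msg)     // etf.Pid: routed locally or forwarded to the owning peer
p.Send("worker", msg)    // string: resolved through r.names
p.Send(etf.Tuple{etf.Atom("worker"), etf.Atom("node2@localhost")}, msg) // {Name, Node}: remote REG_SEND
```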
|
||||
func (r *registrar) routeRaw(nodename etf.Atom, message etf.Term) {
|
||||
req := routeRawRequest{
|
||||
nodename: string(nodename),
|
||||
message: message,
|
||||
}
|
||||
r.channels.routeRaw <- req
|
||||
}
|
||||
|
||||
func (r *registrar) handleCommand(cmd interface{}) {
|
||||
switch c := cmd.(type) {
|
||||
case requestProcessDetails:
|
||||
pid := c.pid
|
||||
if c.name != "" {
|
||||
// requesting Process by name
|
||||
if p, ok := r.names[c.name]; ok {
|
||||
pid = p
|
||||
}
|
||||
}
|
||||
|
||||
if p, ok := r.processes[pid]; ok {
|
||||
c.reply <- p
|
||||
} else {
|
||||
c.reply <- nil
|
||||
}
|
||||
|
||||
case requestProcessList:
|
||||
list := []*Process{}
|
||||
for _, p := range r.processes {
|
||||
list = append(list, p)
|
||||
}
|
||||
c.reply <- list
|
||||
|
||||
case requestApplicationSpec:
|
||||
if spec, ok := r.apps[c.name]; ok {
|
||||
c.reply <- spec
|
||||
return
|
||||
}
|
||||
c.reply <- nil
|
||||
|
||||
case requestApplicationList:
|
||||
list := []*ApplicationSpec{}
|
||||
for _, a := range r.apps {
|
||||
list = append(list, a)
|
||||
}
|
||||
c.reply <- list
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,162 @@
|
|||
package ergonode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/halturin/ergonode/etf"
|
||||
)
|
||||
|
||||
type TestRegistrarGenserver struct {
|
||||
GenServer
|
||||
}
|
||||
|
||||
func (trg *TestRegistrarGenserver) Init(p *Process, args ...interface{}) (state interface{}) {
|
||||
return nil
|
||||
}
|
||||
func (trg *TestRegistrarGenserver) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
|
||||
// fmt.Printf("TestRegistrarGenserver ({%s, %s}): HandleCast: %#v\n", trg.process.name, trg.process.Node.FullName, message)
|
||||
return "noreply", state
|
||||
}
|
||||
func (trg *TestRegistrarGenserver) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
|
||||
// fmt.Printf("TestRegistrarGenserver ({%s, %s}): HandleCall: %#v, From: %#v\n", trg.process.name, trg.process.Node.FullName, message, from)
|
||||
return "reply", message, state
|
||||
}
|
||||
func (trg *TestRegistrarGenserver) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
|
||||
// fmt.Printf("TestRegistrarGenserver ({%s, %s}): HandleInfo: %#v\n", trg.process.name, trg.process.Node.FullName, message)
|
||||
return "noreply", state
|
||||
}
|
||||
func (trg *TestRegistrarGenserver) Terminate(reason string, state interface{}) {
|
||||
// fmt.Printf("\nTestRegistrarGenserver ({%s, %s}): Terminate: %#v\n", trg.process.name, trg.process.Node.FullName, reason)
|
||||
}
|
||||
|
||||
func TestRegistrar(t *testing.T) {
|
||||
fmt.Printf("\n=== Test Registrar\n")
|
||||
fmt.Printf("Starting nodes: nodeR1@localhost, nodeR2@localhost: ")
|
||||
node1 := CreateNode("nodeR1@localhost", "cookies", NodeOptions{})
|
||||
node2 := CreateNode("nodeR2@localhost", "cookies", NodeOptions{})
|
||||
if node1 == nil || node2 == nil {
|
||||
t.Fatal("can't start nodes")
|
||||
} else {
|
||||
fmt.Println("OK")
|
||||
}
|
||||
|
||||
gs := &TestRegistrarGenserver{}
|
||||
fmt.Printf("Starting TestRegistrarGenserver and registering as 'gs1' on %s: ", node1.FullName)
|
||||
node1gs1, _ := node1.Spawn("gs1", ProcessOptions{}, gs, nil)
|
||||
if _, ok := node1.registrar.processes[node1gs1.Self()]; !ok {
|
||||
message := fmt.Sprintf("missing process %v on %s", node1gs1.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
fmt.Printf("...registering name 'test' related to %v: ", node1gs1.Self())
|
||||
if e := node1.Register("test", node1gs1.Self()); e != nil {
|
||||
t.Fatal(e)
|
||||
} else {
|
||||
if e := node1.Register("test", node1gs1.Self()); e == nil {
|
||||
t.Fatal("registered duplicate name")
|
||||
}
|
||||
}
|
||||
fmt.Println("OK")
|
||||
fmt.Printf("...unregistering name 'test' related to %v: ", node1gs1.Self())
|
||||
node1.Unregister("test")
|
||||
if e := node1.Register("test", node1gs1.Self()); e != nil {
|
||||
t.Fatal(e)
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
fmt.Printf("Starting TestRegistrarGenserver and registering as 'gs2' on %s: ", node2.FullName)
|
||||
node2gs2, _ := node2.Spawn("gs2", ProcessOptions{}, gs, nil)
|
||||
if _, ok := node2.registrar.processes[node2gs2.Self()]; !ok {
|
||||
message := fmt.Sprintf("missing process %v on %s", node2gs2.Self(), node2.FullName)
|
||||
t.Fatal(message)
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
// the tests below are really about monitor/link, tbh :). let them live here for a while
|
||||
|
||||
ref := node1gs1.MonitorProcess(node2gs2.Self())
|
||||
// setting remote monitor is async.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
if pids, ok := node1.monitor.processes[node2gs2.Self()]; !ok {
|
||||
message := fmt.Sprintf("missing monitor %v on %s", node2gs2.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
found := false
|
||||
for i := range pids {
|
||||
if pids[i].pid == node1gs1.Self() {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
message := fmt.Sprintf("missing monitoring by %v on %s", node1gs1.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
}
|
||||
}
|
||||
|
||||
node1gs1.DemonitorProcess(ref)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
if pids, ok := node1.monitor.processes[node2gs2.Self()]; ok {
|
||||
message := fmt.Sprintf("monitor %v on %s is still present", node2gs2.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
found := false
|
||||
for i := range pids {
|
||||
if pids[i].pid == node1gs1.Self() {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if found {
|
||||
message := fmt.Sprintf("monitoring by %v on %s is still present", node1gs1.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
}
|
||||
}
|
||||
|
||||
node1gs1.Link(node2gs2.Self())
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
if pids, ok := node1.monitor.links[node2gs2.Self()]; !ok {
|
||||
message := fmt.Sprintf("missing link %v on %s", node2gs2.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
found := false
|
||||
for i := range pids {
|
||||
if pids[i] == node1gs1.Self() {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
message := fmt.Sprintf("missing link by %v on %s", node1gs1.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
}
|
||||
}
|
||||
if pids, ok := node1.monitor.links[node1gs1.Self()]; !ok {
|
||||
message := fmt.Sprintf("missing link %v on %s", node1gs1.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
found := false
|
||||
for i := range pids {
|
||||
if pids[i] == node2gs2.Self() {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
message := fmt.Sprintf("missing link by %v on %s", node2gs2.Self(), node1.FullName)
|
||||
t.Fatal(message)
|
||||
}
|
||||
}
|
||||
|
||||
x := node1.registrar.createNewPID()
|
||||
xID := x.Id
|
||||
for i := xID; i < xID+10; i++ {
|
||||
x = node1.registrar.createNewPID()
|
||||
}
|
||||
if xID+10 != x.Id {
|
||||
t.Fatalf("malformed PID creation sequence")
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,189 @@
|
|||
package ergonode
|
||||
|
||||
// https://github.com/erlang/otp/blob/master/lib/kernel/src/rpc.erl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/halturin/ergonode/etf"
|
||||
"github.com/halturin/ergonode/lib"
|
||||
)
|
||||
|
||||
type rpcFunction func(...etf.Term) etf.Term
|
||||
|
||||
type modFun struct {
|
||||
module string
|
||||
function string
|
||||
}
|
||||
|
||||
var (
|
||||
allowedModFun = []string{
|
||||
"observer_backend",
|
||||
}
|
||||
)
|
||||
|
||||
type rex struct {
|
||||
GenServer
|
||||
process *Process
|
||||
methods map[modFun]rpcFunction
|
||||
}
|
||||
|
||||
// Init initializes process state using arbitrary arguments
|
||||
// Init(...) -> state
|
||||
func (r *rex) Init(p *Process, args ...interface{}) (state interface{}) {
|
||||
lib.Log("REX: Init: %#v", args)
|
||||
r.process = p
|
||||
r.methods = make(map[modFun]rpcFunction)
|
||||
|
||||
for i := range allowedModFun {
|
||||
mf := modFun{
|
||||
allowedModFun[i],
|
||||
"*",
|
||||
}
|
||||
r.methods[mf] = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleCast -> ("noreply", state) - noreply
|
||||
// ("stop", reason) - stop with reason
|
||||
func (r *rex) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
|
||||
lib.Log("REX: HandleCast: %#v", message)
|
||||
return "noreply", state
|
||||
}
|
||||
|
||||
// HandleCall serves incoming messages sent via gen_server:call
|
||||
// HandleCall -> ("reply", message, state) - reply
|
||||
// ("noreply", _, state) - noreply
|
||||
// ("stop", reason, _) - normal stop
|
||||
func (r *rex) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
|
||||
lib.Log("REX: HandleCall: %#v, From: %#v", message, from)
|
||||
switch m := message.(type) {
|
||||
case etf.Tuple:
|
||||
//etf.Tuple{"call", "observer_backend", "sys_info",
|
||||
// etf.List{}, etf.Pid{Node:"erl-examplenode@127.0.0.1", Id:0x46, Serial:0x0, Creation:0x2}}
|
||||
switch m.Element(1) {
|
||||
case etf.Atom("call"):
|
||||
module := m.Element(2).(etf.Atom)
|
||||
function := m.Element(3).(etf.Atom)
|
||||
args := m.Element(4).(etf.List)
|
||||
reply, state1 := r.handleRPC(module, function, args, state)
|
||||
if reply != nil {
|
||||
return "reply", reply, state1
|
||||
}
|
||||
|
||||
to := etf.Tuple{string(module), r.process.Node.FullName}
|
||||
m := etf.Tuple{m.Element(3), m.Element(4)}
|
||||
reply, err := r.process.Call(to, m)
|
||||
if err != nil {
|
||||
reply = etf.Term(etf.Tuple{etf.Atom("error"), err})
|
||||
}
|
||||
return "reply", reply, state
|
||||
|
||||
case etf.Atom("$provide"):
|
||||
module := m.Element(2).(etf.Atom)
|
||||
function := m.Element(3).(etf.Atom)
|
||||
fun := m.Element(4).(rpcFunction)
|
||||
mf := modFun{
|
||||
module: string(module),
|
||||
function: string(function),
|
||||
}
|
||||
if _, ok := r.methods[mf]; ok {
|
||||
return "reply", etf.Atom("taken"), state
|
||||
}
|
||||
|
||||
r.methods[mf] = fun
|
||||
return "reply", etf.Atom("ok"), state
|
||||
|
||||
case etf.Atom("$revoke"):
|
||||
module := m.Element(2).(etf.Atom)
|
||||
function := m.Element(3).(etf.Atom)
|
||||
mf := modFun{
|
||||
module: string(module),
|
||||
function: string(function),
|
||||
}
|
||||
|
||||
if _, ok := r.methods[mf]; ok {
|
||||
delete(r.methods, mf)
|
||||
return "reply", etf.Atom("ok"), state
|
||||
}
|
||||
|
||||
return "reply", etf.Atom("unknown"), state
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
reply := etf.Term(etf.Tuple{etf.Atom("badrpc"), etf.Atom("unknown")})
|
||||
return "reply", reply, state
|
||||
}
|
||||
|
||||
// HandleInfo serves all other incoming messages (Pid ! message)
|
||||
// HandleInfo -> ("noreply", state) - noreply
|
||||
// ("stop", reason) - normal stop
|
||||
func (r *rex) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
|
||||
lib.Log("REX: HandleInfo: %#v", message)
|
||||
return "noreply", state
|
||||
}
|
||||
|
||||
// Terminate is called when the process terminates
|
||||
func (r *rex) Terminate(reason string, state interface{}) {
|
||||
lib.Log("REX: Terminate: %#v", reason)
|
||||
}
|
||||
|
||||
func (r *rex) handleRPC(module, function etf.Atom, args etf.List, state interface{}) (reply, state1 interface{}) {
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
err := fmt.Sprintf("panic reason: %s", x)
|
||||
// recovered
|
||||
reply = etf.Tuple{
|
||||
etf.Atom("badrpc"),
|
||||
etf.Tuple{
|
||||
etf.Atom("EXIT"),
|
||||
etf.Tuple{
|
||||
etf.Atom("panic"),
|
||||
etf.List{
|
||||
etf.Tuple{module, function, args, etf.List{err}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
}()
|
||||
state1 = state
|
||||
mf := modFun{
|
||||
module: string(module),
|
||||
function: string(function),
|
||||
}
|
||||
// calling dynamically declared rpc method
|
||||
if function, ok := r.methods[mf]; ok {
|
||||
reply = function(args...)
|
||||
return
|
||||
}
|
||||
|
||||
// calling a local module if it's been registered as a process
|
||||
if r.process.Node.GetProcessByName(mf.module) != nil {
|
||||
return nil, state
|
||||
}
|
||||
|
||||
// unknown request. return error
|
||||
reply = etf.Tuple{
|
||||
etf.Atom("badrpc"),
|
||||
etf.Tuple{
|
||||
etf.Atom("EXIT"),
|
||||
etf.Tuple{
|
||||
etf.Atom("undef"),
|
||||
etf.List{
|
||||
etf.Tuple{
|
||||
module,
|
||||
function,
|
||||
args,
|
||||
etf.List{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return reply, state
|
||||
}
|
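A sketch tying the '$provide'/'call' branches above together; the module/function names are illustrative, and ProvideRPC/CallRPC are the node/process wrappers exercised in rpc_test.go below:

```go
double := func(args ...etf.Term) etf.Term { return args[0].(int) * 2 }
_ = node.ProvideRPC("myMod", "double", double)          // lands in the "$provide" branch
v, _ := p.CallRPC(node.FullName, "myMod", "double", 21) // dispatched via handleRPC; v == 42
_ = v
```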
83
rpc.go
|
@ -1,83 +0,0 @@
|
|||
package ergonode
|
||||
|
||||
import (
|
||||
"github.com/halturin/ergonode/etf"
|
||||
"github.com/halturin/ergonode/lib"
|
||||
)
|
||||
|
||||
type rpcFunction func(etf.List) etf.Term
|
||||
|
||||
type modFun struct {
|
||||
module string
|
||||
function string
|
||||
}
|
||||
|
||||
type rpcRex struct {
|
||||
GenServer
|
||||
callMap map[modFun]rpcFunction
|
||||
}
|
||||
|
||||
func (currNode *Node) RpcProvide(modName string, funName string, fun rpcFunction) (err error) {
|
||||
lib.Log("Provide: %s:%s %#v", modName, funName, fun)
|
||||
currNode.sysProcs.rpcRex.callMap[modFun{modName, funName}] = fun
|
||||
return
|
||||
}
|
||||
|
||||
func (currNode *Node) RpcRevoke(modName, funName string) {
|
||||
lib.Log("Revoke: %s:%s", modName, funName)
|
||||
}
|
||||
|
||||
func (rpcs *rpcRex) Init(args ...interface{}) interface{} {
|
||||
lib.Log("REX: Init: %#v", args)
|
||||
rpcs.Node.Register(etf.Atom("rex"), rpcs.Self)
|
||||
rpcs.callMap = make(map[modFun]rpcFunction, 0)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rpcs *rpcRex) HandleCast(message *etf.Term, state interface{}) (code int, stateout interface{}) {
|
||||
lib.Log("REX: HandleCast: %#v", *message)
|
||||
stateout = state
|
||||
code = 0
|
||||
return
|
||||
}
|
||||
|
||||
func (rpcs *rpcRex) HandleCall(from *etf.Tuple, message *etf.Term, state interface{}) (code int, reply *etf.Term, stateout interface{}) {
|
||||
lib.Log("REX: HandleCall: %#v, From: %#v", *message, *from)
|
||||
var replyTerm etf.Term
|
||||
stateout = state
|
||||
code = 1
|
||||
valid := false
|
||||
switch req := (*message).(type) {
|
||||
case etf.Tuple:
|
||||
if len(req) > 0 {
|
||||
switch act := req[0].(type) {
|
||||
case etf.Atom:
|
||||
if string(act) == "call" {
|
||||
valid = true
|
||||
if fun, ok := rpcs.callMap[modFun{string(req[1].(etf.Atom)), string(req[2].(etf.Atom))}]; ok {
|
||||
replyTerm = fun(req[3].(etf.List))
|
||||
} else {
|
||||
replyTerm = etf.Term(etf.Tuple{etf.Atom("badrpc"), etf.Tuple{etf.Atom("EXIT"), etf.Tuple{etf.Atom("undef"), etf.List{etf.Tuple{req[1], req[2], req[3], etf.List{}}}}}})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
replyTerm = etf.Term(etf.Tuple{etf.Atom("badrpc"), etf.Atom("unknown")})
|
||||
}
|
||||
reply = &replyTerm
|
||||
return
|
||||
}
|
||||
|
||||
func (rpcs *rpcRex) HandleInfo(message *etf.Term, state interface{}) (code int, stateout interface{}) {
|
||||
lib.Log("REX: HandleInfo: %#v", *message)
|
||||
stateout = state
|
||||
code = 0
|
||||
return
|
||||
}
|
||||
|
||||
func (rpcs *rpcRex) Terminate(reason int, state interface{}) {
|
||||
lib.Log("REX: Terminate: %#v", reason)
|
||||
}
|
|
@ -0,0 +1,124 @@
|
|||
package ergonode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/halturin/ergonode/etf"
|
||||
)
|
||||
|
||||
type testRPCGenServer struct {
|
||||
GenServer
|
||||
process *Process
|
||||
// v chan interface{}
|
||||
}
|
||||
|
||||
func (trpc *testRPCGenServer) Init(p *Process, args ...interface{}) (state interface{}) {
|
||||
// trpc.v <- p.Self()
|
||||
trpc.process = p
|
||||
return nil
|
||||
}
|
||||
func (trpc *testRPCGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
|
||||
// fmt.Printf("testRPCGenServer ({%s, %s}): HandleCast: %#v\n", trpc.process.name, trpc.process.Node.FullName, message)
|
||||
// trpc.v <- message
|
||||
return "noreply", state
|
||||
}
|
||||
func (trpc *testRPCGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
|
||||
// fmt.Printf("testRPCGenServer ({%s, %s}): HandleCall: %#v, From: %#v\n", trpc.process.name, trpc.process.Node.FullName, message, from)
|
||||
return "reply", message, state
|
||||
}
|
||||
func (trpc *testRPCGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
|
||||
// fmt.Printf("testRPCGenServer ({%s, %s}): HandleInfo: %#v\n", trpc.process.name, trpc.process.Node.FullName, message)
|
||||
// trpc.v <- message
|
||||
return "noreply", state
|
||||
}
|
||||
func (trpc *testRPCGenServer) Terminate(reason string, state interface{}) {
|
||||
// fmt.Printf("\ntestRPCGenServer ({%s, %s}): Terminate: %#v\n", trpc.process.name, trpc.process.Node.FullName, reason)
|
||||
}
|
||||
|
||||
func TestRPC(t *testing.T) {
|
||||
fmt.Printf("\n=== Test RPC\n")
|
||||
|
||||
node1 := CreateNode("nodeRPC@localhost", "cookies", NodeOptions{})
|
||||
gs1 := &testRPCGenServer{}
|
||||
node1gs1, _ := node1.Spawn("gs1", ProcessOptions{}, gs1, nil)
|
||||
|
||||
testFun1 := func(a ...etf.Term) etf.Term {
|
||||
return a[len(a)-1]
|
||||
}
|
||||
|
||||
fmt.Printf("Registering RPC method 'testMod.testFun' on %s: ", node1.FullName)
|
||||
time.Sleep(100 * time.Millisecond) // waiting for start 'rex' gen_server
|
||||
if e := node1.ProvideRPC("testMod", "testFun", testFun1); e != nil {
|
||||
message := fmt.Sprintf("%s", e)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
fmt.Println("OK")
|
||||
}
|
||||
|
||||
fmt.Printf("Call RPC method 'testMod.testFun' with 1 arg on %s: ", node1.FullName)
|
||||
if v, e := node1gs1.CallRPC("nodeRPC@localhost", "testMod", "testFun", 12345); e != nil || v != 12345 {
|
||||
message := fmt.Sprintf("%s %#v", e, v)
|
||||
t.Fatal(message)
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
fmt.Printf("Call RPC method 'testMod.testFun' with 3 arg on %s: ", node1.FullName)
|
||||
if v, e := node1gs1.CallRPC("nodeRPC@localhost", "testMod", "testFun", 12345, 5.678, node1gs1.Self()); e != nil || v != node1gs1.Self() {
|
||||
message := fmt.Sprintf("%s %#v", e, v)
|
||||
t.Fatal(message)
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
fmt.Printf("Revoking RPC method 'testMod.testFun' on %s: ", node1.FullName)
|
||||
if e := node1.RevokeRPC("testMod", "testFun"); e != nil {
|
||||
message := fmt.Sprintf("%s", e)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
fmt.Println("OK")
|
||||
}
|
||||
|
||||
fmt.Printf("Call revoked RPC method 'testMod.testFun' with 1 arg on %s: ", node1.FullName)
|
||||
expected1 := etf.Tuple{etf.Atom("badrpc"),
|
||||
etf.Tuple{etf.Atom("EXIT"),
|
||||
etf.Tuple{etf.Atom("undef"),
|
||||
etf.List{
|
||||
etf.Tuple{
|
||||
etf.Atom("testMod"),
|
||||
etf.Atom("testFun"),
|
||||
etf.List{12345}, etf.List{}}}}}}
|
||||
if v, e := node1gs1.CallRPC("nodeRPC@localhost", "testMod", "testFun", 12345); e != nil {
|
||||
message := fmt.Sprintf("%s %#v", e, v)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
if !reflect.DeepEqual(v, expected1) {
|
||||
message := fmt.Sprintf("expected: %#v got: %#v", expected1, v)
|
||||
t.Fatal(message)
|
||||
}
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
fmt.Printf("Call RPC unknown method 'xxx.xxx' on %s: ", node1.FullName)
|
||||
expected2 := etf.Tuple{etf.Atom("badrpc"),
|
||||
etf.Tuple{etf.Atom("EXIT"),
|
||||
etf.Tuple{etf.Atom("undef"),
|
||||
etf.List{
|
||||
etf.Tuple{
|
||||
etf.Atom("xxx"),
|
||||
etf.Atom("xxx"),
|
||||
etf.List{12345}, etf.List{}}}}}}
|
||||
|
||||
if v, e := node1gs1.CallRPC("nodeRPC@localhost", "xxx", "xxx", 12345); e != nil {
|
||||
message := fmt.Sprintf("%s %#v", e, v)
|
||||
t.Fatal(message)
|
||||
} else {
|
||||
if !reflect.DeepEqual(v, expected2) {
|
||||
message := fmt.Sprintf("expected: %#v got: %#v", expected2, v)
|
||||
t.Fatal(message)
|
||||
}
|
||||
}
|
||||
fmt.Println("OK")
|
||||
|
||||
}
|
|
@ -0,0 +1,494 @@
|
|||
package ergonode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/halturin/ergonode/etf"
|
||||
"github.com/halturin/ergonode/lib"
|
||||
)
|
||||
|
||||
type SupervisorStrategy struct {
|
||||
Type SupervisorStrategyType
|
||||
Intensity uint16
|
||||
Period uint16
|
||||
}
|
||||
|
||||
type SupervisorStrategyType = string
|
||||
type SupervisorChildRestart = string
|
||||
type SupervisorChild = string
|
||||
|
||||
const (
|
||||
// Restart strategies:
|
||||
|
||||
// SupervisorRestartIntensity is the default number of restarts allowed within SupervisorRestartPeriod
|
||||
SupervisorRestartIntensity = uint16(10)
|
||||
|
||||
// SupervisorRestartPeriod is the default period (in seconds) over which restart intensity is measured
|
||||
SupervisorRestartPeriod = uint16(10)
|
||||
|
||||
// SupervisorStrategyOneForOne If one child process terminates and is to be restarted, only
|
||||
// that child process is affected. This is the default restart strategy.
|
||||
SupervisorStrategyOneForOne = "one_for_one"
|
||||
|
||||
// SupervisorStrategyOneForAll If one child process terminates and is to be restarted, all other
|
||||
// child processes are terminated and then all child processes are restarted.
|
||||
SupervisorStrategyOneForAll = "one_for_all"
|
||||
|
||||
// SupervisorStrategyRestForOne If one child process terminates and is to be restarted,
|
||||
// the 'rest' of the child processes (that is, the child
|
||||
// processes after the terminated child process in the start order)
|
||||
// are terminated. Then the terminated child process and all
|
||||
// child processes after it are restarted
|
||||
SupervisorStrategyRestForOne = "rest_for_one"
|
||||
|
||||
// SupervisorStrategySimpleOneForOne A simplified one_for_one supervisor, where all
|
||||
// child processes are dynamically added instances
|
||||
// of the same process type, that is, running the same code.
|
||||
SupervisorStrategySimpleOneForOne = "simple_one_for_one"
|
||||
|
||||
// Restart types:
|
||||
|
||||
// SupervisorChildRestartPermanent child process is always restarted
|
||||
SupervisorChildRestartPermanent = "permanent"
|
||||
|
||||
// SupervisorChildRestartTemporary child process is never restarted
|
||||
// (not even when the supervisor restart strategy is rest_for_one
|
||||
// or one_for_all and a sibling death causes the temporary process
|
||||
// to be terminated)
|
||||
SupervisorChildRestartTemporary = "temporary"
|
||||
|
||||
// SupervisorChildRestartTransient child process is restarted only if
|
||||
// it terminates abnormally, that is, with an exit reason other
|
||||
// than normal, shutdown, or {shutdown,Term}.
|
||||
SupervisorChildRestartTransient = "transient"
|
||||
|
||||
supervisorChildStateStart = 0
|
||||
supervisorChildStateRunning = 1
|
||||
supervisorChildStateDisabled = -1
|
||||
|
||||
// shutdown defines how a child process must be terminated. (TODO: not implemented yet)
|
||||
|
||||
// SupervisorChildShutdownBrutal means that the child process is
|
||||
// unconditionally terminated using process' Kill method
|
||||
SupervisorChildShutdownBrutal = -1
|
||||
|
||||
// SupervisorChildShutdownInfinity means that the supervisor will
|
||||
// wait for an exit signal for as long as the child needs
|
||||
SupervisorChildShutdownInfinity = 0 // default shutdown behavior
|
||||
|
||||
// SupervisorChildShutdownTimeout5sec predefined timeout value
|
||||
SupervisorChildShutdownTimeout5sec = 5
|
||||
)
|
||||
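A sketch of a supervisor Init wiring these constants into a spec; `mySup` and `myGenServer` are hypothetical types:

```go
type mySup struct {
	Supervisor
}

func (s *mySup) Init(args ...interface{}) SupervisorSpec {
	return SupervisorSpec{
		Name: "mySup",
		Children: []SupervisorChildSpec{
			{Name: "worker", Child: &myGenServer{}, Restart: SupervisorChildRestartPermanent},
		},
		Strategy: SupervisorStrategy{
			Type:      SupervisorStrategyOneForOne,
			Intensity: SupervisorRestartIntensity,
			Period:    SupervisorRestartPeriod,
		},
	}
}
```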
|
||||
type supervisorChildState int
|
||||
|
||||
// SupervisorChildShutdown is an integer time-out value: the supervisor tells
// the child process to terminate by calling its Stop method and then
// waits for an exit signal with reason shutdown back from the
// child process. If no exit signal is received within the
// specified number of seconds, the child process is unconditionally
// terminated using the Kill method.
|
||||
// There are predefined values:
|
||||
// SupervisorChildShutdownBrutal (-1)
|
||||
// SupervisorChildShutdownInfinity (0) - default value
|
||||
// SupervisorChildShutdownTimeout5sec (5)
|
||||
type SupervisorChildShutdown int
|
||||
|
||||
// SupervisorBehavior interface
|
||||
type SupervisorBehavior interface {
|
||||
Init(args ...interface{}) SupervisorSpec
|
||||
}
|
||||
|
||||
type SupervisorSpec struct {
|
||||
Name string
|
||||
Children []SupervisorChildSpec
|
||||
Strategy SupervisorStrategy
|
||||
restarts []int64
|
||||
}
|
||||
|
||||
type SupervisorChildSpec struct {
|
||||
Name string
|
||||
Child interface{}
|
||||
Args []interface{}
|
||||
Restart SupervisorChildRestart
|
||||
Shutdown SupervisorChildShutdown
|
||||
state supervisorChildState // for internal usage
|
||||
process *Process
|
||||
}
|
||||
|
||||
// Supervisor is implementation of ProcessBehavior interface
|
||||
type Supervisor struct {
|
||||
spec *SupervisorSpec
|
||||
}
|
||||
|
||||
func (sv *Supervisor) loop(svp *Process, object interface{}, args ...interface{}) string {
|
||||
|
||||
spec := object.(SupervisorBehavior).Init(args...)
|
||||
lib.Log("Supervisor spec %#v\n", spec)
|
||||
svp.ready <- true
|
||||
|
||||
sv.spec = &spec
|
||||
|
||||
if spec.Strategy.Type != SupervisorStrategySimpleOneForOne {
|
||||
startChildren(svp, &spec)
|
||||
}
|
||||
|
||||
svp.currentFunction = "Supervisor:loop"
|
||||
waitTerminatingProcesses := []etf.Pid{}
|
||||
|
||||
for {
|
||||
var message etf.Term
|
||||
var fromPid etf.Pid
|
||||
select {
|
||||
case ex := <-svp.gracefulExit:
|
||||
for i := range spec.Children {
|
||||
if spec.Children[i].process != nil {
|
||||
p := spec.Children[i].process
|
||||
p.Exit(svp.Self(), ex.reason)
|
||||
}
|
||||
}
|
||||
return ex.reason
|
||||
|
||||
case msg := <-svp.mailBox:
|
||||
fromPid = msg.Element(1).(etf.Pid)
|
||||
message = msg.Element(2)
|
||||
|
||||
case <-svp.Context.Done():
|
||||
return "kill"
|
||||
case direct := <-svp.direct:
|
||||
sv.handleDirect(direct)
|
||||
continue
|
||||
}
|
||||
|
||||
svp.reductions++
|
||||
|
||||
lib.Log("[%#v]. Message from %#v\n", svp.self, fromPid)
|
||||
|
||||
switch m := message.(type) {
|
||||
|
||||
case etf.Tuple:
|
||||
|
||||
switch m.Element(1) {
|
||||
|
||||
case etf.Atom("EXIT"):
|
||||
terminated := m.Element(2).(etf.Pid)
|
||||
reason := m.Element(3).(etf.Atom)
|
||||
if len(waitTerminatingProcesses) > 0 {
|
||||
|
||||
for i := range waitTerminatingProcesses {
|
||||
if waitTerminatingProcesses[i] == terminated {
|
||||
waitTerminatingProcesses[i] = waitTerminatingProcesses[0]
|
||||
waitTerminatingProcesses = waitTerminatingProcesses[1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(waitTerminatingProcesses) == 0 {
|
||||
// it was the last one; let's restart all terminated children
|
||||
startChildren(svp, &spec)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
switch spec.Strategy.Type {
|
||||
|
||||
case SupervisorStrategyOneForAll:
|
||||
for i := range spec.Children {
|
||||
if spec.Children[i].state != supervisorChildStateRunning {
|
||||
continue
|
||||
}
|
||||
|
||||
p := spec.Children[i].process
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
spec.Children[i].process = nil
|
||||
if p.Self() == terminated {
|
||||
if haveToDisableChild(spec.Children[i].Restart, reason) {
|
||||
spec.Children[i].state = supervisorChildStateDisabled
|
||||
} else {
|
||||
spec.Children[i].state = supervisorChildStateStart
|
||||
}
|
||||
|
||||
if len(spec.Children) == i+1 && len(waitTerminatingProcesses) == 0 {
|
||||
// it was the last one; nothing to wait for
|
||||
startChildren(svp, &spec)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if haveToDisableChild(spec.Children[i].Restart, "restart") {
|
||||
spec.Children[i].state = supervisorChildStateDisabled
|
||||
} else {
|
||||
spec.Children[i].state = supervisorChildStateStart
|
||||
}
|
||||
p.Exit(p.Self(), "restart")
|
||||
|
||||
waitTerminatingProcesses = append(waitTerminatingProcesses, p.Self())
|
||||
}
|
||||
|
||||
case SupervisorStrategyRestForOne:
|
||||
isRest := false
|
||||
for i := range spec.Children {
|
||||
p := spec.Children[i].process
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
if p.Self() == terminated {
|
||||
isRest = true
|
||||
spec.Children[i].process = nil
|
||||
if haveToDisableChild(spec.Children[i].Restart, reason) {
|
||||
spec.Children[i].state = supervisorChildStateDisabled
|
||||
} else {
|
||||
spec.Children[i].state = supervisorChildStateStart
|
||||
}
|
||||
|
||||
if len(spec.Children) == i+1 && len(waitTerminatingProcesses) == 0 {
|
||||
// it was the last one; nothing to wait for
|
||||
startChildren(svp, &spec)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if isRest && spec.Children[i].state == supervisorChildStateRunning {
|
||||
p.Exit(p.Self(), "restart")
|
||||
spec.Children[i].process = nil
|
||||
waitTerminatingProcesses = append(waitTerminatingProcesses, p.Self())
|
||||
if haveToDisableChild(spec.Children[i].Restart, "restart") {
|
||||
spec.Children[i].state = supervisorChildStateDisabled
|
||||
} else {
|
||||
spec.Children[i].state = supervisorChildStateStart
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case SupervisorStrategyOneForOne:
|
||||
for i := range spec.Children {
|
||||
p := spec.Children[i].process
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
if p.Self() == terminated {
|
||||
spec.Children[i].process = nil
|
||||
if haveToDisableChild(spec.Children[i].Restart, reason) {
|
||||
spec.Children[i].state = supervisorChildStateDisabled
|
||||
} else {
|
||||
spec.Children[i].state = supervisorChildStateStart
|
||||
}
|
||||
|
||||
startChildren(svp, &spec)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case SupervisorStrategySimpleOneForOne:
|
||||
for i := range spec.Children {
|
||||
p := spec.Children[i].process
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
if p.Self() == terminated {
|
||||
|
||||
if haveToDisableChild(spec.Children[i].Restart, reason) {
|
||||
// won't be restarted due to the restart strategy
|
||||
spec.Children[i] = spec.Children[0]
|
||||
spec.Children = spec.Children[1:]
|
||||
break
|
||||
}
|
||||
|
||||
process := startChild(svp, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Args...)
|
||||
spec.Children[i].process = process
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case etf.Atom("$startByName"):
|
||||
// dynamically start child process
|
||||
specName := m.Element(2).(string)
|
||||
args := m.Element(3)
|
||||
reply := m.Element(4).(chan etf.Tuple)
|
||||
|
||||
s := lookupSpecByName(specName, spec.Children)
|
||||
if s == nil {
|
||||
reply <- etf.Tuple{etf.Atom("error"), "unknown_spec"}
|
||||
continue
|
||||
}
|
||||
specChild := *s
|
||||
specChild.process = nil
|
||||
specChild.state = supervisorChildStateStart
|
||||
|
||||
m := etf.Tuple{
|
||||
etf.Atom("$startBySpec"),
|
||||
specChild,
|
||||
args,
|
||||
reply,
|
||||
}
|
||||
svp.mailBox <- etf.Tuple{etf.Pid{}, m}
|
||||
|
||||
case etf.Atom("$startBySpec"):
|
||||
specChild := m.Element(2).(SupervisorChildSpec)
|
||||
args := m.Element(3).([]interface{})
|
||||
reply := m.Element(4).(chan etf.Tuple)
|
||||
|
||||
if len(args) > 0 {
|
||||
specChild.Args = args
|
||||
}
|
||||
|
||||
process := startChild(svp, "", specChild.Child, specChild.Args...)
|
||||
specChild.process = process
|
||||
specChild.Name = ""
|
||||
spec.Children = append(spec.Children, specChild)
|
||||
|
||||
reply <- etf.Tuple{etf.Atom("ok"), process.self}
|
||||
default:
|
||||
lib.Log("m: %#v", m)
|
||||
}
|
||||
|
||||
default:
|
||||
lib.Log("m: %#v", m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartChild dynamically starts a child process with the given child-spec name, as defined by the Init call.
// The created process will use the same object (GenServer/Supervisor) you defined in the spec as a Child, since the spec
// keeps a pointer to it. You may use this object as state shared among all the processes created from this spec.
|
||||
func (sv *Supervisor) StartChild(parent *Process, specName string, args ...interface{}) (etf.Pid, error) {
|
||||
reply := make(chan etf.Tuple)
|
||||
m := etf.Tuple{
|
||||
etf.Atom("$startByName"),
|
||||
specName,
|
||||
args,
|
||||
reply,
|
||||
}
|
||||
parent.mailBox <- etf.Tuple{etf.Pid{}, m}
|
||||
r := <-reply
|
||||
switch r.Element(1) {
|
||||
case etf.Atom("ok"):
|
||||
return r.Element(2).(etf.Pid), nil
|
||||
case etf.Atom("error"):
|
||||
return etf.Pid{}, fmt.Errorf("%s", r.Element(2).(string))
|
||||
default:
|
||||
panic("internal error at Supervisor.StartChild")
|
||||
}
|
||||
}
|
||||
|
||||
// StartChildWithSpec dynamically starts a child process with given child spec
|
||||
func (sv *Supervisor) StartChildWithSpec(parent *Process, spec SupervisorChildSpec, args ...interface{}) (etf.Pid, error) {
|
||||
reply := make(chan etf.Tuple)
|
||||
m := etf.Tuple{
|
||||
etf.Atom("$startBySpec"),
|
||||
spec,
|
||||
args,
|
||||
reply,
|
||||
}
|
||||
parent.mailBox <- etf.Tuple{etf.Pid{}, m}
|
||||
r := <-reply
|
||||
switch r.Element(1) {
|
||||
case etf.Atom("ok"):
|
||||
return r.Element(2).(etf.Pid), nil
|
||||
default:
|
||||
return etf.Pid{}, fmt.Errorf("%s", r.Element(1))
|
||||
}
|
||||
}
|
||||
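A usage sketch for the two dynamic-start entry points; `sv` is the Supervisor behaviour object, `parent` its *Process, and "worker" a spec name assumed to exist in Init:

```go
// By name of a spec declared in Init:
pid, err := sv.StartChild(parent, "worker", "extra-arg")

// Or with an ad-hoc spec (simple_one_for_one style):
pid2, err2 := sv.StartChildWithSpec(parent, SupervisorChildSpec{
	Child:   &myGenServer{}, // hypothetical child object
	Restart: SupervisorChildRestartTemporary,
})
_, _, _, _ = pid, err, pid2, err2
```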
|
||||
func (sv *Supervisor) handleDirect(m directMessage) {
|
||||
switch m.id {
|
||||
case "getChildren":
|
||||
children := []etf.Pid{}
|
||||
for i := range sv.spec.Children {
|
||||
if sv.spec.Children[i].process == nil {
|
||||
continue
|
||||
}
|
||||
children = append(children, sv.spec.Children[i].process.self)
|
||||
}
|
||||
|
||||
m.message = children
|
||||
m.reply <- m
|
||||
|
||||
default:
|
||||
if m.reply != nil {
|
||||
m.message = ErrUnsupportedRequest
|
||||
m.reply <- m
|
||||
}
|
||||
}
|
||||
}

// startChildren (re)starts every child that is in the "start" state and
// enforces the restart intensity: more than Strategy.Intensity restarts
// within Strategy.Period seconds terminates the supervisor itself.
func startChildren(parent *Process, spec *SupervisorSpec) {
    spec.restarts = append(spec.restarts, time.Now().Unix())
    if len(spec.restarts) > int(spec.Strategy.Intensity) {
        period := time.Now().Unix() - spec.restarts[0]
        if period <= int64(spec.Strategy.Period) {
            fmt.Printf("ERROR: Restart intensity is exceeded (%d restarts for %d seconds)\n",
                spec.Strategy.Intensity, spec.Strategy.Period)
            parent.Kill()
            return
        }
        spec.restarts = spec.restarts[1:]
    }

    for i := range spec.Children {
        switch spec.Children[i].state {
        case supervisorChildStateDisabled:
            spec.Children[i].process = nil
        case supervisorChildStateRunning:
            continue
        case supervisorChildStateStart:
            spec.Children[i].state = supervisorChildStateRunning
            process := startChild(parent, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Args...)
            spec.Children[i].process = process
        default:
            panic("Incorrect supervisorChildState")
        }
    }
}
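
// For example, the strategy used throughout the tests in this commit allows
// at most 10 restarts within a 5 second window before the supervisor gives up:
//
//	Strategy: SupervisorStrategy{
//		Type:      SupervisorStrategyOneForOne,
//		Intensity: 10,
//		Period:    5,
//	}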

func startChild(parent *Process, name string, child interface{}, args ...interface{}) *Process {
    opts := ProcessOptions{}

    if parent.groupLeader == nil {
        // parent has no group leader, so it becomes the leader itself
        opts.GroupLeader = parent
    } else {
        opts.GroupLeader = parent.groupLeader
    }
    opts.parent = parent
    process, err := parent.Node.Spawn(name, opts, child, args...)

    if err != nil {
        panic(err)
    }

    process.parent = parent
    parent.Link(process.self)

    return process
}
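
// Group leader note: a child inherits its parent's group leader, and a
// top-level parent (one without a leader) becomes the leader itself. The
// supervisor also links to every child it spawns; the exit signals delivered
// over these links are what drive the restart strategy.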

// haveToDisableChild reports whether a child must not be restarted after an
// exit with the given reason, based on its restart type.
func haveToDisableChild(restart SupervisorChildRestart, reason etf.Atom) bool {
    switch restart {
    case SupervisorChildRestartTransient:
        if reason == etf.Atom("shutdown") || reason == etf.Atom("normal") {
            return true
        }

    case SupervisorChildRestartTemporary:
        return true
    }

    return false
}
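
// This mirrors the Erlang/OTP restart types: a permanent child is always
// restarted, a transient child is restarted only after an abnormal exit
// (any reason other than "normal" or "shutdown"), and a temporary child is
// never restarted.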

// lookupSpecByName returns a pointer to the child spec with the given name, or nil.
func lookupSpecByName(specName string, spec []SupervisorChildSpec) *SupervisorChildSpec {
    for i := range spec {
        if spec[i].Name == specName {
            return &spec[i]
        }
    }
    return nil
}

@@ -0,0 +1,303 @@
package ergonode

// - Supervisor

// - one for all (permanent)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(normal)   (sv1 stopping gs1)
//                    (sv1 stopping gs2,gs3)
//                    (sv1 starting gs1,gs2,gs3)
// gs2.stop(shutdown) (sv1 stopping gs2)
//                    (sv1 stopping gs1,gs3)
//                    (sv1 starting gs1,gs2,gs3)
// gs3.stop(panic)    (sv1 stopping gs3)
//                    (sv1 stopping gs1,gs2)
//                    (sv1 starting gs1,gs2,gs3)

// - one for all (transient)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(normal)   (sv1 stopping gs1)
//                    (sv1 stopping gs2, gs3)
//                    (sv1 starting gs2, gs3; gs1 is not restarted)
// gs2.stop(abnormal) (sv1 stopping gs2)
//                    (sv1 stopping gs3)
//                    (sv1 starting gs2, gs3)
// gs3.stop(shutdown) (sv1 stopping gs3)
//                    (sv1 stopping gs2)
//                    (sv1 starting gs2; gs3 is not restarted)

// - one for all (temporary)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// stopping any child stops all of them and nothing is restarted;
// a fresh supervisor is started for each reason: normal, abnormal, shutdown

import (
    "fmt"
    "testing"

    "github.com/halturin/ergonode/etf"
    // "time"
    // "github.com/halturin/ergonode/etf"
)

type testSupervisorOneForAll struct {
    Supervisor
    ch chan interface{}
}

type ChildrenTestCase struct {
    reason   string
    statuses []string
    events   int
}
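
// ChildrenTestCase drives one step of a scenario: 'reason' is the exit reason
// cast to a child, 'statuses' is the expected per-child outcome ("new" -
// restarted with a fresh pid, "old" - left untouched, "empty" - terminated
// and not restarted), and 'events' is how many start/terminate notifications
// to await on the channel before checking.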

func TestSupervisorOneForAll(t *testing.T) {
    fmt.Printf("\n=== Test Supervisor - one for all\n")
    fmt.Printf("Starting node nodeSvOneForAll@localhost: ")
    node := CreateNode("nodeSvOneForAll@localhost", "cookies", NodeOptions{})
    if node == nil {
        t.Fatal("can't start node")
    } else {
        fmt.Println("OK")
    }

    // ===================================================================================================
    // test SupervisorChildRestartPermanent
    fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", SupervisorChildRestartPermanent)
    sv := &testSupervisorOneForAll{
        ch: make(chan interface{}, 10),
    }
    processSV, _ := node.Spawn("testSupervisorPermanent", ProcessOptions{}, sv, SupervisorChildRestartPermanent, sv.ch)
    children := make([]etf.Pid, 3)
    children, err := waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }

    // testing permanent
    testCases := []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"new", "new", "new"},
            events:   6, // waiting for 3 terminates and 3 starts
        },
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"new", "new", "new"},
            events:   6,
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"new", "new", "new"},
            events:   6,
        },
    }
    for i := range children {
        fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting all of them ... ", i+1, testCases[i].reason)
        processSV.Cast(children[i], testCases[i].reason) // stopping child

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children, children1, testCases[i].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[i].statuses, children, children1)
                t.Fatal(e)
            }
        }
    }

    fmt.Printf("Stopping supervisor 'testSupervisorPermanent' (%s)... ", SupervisorChildRestartPermanent)
    processSV.Exit(processSV.Self(), "x")
    if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    // ===================================================================================================
    // test SupervisorChildRestartTransient
    fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... ", SupervisorChildRestartTransient)
    sv = &testSupervisorOneForAll{
        ch: make(chan interface{}, 10),
    }
    processSV, _ = node.Spawn("testSupervisorTransient", ProcessOptions{}, sv, SupervisorChildRestartTransient, sv.ch)
    children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }

    // testing transient
    testCases = []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"empty", "new", "new"},
            events:   5, // waiting for 3 terminates and 2 starts
        },
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"empty", "new", "new"},
            events:   4, // waiting for 2 terminates and 2 starts
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"empty", "new", "empty"},
            events:   3, // waiting for 2 terminates and 1 start
        },
    }
    for i := range children {
        fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting all of them ... ", i+1, testCases[i].reason)
        processSV.Cast(children[i], testCases[i].reason) // stopping child

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[i].statuses, children, children1)
                t.Fatal(e)
            }
        }
    }

    fmt.Printf("Stopping supervisor 'testSupervisorTransient' (%s)... ", SupervisorChildRestartTransient)
    processSV.Exit(processSV.Self(), "x")
    if children1, err := waitNeventsSupervisorChildren(sv.ch, 1, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    // ===================================================================================================
    // test SupervisorChildRestartTemporary

    // testing temporary
    // A temporary child process is never restarted (even when the supervisor's
    // restart strategy is rest_for_one or one_for_all and a sibling's death
    // causes the temporary process to be terminated).
    testCases = []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"empty", "empty", "empty"},
            events:   3, // waiting for 3 terminates
        },
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"empty", "empty", "empty"},
            events:   3, // waiting for 3 terminates
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"empty", "empty", "empty"},
            events:   3, // waiting for 3 terminates
        },
    }

    for i := range testCases {
        fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", SupervisorChildRestartTemporary)
        sv = &testSupervisorOneForAll{
            ch: make(chan interface{}, 10),
        }
        processSV, _ = node.Spawn("testSupervisorTemporary", ProcessOptions{}, sv, SupervisorChildRestartTemporary, sv.ch)
        children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
        if err != nil {
            t.Fatal(err)
        } else {
            fmt.Println("OK")
        }

        fmt.Printf("... stopping child %d with '%s' reason and waiting for termination of all of them ... ", i+1, testCases[i].reason)
        processSV.Cast(children[i], testCases[i].reason) // stopping child

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[i].statuses, children, children1)
                t.Fatal(e)
            }
        }

        fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (%s)... ", SupervisorChildRestartTemporary)
        processSV.Exit(processSV.Self(), "x")
        if children1, err := waitNeventsSupervisorChildren(sv.ch, 0, children); err != nil {
            t.Fatal(err)
        } else {
            statuses := []string{"empty", "empty", "empty"}
            if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
                t.Fatal(e)
            }
        }
    }

}

func (ts *testSupervisorOneForAll) Init(args ...interface{}) SupervisorSpec {
    restart := args[0].(string)
    ch := args[1].(chan interface{})
    return SupervisorSpec{
        Children: []SupervisorChildSpec{
            SupervisorChildSpec{
                Name:    "testGS1",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 0},
            },
            SupervisorChildSpec{
                Name:    "testGS2",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 1},
            },
            SupervisorChildSpec{
                Name:    "testGS3",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 2},
            },
        },
        Strategy: SupervisorStrategy{
            Type:      SupervisorStrategyOneForAll,
            Intensity: 10,
            Period:    5,
        },
    }
}

@@ -0,0 +1,347 @@
package ergonode

// - Supervisor

// - one for one (permanent)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(normal)   (sv1 restarting gs1)
// gs2.stop(shutdown) (sv1 restarting gs2)
// gs3.stop(panic)    (sv1 restarting gs3)

// - one for one (transient)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(normal)   (sv1 won't restart gs1)
// gs2.stop(shutdown) (sv1 won't restart gs2)
// gs3.stop(panic)    (sv1 restarting gs3 only)

// - one for one (temporary)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(normal)   (sv1 won't restart gs1)
// gs2.stop(shutdown) (sv1 won't restart gs2)
// gs3.stop(panic)    (sv1 won't restart gs3)

import (
    "fmt"
    "testing"
    "time"

    "github.com/halturin/ergonode/etf"
)

type testSupervisorOneForOne struct {
    Supervisor
    ch chan interface{}
}

type testSupervisorGenServer struct {
    GenServer
    // process Process
    // ch      chan interface{}
    // order   int
}

type testMessageStarted struct {
    pid   etf.Pid
    name  string
    order int
}

type testMessageTerminated struct {
    name  string
    order int
    pid   etf.Pid
}

type testSupervisorGenServerState struct {
    process *Process
    ch      chan interface{}
    order   int
}

func (tsv *testSupervisorGenServer) Init(p *Process, args ...interface{}) (state interface{}) {
    st := testSupervisorGenServerState{
        process: p,
        ch:      args[0].(chan interface{}),
        order:   args[1].(int),
    }

    // fmt.Printf("\ntestSupervisorGenServer ({%s, %s}) %d: Init\n", st.process.name, st.process.Node.FullName, st.order)
    st.ch <- testMessageStarted{
        pid:   p.Self(),
        name:  p.Name(),
        order: st.order,
    }

    return &st
}
func (tsv *testSupervisorGenServer) HandleCast(message etf.Term, state interface{}) (string, interface{}) {
    // fmt.Printf("testSupervisorGenServer ({%s, %s}): HandleCast: %#v\n", tsv.process.name, tsv.process.Node.FullName, message)
    // tsv.v <- message
    return "stop", message
    // return "noreply", state
}
func (tsv *testSupervisorGenServer) HandleCall(from etf.Tuple, message etf.Term, state interface{}) (string, etf.Term, interface{}) {
    // fmt.Printf("testSupervisorGenServer ({%s, %s}): HandleCall: %#v, From: %#v\n", tsv.process.name, tsv.process.Node.FullName, message, from)
    return "reply", message, state
}
func (tsv *testSupervisorGenServer) HandleInfo(message etf.Term, state interface{}) (string, interface{}) {
    // fmt.Printf("testSupervisorGenServer ({%s, %s}): HandleInfo: %#v\n", tsv.process.name, tsv.process.Node.FullName, message)
    // tsv.v <- message
    return "noreply", state
}
func (tsv *testSupervisorGenServer) Terminate(reason string, state interface{}) {
    // fmt.Printf("\ntestSupervisorGenServer ({%s, %s}): Terminate: %#v\n", tsv.process.name, tsv.process.Node.FullName, reason)
    st := state.(*testSupervisorGenServerState)
    st.ch <- testMessageTerminated{
        name:  st.process.Name(),
        pid:   st.process.Self(),
        order: st.order,
    }
}
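
// In these tests a child is stopped by casting it the desired exit reason:
// HandleCast returns "stop" with the message as the reason, and Terminate
// reports the event back to the test through the shared channel.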

func TestSupervisorOneForOne(t *testing.T) {
    var err error

    fmt.Printf("\n=== Test Supervisor - one for one\n")
    fmt.Printf("Starting node nodeSvOneForOne@localhost: ")
    node := CreateNode("nodeSvOneForOne@localhost", "cookies", NodeOptions{})
    if node == nil {
        t.Fatal("can't start node")
    } else {
        fmt.Println("OK")
    }

    // ===================================================================================================
    // test SupervisorChildRestartPermanent
    fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", SupervisorChildRestartPermanent)
    sv := &testSupervisorOneForOne{
        ch: make(chan interface{}, 10),
    }
    processSV, _ := node.Spawn("testSupervisorPermanent", ProcessOptions{}, sv, SupervisorChildRestartPermanent, sv.ch)
    children := make([]etf.Pid, 3)

    children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }

    fmt.Printf("... stopping children with 'normal' reason and waiting for them to restart ... ")
    for i := range children {
        processSV.Cast(children[i], "normal") // stopping child
    }

    if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { // waiting for 3 terminates and 3 starts
        t.Fatal(err)
    } else {
        statuses := []string{"new", "new", "new"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    fmt.Printf("Stopping supervisor 'testSupervisorPermanent' (%s)... ", SupervisorChildRestartPermanent)
    processSV.Exit(processSV.Self(), "x")
    if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    // ===================================================================================================
    // test SupervisorChildRestartTransient
    fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... ", SupervisorChildRestartTransient)
    sv = &testSupervisorOneForOne{
        ch: make(chan interface{}, 10),
    }
    processSV, _ = node.Spawn("testSupervisorTransient", ProcessOptions{}, sv, SupervisorChildRestartTransient, sv.ch)
    children = make([]etf.Pid, 3)

    children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }
    fmt.Printf("... stopping children with 'abnormal' reason and waiting for them to restart ... ")
    for i := range children {
        processSV.Cast(children[i], "abnormal") // stopping child
    }

    if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { // waiting for 3 terminates and 3 starts
        t.Fatal(err)
    } else {
        statuses := []string{"new", "new", "new"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    fmt.Printf("... stopping children with 'normal' reason and expecting no restarts ... ")
    for i := range children {
        processSV.Cast(children[i], "normal") // stopping child
    }

    if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }
    fmt.Printf("Stopping supervisor 'testSupervisorTransient' (%s)... ", SupervisorChildRestartTransient)
    processSV.Exit(processSV.Self(), "x")
    fmt.Println("OK")

    // ===================================================================================================
    // test SupervisorChildRestartTemporary

    fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", SupervisorChildRestartTemporary)
    sv = &testSupervisorOneForOne{
        ch: make(chan interface{}, 10),
    }
    processSV, _ = node.Spawn("testSupervisorTemporary", ProcessOptions{}, sv, SupervisorChildRestartTemporary, sv.ch)
    children = make([]etf.Pid, 3)

    children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }

    fmt.Printf("... stopping children with 'normal', 'abnormal', 'shutdown' reasons and expecting no restarts ... ")
    processSV.Cast(children[0], "normal")   // stopping child
    processSV.Cast(children[1], "abnormal") // stopping child
    processSV.Cast(children[2], "shutdown") // stopping child

    if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }
    fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (%s)... ", SupervisorChildRestartTemporary)
    processSV.Exit(processSV.Self(), "x")
    fmt.Println("OK")

}

func (ts *testSupervisorOneForOne) Init(args ...interface{}) SupervisorSpec {
    restart := args[0].(string)
    ch := args[1].(chan interface{})
    return SupervisorSpec{
        Children: []SupervisorChildSpec{
            SupervisorChildSpec{
                Name:    "testGS1",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 0},
            },
            SupervisorChildSpec{
                Name:    "testGS2",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 1},
            },
            SupervisorChildSpec{
                Name:    "testGS3",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 2},
            },
        },
        Strategy: SupervisorStrategy{
            Type:      SupervisorStrategyOneForOne,
            Intensity: 10,
            Period:    5,
        },
    }
}

func waitNeventsSupervisorChildren(ch chan interface{}, n int, children []etf.Pid) ([]etf.Pid, error) {
    // n - number of events that have to be awaited
    // run the for-loop 'n+1' times to catch an exceeded number of events
    childrenNew := make([]etf.Pid, len(children))
    copy(childrenNew, children)
    for i := 0; i < n+1; i++ {
        select {
        case c := <-ch:
            switch child := c.(type) {
            case testMessageTerminated:
                // fmt.Println("TERM", child)
                childrenNew[child.order] = etf.Pid{} // set empty pid
            case testMessageStarted:
                // fmt.Println("START", child)
                childrenNew[child.order] = child.pid
            }

        case <-time.After(100 * time.Millisecond):
            if i == n {
                return childrenNew, nil
            }
            if i < n {
                return childrenNew, fmt.Errorf("expected %d events, but got %d. TIMEOUT", n, i)
            }

        }
    }
    return childrenNew, fmt.Errorf("expected %d events, but got %d. ", n, n+1)
}
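
// The loop above runs n+1 times on purpose: the extra iteration must end in
// a timeout, which proves exactly n events arrived; receiving an (n+1)-th
// event means the supervisor did more than the test expected, and the final
// error return reports it.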

func checkExpectedChildrenStatus(children, children1 []etf.Pid, statuses []string) bool {
    empty := etf.Pid{}
    for i := 0; i < len(statuses); i++ {
        switch statuses[i] {
        case "new":
            if children1[i] == empty { // is the empty pid (child has been stopped)
                return false
            }
            if children[i] == children1[i] { // this value has to be different
                return false
            }

        case "empty":
            if children1[i] != empty {
                return false
            }

        case "old":
            if children[i] != children1[i] { // this value has to stay the same
                return false
            }
        }

    }
    return true
}
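
// Example: under a one_for_one/transient setup, stopping only the second
// child with a "normal" reason would be checked as
//
//	checkExpectedChildrenStatus(old, new, []string{"old", "empty", "old"})
//
// i.e. the stopped child is gone and its siblings keep their pids.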

@@ -0,0 +1,303 @@
package ergonode

// - Supervisor

// - rest for one (permanent)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(normal)   (sv1 stopping gs1)
//                    (sv1 stopping gs2,gs3)
//                    (sv1 starting gs1,gs2,gs3)
// gs2.stop(shutdown) (sv1 stopping gs2)
//                    (sv1 stopping gs3)
//                    (sv1 starting gs2,gs3)
// gs3.stop(panic)    (sv1 stopping gs3)
//                    (sv1 starting gs3)
//
// - rest for one (transient)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// gs1.stop(panic)    (sv1 stopping gs1)
//                    (sv1 stopping gs2, gs3)
//                    (sv1 starting gs1, gs2, gs3)
// gs2.stop(normal)   (sv1 stopping gs2)
//                    (sv1 stopping gs3)
//                    (sv1 starting gs3; gs2 is not restarted)
// gs3.stop(shutdown) (sv1 stopping gs3; gs3 is not restarted)
//
// - rest for one (temporary)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// stopping a child stops it and every child defined after it,
// and nothing is restarted; a fresh supervisor is started for
// each reason: normal, abnormal, shutdown

import (
    "fmt"
    "testing"

    "github.com/halturin/ergonode/etf"
    // "time"
    // "github.com/halturin/ergonode/etf"
)

type testSupervisorRestForOne struct {
    Supervisor
    ch chan interface{}
}

func TestSupervisorRestForOne(t *testing.T) {
    var err error
    fmt.Printf("\n=== Test Supervisor - rest for one\n")
    fmt.Printf("Starting node nodeSvRestForOne@localhost: ")
    node := CreateNode("nodeSvRestForOne@localhost", "cookies", NodeOptions{})
    if node == nil {
        t.Fatal("can't start node")
    } else {
        fmt.Println("OK")
    }

    // ===================================================================================================
    // test SupervisorChildRestartPermanent
    fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", SupervisorChildRestartPermanent)
    sv := &testSupervisorRestForOne{
        ch: make(chan interface{}, 10),
    }
    processSV, _ := node.Spawn("testSupervisorPermanent", ProcessOptions{}, sv, SupervisorChildRestartPermanent, sv.ch)
    children := make([]etf.Pid, 3)

    children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }

    // testing permanent
    testCases := []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"new", "new", "new"},
            events:   6, // waiting for 3 terminates and 3 starts
        },
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"old", "new", "new"},
            events:   4, // waiting for 2 terminates and 2 starts
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"old", "old", "new"},
            events:   2, // waiting for 1 terminate and 1 start
        },
    }
    for i := range children {
        fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting the rest of them ... ", i+1, testCases[i].reason)
        processSV.Cast(children[i], testCases[i].reason) // stopping child

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[i].statuses, children, children1)
                t.Fatal(e)
            }
        }
    }

    fmt.Printf("Stopping supervisor 'testSupervisorPermanent' (%s)... ", SupervisorChildRestartPermanent)
    processSV.Exit(processSV.Self(), "x")
    if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    // ===================================================================================================
    // test SupervisorChildRestartTransient
    fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... ", SupervisorChildRestartTransient)
    sv = &testSupervisorRestForOne{
        ch: make(chan interface{}, 10),
    }
    processSV, _ = node.Spawn("testSupervisorTransient", ProcessOptions{}, sv, SupervisorChildRestartTransient, sv.ch)
    children = make([]etf.Pid, 3)

    children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
    if err != nil {
        t.Fatal(err)
    } else {
        fmt.Println("OK")
    }

    // testing transient
    testCases = []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"new", "new", "new"},
            events:   6, // waiting for 3 terminates and 3 starts
        },
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"old", "empty", "new"},
            events:   3, // waiting for 2 terminates and 1 start
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"old", "empty", "empty"},
            events:   1, // waiting for 1 terminate
        },
    }
    for i := range children {
        fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting the rest of them ... ", i+1, testCases[i].reason)
        processSV.Cast(children[i], testCases[i].reason) // stopping child

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[i].statuses, children, children1)
                t.Fatal(e)
            }
        }
    }

    fmt.Printf("Stopping supervisor 'testSupervisorTransient' (%s)... ", SupervisorChildRestartTransient)
    processSV.Exit(processSV.Self(), "x")
    if children1, err := waitNeventsSupervisorChildren(sv.ch, 1, children); err != nil {
        t.Fatal(err)
    } else {
        statuses := []string{"empty", "empty", "empty"}
        if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
            fmt.Println("OK")
            children = children1
        } else {
            e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
            t.Fatal(e)
        }
    }

    // ===================================================================================================
    // test SupervisorChildRestartTemporary

    // testing temporary
    // A temporary child process is never restarted (even when the supervisor's
    // restart strategy is rest_for_one or one_for_all and a sibling's death
    // causes the temporary process to be terminated).
    testCases = []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"empty", "empty", "empty"},
            events:   3, // waiting for 3 terminates
        },
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"old", "empty", "empty"},
            events:   2, // waiting for 2 terminates
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"old", "old", "empty"},
            events:   1, // waiting for 1 terminate
        },
    }

    for i := range testCases {
        fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", SupervisorChildRestartTemporary)
        sv = &testSupervisorRestForOne{
            ch: make(chan interface{}, 10),
        }
        processSV, _ = node.Spawn("testSupervisorTemporary", ProcessOptions{}, sv, SupervisorChildRestartTemporary, sv.ch)
        children = make([]etf.Pid, 3)

        children, err = waitNeventsSupervisorChildren(sv.ch, 3, children)
        if err != nil {
            t.Fatal(err)
        } else {
            fmt.Println("OK")
        }

        fmt.Printf("... stopping child %d with '%s' reason and without restarting ... ", i+1, testCases[i].reason)
        processSV.Cast(children[i], testCases[i].reason) // stopping child

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[i].statuses, children, children1)
                t.Fatal(e)
            }
        }

        fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (%s)... ", SupervisorChildRestartTemporary)
        processSV.Exit(processSV.Self(), "x")
        if children1, err := waitNeventsSupervisorChildren(sv.ch, 3-testCases[i].events, children); err != nil {
            t.Fatal(err)
        } else {
            statuses := []string{"empty", "empty", "empty"}
            if checkExpectedChildrenStatus(children[:], children1[:], statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
                t.Fatal(e)
            }
        }
    }

}

func (ts *testSupervisorRestForOne) Init(args ...interface{}) SupervisorSpec {
    restart := args[0].(string)
    ch := args[1].(chan interface{})
    return SupervisorSpec{
        Children: []SupervisorChildSpec{
            SupervisorChildSpec{
                Name:    "testGS1",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 0},
            },
            SupervisorChildSpec{
                Name:    "testGS2",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 1},
            },
            SupervisorChildSpec{
                Name:    "testGS3",
                Child:   &testSupervisorGenServer{},
                Restart: restart,
                Args:    []interface{}{ch, 2},
            },
        },
        Strategy: SupervisorStrategy{
            Type:      SupervisorStrategyRestForOne,
            Intensity: 10,
            Period:    5,
        },
    }
}

@@ -0,0 +1,199 @@
package ergonode

// - Supervisor

// - simple one for one (permanent)
// start node1
// start supervisor sv1 with genservers gs1,gs2,gs3
// .... TODO: describe

import (
    "fmt"
    "testing"

    "github.com/halturin/ergonode/etf"
)

type testSupervisorSimpleOneForOne struct {
    Supervisor
    ch chan interface{}
}

func TestSupervisorSimpleOneForOne(t *testing.T) {
    fmt.Printf("\n=== Test Supervisor - simple one for one\n")
    fmt.Printf("Starting node nodeSvSimpleOneForOne@localhost: ")
    node := CreateNode("nodeSvSimpleOneForOne@localhost", "cookies", NodeOptions{})
    if node == nil {
        t.Fatal("can't start node")
    } else {
        fmt.Println("OK")
    }

    testCases := []ChildrenTestCase{
        ChildrenTestCase{
            reason:   "abnormal",
            statuses: []string{"new", "new", "new", "new", "empty", "empty"},
            events:   10, // waiting for 6 terminates and 4 starts
        },
        ChildrenTestCase{
            reason:   "normal",
            statuses: []string{"new", "new", "empty", "empty", "empty", "empty"},
            events:   8, // waiting for 6 terminates and 2 starts
        },
        ChildrenTestCase{
            reason:   "shutdown",
            statuses: []string{"new", "new", "empty", "empty", "empty", "empty"},
            events:   8, // the same as for the 'normal' reason
        },
    }

    for c := range testCases {
        fmt.Printf("Starting supervisor 'testSupervisor' (reason: %s)... ", testCases[c].reason)
        sv := &testSupervisorSimpleOneForOne{
            ch: make(chan interface{}, 15),
        }
        processSV, _ := node.Spawn("testSupervisor", ProcessOptions{}, sv, sv.ch)
        children := make([]etf.Pid, 6)
        children1, err := waitNeventsSupervisorChildren(sv.ch, 0, children)
        if err != nil {
            t.Fatal(err)
        } else {
            // nothing has been started yet, so all pids should be empty
            statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"}
            if checkExpectedChildrenStatus(children, children1, statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
                t.Fatal(e)
            }
        }

        fmt.Printf("... starting 6 children ... ")

        // start children
        for i := 0; i < 6; i = i + 2 {
            p, _ := sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i)
            children[i] = p
            // start the same spec twice
            p, _ = sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i+1)
            children[i+1] = p
        }
        if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil {
            t.Fatal(err)
        } else {
            // the pids recorded above should be unchanged after the start events
            statuses := []string{"old", "old", "old", "old", "old", "old"}
            if checkExpectedChildrenStatus(children, children1, statuses) {
                fmt.Println("OK")
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
                t.Fatal(e)
            }
        }

        // kill them all with reason = testCases[c].reason
        fmt.Printf("... stopping children with '%s' reason and waiting for restarting some of them ... ", testCases[c].reason)

        for k := range children {
            processSV.Cast(children[k], testCases[c].reason)
        }

        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events, children); err != nil {
            t.Fatal(err)
        } else {
            if checkExpectedChildrenStatus(children, children1, testCases[c].statuses) {
                fmt.Println("OK")
                children = children1
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", testCases[c].statuses, children, children1)
                t.Fatal(e)
            }
        }

        fmt.Printf("Stopping supervisor 'testSupervisor' (reason: %s)... ", testCases[c].reason)
        processSV.Exit(processSV.Self(), "x")
        if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events-len(children), children); err != nil {
            t.Fatal(err)
        } else {
            statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"}
            if checkExpectedChildrenStatus(children, children1, statuses) {
                fmt.Println("OK")
            } else {
                e := fmt.Errorf("got something other than expected (%v). old: %v new: %v", statuses, children, children1)
                t.Fatal(e)
            }
        }

    }

}

func (ts *testSupervisorSimpleOneForOne) Init(args ...interface{}) SupervisorSpec {
    ch := args[0].(chan interface{})
    return SupervisorSpec{
        Children: []SupervisorChildSpec{
            SupervisorChildSpec{
                Name:    "testGS1",
                Child:   &testSupervisorGenServer{},
                Restart: SupervisorChildRestartPermanent,
                Args:    []interface{}{ch, 0},
            },
            SupervisorChildSpec{
                Name:    "testGS2",
                Child:   &testSupervisorGenServer{},
                Restart: SupervisorChildRestartTransient,
                Args:    []interface{}{ch, 1},
            },
            SupervisorChildSpec{
                Name:    "testGS3",
                Child:   &testSupervisorGenServer{},
                Restart: SupervisorChildRestartTemporary,
                Args:    []interface{}{ch, 2},
            },
        },
        Strategy: SupervisorStrategy{
            Type:      SupervisorStrategySimpleOneForOne,
            Intensity: 10,
            Period:    5,
        },
    }
}

// func waitNeventsSupervisorSimpleOneForOneChildren(ch chan interface{}, n int, children []etf.Pid) ([]etf.Pid, error) {
//     // n - number of events that have to be awaited
//     // run the for-loop 'n+1' times to catch an exceeded number of events
//     for i := 0; i < n+1; i++ {
//         select {
//         case c := <-ch:
//             switch child := c.(type) {
//             case testMessageTerminated:
//                 for n := range children {
//                     if children[n] == child.pid {
//                         children[n] = etf.Pid{}
//                         break
//                     }
//                 }
//                 fmt.Println("TERM", child)
//             case testMessageStarted:
//                 fmt.Println("START", child)
//                 for n := range children {
//                     if children[n] == child.pid {
//                         panic("pid already exist")
//                     }
//                 }
//                 children = append(children, child.pid)
//             }
//
//         case <-time.After(100 * time.Millisecond):
//             if i == n {
//                 return children, nil
//             }
//             if i < n {
//                 return children, fmt.Errorf("expected %d events, but got %d. TIMEOUT", n, i)
//             }
//
//         }
//     }
//     return children, fmt.Errorf("expected %d events, but got %d. ", n, n+1)
// }

@@ -0,0 +1,53 @@
package ergonode

import (
    "fmt"
    "net"

    "github.com/halturin/ergonode/etf"
)

var (
    ErrAppAlreadyLoaded  = fmt.Errorf("Application is already loaded")
    ErrAppAlreadyStarted = fmt.Errorf("Application is already started")
    ErrAppUnknown        = fmt.Errorf("Unknown application name")
    ErrAppIsNotRunning   = fmt.Errorf("Application is not running")

    ErrProcessBusy = fmt.Errorf("Process is busy")

    ErrNameIsTaken = fmt.Errorf("Name is taken")

    ErrUnsupportedRequest = fmt.Errorf("Unsupported request")
    ErrTimeout            = fmt.Errorf("Timed out")
)

// Distributed operations codes (http://www.erlang.org/doc/apps/erts/erl_dist_protocol.html)
const (
    distProtoLINK                   = 1
    distProtoSEND                   = 2
    distProtoEXIT                   = 3
    distProtoUNLINK                 = 4
    distProtoNODE_LINK              = 5
    distProtoREG_SEND               = 6
    distProtoGROUP_LEADER           = 7
    distProtoEXIT2                  = 8
    distProtoSEND_TT                = 12
    distProtoEXIT_TT                = 13
    distProtoREG_SEND_TT            = 16
    distProtoEXIT2_TT               = 18
    distProtoMONITOR                = 19
    distProtoDEMONITOR              = 20
    distProtoMONITOR_EXIT           = 21
    distProtoSEND_SENDER            = 22
    distProtoSEND_SENDER_TT         = 23
    distProtoPAYLOAD_EXIT           = 24
    distProtoPAYLOAD_EXIT_TT        = 25
    distProtoPAYLOAD_EXIT2          = 26
    distProtoPAYLOAD_EXIT2_TT       = 27
    distProtoPAYLOAD_MONITOR_P_EXIT = 28
)

// peer describes a connection to a remote node: the TCP link itself and the
// outgoing channel used to serialize terms onto it.
type peer struct {
    conn net.Conn
    send chan []etf.Term
}