Skip to content

Commit 4a52b59

Browse files
authored
Merge pull request #199 from NodeFactoryIo/Release-0.5.4
Release 0.5.4
2 parents bbeee18 + 9dc4ab9 commit 4a52b59

6 files changed

Lines changed: 52 additions & 26 deletions

File tree

.version

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
version=0.5.3
1+
version=0.5.4

CHANGELOG.md

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,13 @@
11
# Changelog
22

3-
## [unreleased]((https://github.com/NodeFactoryIo/vedran/tree/HEAD))
4-
[Full Changelog](https://github.com/NodeFactoryIo/vedran/compare/v0.5.3...HEAD)
3+
## [v0.5.4](https://github.com/NodeFactoryIo/vedran/tree/v0.5.4)
4+
[Full Changelog](https://github.com/NodeFactoryIo/vedran/compare/v0.5.3...v0.5.4)
55

66
### Added
77

88
### Fix
99
- Fix tunnel tcp connections not closing after requests finish [\#197](https://github.com/NodeFactoryIo/vedran/pull/197) ([mpetrun5](https://github.com/mpetrun5))
10+
- Ping handling upgrade [\#198](https://github.com/NodeFactoryIo/vedran/pull/198) ([MakMuftic](https://github.com/MakMuftic))
1011

1112
### Changed
1213

docker-compose.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,11 +26,11 @@ services:
2626
container_name: "vedran"
2727

2828
vedran-daemon:
29-
image: nodefactory/vedran-daemon:v0.3.2
29+
image: nodefactory/vedran-daemon:v0.3.4
3030
depends_on:
3131
- vedran
3232
- polkadot
33-
command: --id test-id --lb http://vedran:4000 --node-rpc http://polkadot:9933 --node-ws http://polkadot:9944 --node-metrics http://polkadot:9615 --payout-address 1Z4GTfUN2QHkSeHqdBUDawgbEWzqtfRG8ouJZ26z3cm7ePN --log-level info
33+
command: --id test-id --lb http://vedran:4000 --node-rpc http://polkadot:9933 --node-ws ws://polkadot:9944 --node-metrics http://polkadot:9615 --payout-address 1Z4GTfUN2QHkSeHqdBUDawgbEWzqtfRG8ouJZ26z3cm7ePN --log-level info
3434

3535
prometheus:
3636
image: prom/prometheus

internal/controllers/ping.go

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ import (
1010
log "github.com/sirupsen/logrus"
1111
)
1212

13-
const pingOffset = 5
13+
const pingOffset = 8
1414

1515
func (c ApiController) PingHandler(w http.ResponseWriter, r *http.Request) {
1616
request := r.Context().Value(auth.RequestContextKey).(*auth.RequestContext)
@@ -20,18 +20,24 @@ func (c ApiController) PingHandler(w http.ResponseWriter, r *http.Request) {
2020
log.Errorf("Unable to calculate node downtime, error: %v", err)
2121
}
2222

23-
if math.Abs(downtimeDuration.Seconds()) > (stats.PingIntervalInSeconds + pingOffset) {
24-
downtime := models.Downtime{
25-
Start: lastPingTime,
26-
End: request.Timestamp,
27-
NodeId: request.NodeId,
23+
// if two pings come one after another (within a 2 second interval)
24+
// this means that one ping was stuck in the network and
25+
// there is no need to write multiple downtimes
26+
if math.Abs(request.Timestamp.Sub(lastPingTime).Seconds()) > 2 {
27+
// check if there was downtime
28+
if math.Abs(downtimeDuration.Seconds()) > (stats.PingIntervalInSeconds + pingOffset) {
29+
downtime := models.Downtime{
30+
Start: lastPingTime,
31+
End: request.Timestamp,
32+
NodeId: request.NodeId,
33+
}
34+
err = c.repositories.DowntimeRepo.Save(&downtime)
35+
if err != nil {
36+
log.Errorf("Unable to save node downtime, error: %v", err)
37+
}
38+
39+
log.Debugf("Saved node %s downtime of: %f", request.NodeId, math.Abs(downtimeDuration.Seconds()))
2840
}
29-
err = c.repositories.DowntimeRepo.Save(&downtime)
30-
if err != nil {
31-
log.Errorf("Unable to save node downtime, error: %v", err)
32-
}
33-
34-
log.Debugf("Saved node %s downtime of: %f", request.NodeId, math.Abs(downtimeDuration.Seconds()))
3541
}
3642

3743
// save ping to database

internal/controllers/ping_test.go

Lines changed: 19 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@ func TestApiController_PingHandler(t *testing.T) {
2727
downtimeSaveErr error
2828
calculateDowntimeErr error
2929
downtimeDuration time.Duration
30+
requestTimestamp time.Time
31+
lastPingTimestamp time.Time
3032
}{
3133
{
3234
name: "Returns 200 if downtime calculation fails",
@@ -37,26 +39,32 @@ func TestApiController_PingHandler(t *testing.T) {
3739
downtimeSaveCallCount: 0,
3840
downtimeDuration: time.Duration(0),
3941
calculateDowntimeErr: fmt.Errorf("ERROR"),
42+
requestTimestamp: time.Now(),
43+
lastPingTimestamp: time.Now().Add(-5 * time.Second),
4044
},
4145
{
42-
name: "Returns 200 if donwtime save fails",
46+
name: "Returns 200 if downtime save fails",
4347
statusCode: 200,
4448
pingSaveCallCount: 1,
4549
pingSaveErr: nil,
4650
downtimeSaveErr: fmt.Errorf("ERROR"),
4751
downtimeSaveCallCount: 1,
48-
downtimeDuration: time.Duration(time.Second * 11),
52+
downtimeDuration: time.Duration(time.Second * 19),
4953
calculateDowntimeErr: nil,
54+
requestTimestamp: time.Now(),
55+
lastPingTimestamp: time.Now().Add(-19 * time.Second),
5056
},
5157
{
52-
name: "Saves downtime if downtime duration more than 5 seconds",
58+
name: "Saves downtime if downtime duration more than 18 seconds",
5359
statusCode: 200,
5460
pingSaveCallCount: 1,
5561
pingSaveErr: nil,
5662
downtimeSaveErr: nil,
5763
downtimeSaveCallCount: 1,
58-
downtimeDuration: time.Duration(time.Second * 11),
64+
downtimeDuration: time.Duration(time.Second * 19),
5965
calculateDowntimeErr: nil,
66+
requestTimestamp: time.Now(),
67+
lastPingTimestamp: time.Now().Add(-19 * time.Second),
6068
},
6169
{
6270
name: "Returns 500 if saving ping fails",
@@ -67,6 +75,8 @@ func TestApiController_PingHandler(t *testing.T) {
6775
downtimeSaveCallCount: 0,
6876
downtimeDuration: time.Duration(time.Second * 8),
6977
calculateDowntimeErr: nil,
78+
requestTimestamp: time.Now(),
79+
lastPingTimestamp: time.Now().Add(-5 * time.Second),
7080
},
7181
{
7282
name: "Returns 200 and does not save downtime if downtime duration less than 5 + 5 seconds",
@@ -77,13 +87,14 @@ func TestApiController_PingHandler(t *testing.T) {
7787
downtimeSaveCallCount: 0,
7888
downtimeDuration: time.Duration(time.Second * 8),
7989
calculateDowntimeErr: nil,
90+
requestTimestamp: time.Now(),
91+
lastPingTimestamp: time.Now().Add(-5 * time.Second),
8092
},
8193
}
8294

8395
for _, test := range tests {
8496
t.Run(test.name, func(t *testing.T) {
8597

86-
timestamp := time.Now()
8798
// create mock controller
8899
nodeRepoMock := mocks.NodeRepository{}
89100
recordRepoMock := mocks.RecordRepository{}
@@ -92,10 +103,10 @@ func TestApiController_PingHandler(t *testing.T) {
92103
pingRepoMock := mocks.PingRepository{}
93104
pingRepoMock.On("Save", &models.Ping{
94105
NodeId: "1",
95-
Timestamp: timestamp,
106+
Timestamp: test.requestTimestamp,
96107
}).Return(test.pingSaveErr)
97108
pingRepoMock.On("CalculateDowntime", mock.Anything, mock.Anything).Return(
98-
time.Now(), test.downtimeDuration, test.calculateDowntimeErr)
109+
test.lastPingTimestamp, test.downtimeDuration, test.calculateDowntimeErr)
99110

100111
downtimeRepoMock := mocks.DowntimeRepository{}
101112
downtimeRepoMock.On("Save", mock.Anything).Return(test.downtimeSaveErr)
@@ -113,7 +124,7 @@ func TestApiController_PingHandler(t *testing.T) {
113124
req, _ := http.NewRequest("POST", "/api/v1/node", bytes.NewReader(nil))
114125
c := &auth.RequestContext{
115126
NodeId: "1",
116-
Timestamp: timestamp,
127+
Timestamp: test.requestTimestamp,
117128
}
118129
ctx := context.WithValue(req.Context(), auth.RequestContextKey, c)
119130
req = req.WithContext(ctx)

internal/schedule/checkactive/schedule.go

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ func StartScheduledTask(repos *repositories.Repos) {
3434
func scheduledTask(repos *repositories.Repos, actions actions.Actions) {
3535
log.Debug("Started task: check all active nodes")
3636
activeNodes := repos.NodeRepo.GetAllActiveNodes()
37-
37+
var activeNodesAfterCheck []string
3838
for _, node := range *activeNodes {
3939

4040
pingActive, err := active.CheckIfPingActive(node.ID, repos)
@@ -60,6 +60,14 @@ func scheduledTask(repos *repositories.Repos, actions actions.Actions) {
6060
log.Errorf("Unable to remove node %s from active because of %v", node.ID, err)
6161
}
6262
log.Debugf("Node %s metrics lagging more than 10 blocks, removed node from active", node.ID)
63+
} else {
64+
activeNodesAfterCheck = append(activeNodesAfterCheck, node.ID)
6365
}
6466
}
67+
68+
if len(activeNodesAfterCheck) == 0 {
69+
log.Debug("There is no active nodes currently")
70+
} else {
71+
log.Debugf("Currently active nodes: %v", activeNodesAfterCheck)
72+
}
6573
}

0 commit comments

Comments
 (0)