forked from PelionIoT/devicedb
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.go
More file actions
1841 lines (1477 loc) · 79.6 KB
/
main.go
File metadata and controls
1841 lines (1477 loc) · 79.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
package main
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
"flag"
"fmt"
"os"
"github.com/armPelionEdge/devicedb/cluster"
"strings"
"strconv"
"errors"
"time"
"sync"
"context"
"io"
"io/ioutil"
"crypto/tls"
"crypto/x509"
. "github.com/armPelionEdge/devicedb/client"
. "github.com/armPelionEdge/devicedb/server"
"github.com/armPelionEdge/devicedb/storage"
"github.com/armPelionEdge/devicedb/node"
. "github.com/armPelionEdge/devicedb/error"
. "github.com/armPelionEdge/devicedb/version"
. "github.com/armPelionEdge/devicedb/compatibility"
. "github.com/armPelionEdge/devicedb/shared"
. "github.com/armPelionEdge/devicedb/logging"
. "github.com/armPelionEdge/devicedb/util"
. "github.com/armPelionEdge/devicedb/raft"
. "github.com/armPelionEdge/devicedb/merkle"
. "github.com/armPelionEdge/devicedb/bucket"
. "github.com/armPelionEdge/devicedb/data"
ddbBenchmark "github.com/armPelionEdge/devicedb/benchmarks"
"github.com/armPelionEdge/devicedb/routes"
"github.com/armPelionEdge/devicedb/historian"
"github.com/olekukonko/tablewriter"
)
// defaultPort is the default server port (8080).
// NOTE(review): its use is not visible in this chunk of main.go — confirm against the rest of the file.
const defaultPort uint = 8080
// templateConfig is the default relay configuration template written out by
// the "conf" subcommand. It is a YAML document whose comments document each
// field for the operator. Spelling errors in the emitted comment text have
// been corrected; no YAML keys or values were changed.
var templateConfig string =
`# The db field specifies the directory where the database files reside on
# disk. If it doesn't exist it will be created.
# **REQUIRED**
db: /tmp/devicedb
# The port field specifies the port number on which to run the database server
port: 9090
# The sync session limit is the number of sync sessions that can happen
# concurrently. Adjusting this field allows the database node to synchronize
# with its peers more or less quickly. It is recommended that this field be
# half the number of cores on the machine where the devicedb node is running
# **REQUIRED**
syncSessionLimit: 2
# The sync session period is the time in milliseconds that determines the rate
# at which sync sessions are initiated with neighboring peers. Adjusting this
# too high will result in unwanted amounts of network traffic and too low may
# result in slow convergence rates for replicas that have not been in contact
# for some time. A rate on the order of seconds is recommended generally
# **REQUIRED**
syncSessionPeriod: 1000
# This field adjusts the maximum number of objects that can be transferred in
# one sync session. A higher number will result in faster convergence between
# database replicas. This field must be positive and defaults to 1000
syncExplorationPathLimit: 1000
# In addition to background syncing, updates can also be forwarded directly
# to neighbors when a connection is established in order to reduce the time
# that replicas remain divergent. An update will immediately get forwarded
# to the number of connected peers indicated by this number. If this value is
# zero then the update is forwarded to ALL connected peers. In a small network
# of nodes it may be better to set this to zero.
# **REQUIRED**
syncPushBroadcastLimit: 0
# Garbage collection settings determine how often and to what degree tombstones,
# that is markers of deletion, are removed permanently from the database
# replica. The default values are the minimum allowed settings for these
# properties.
# The garbage collection interval is the amount of time between garbage collection
# sweeps in milliseconds. The lowest it can be set is every ten minutes as shown
# below but could very well be set for once a day, or once a week without much
# ill effect depending on the use case or how aggressively disk space needs to be
# preserved
gcInterval: 300000
# The purge age defines the age past which tombstone will be purged from storage
# Tombstones are markers of key deletion and need to be around long enough to
# propagate through the network of database replicas and ensure a deletion happens
# Setting this value too low may cause deletions that don't propagate to all replicas
# if nodes are often disconnected for a long time. Setting it too high may mean
# that more disk space is used than is needed keeping around old tombstones for
# keys that will no longer be used. This field is also in milliseconds
gcPurgeAge: 600000
# This field can be used to specify how this node handles alert forwarding.
# alerts:
# # How often in milliseconds the latest alerts are forwarded to the cloud
# forwardInterval: 60000
#
# This field can be used to specify how this node handles time-series data.
# These settings adjust how and when historical data is purged from the
# history. If this field is not specified then default values are used.
# history:
# # When this flag is true items in the history are purged from the log
# # after they are successfully forwarded to the cloud. When set to false
# # items are only purged after. It defaults to false if not specified
# purgeOnForward: false
# # This setting controls the amount of events that are left in the log
# # before purging the oldest logged events. It is set to 0 by default
# # which means old events will never be purged from the log
# eventLimit: 1000
# # This setting controls the minimum number of events that will be left
# # in the log after a purge is triggered. In this case, a log purge
# # is triggered when there are 1001 events stored in the history log
# # and the purge will delete all events until only the 500 most recent
# # events remain. If this value is greater than or equal to eventLimit
# # then it is ignored. This field defaults to 0 which means all events
# # will be purged from disk once the event limit is reached.
# eventFloor: 500
# # This field controls how many events are batched together for deletion
# # when purging a range of events from the log. If 10000 events need to
# # be purged and this field is set to 1000 then there will be 10 batches
# # applied to the underlying storage each containing 1000 delete operations.
# # It is a good idea to leave this value between 1000 and 10000. Too large
# # a number can cause large amounts of memory to be used. This field defaults
# # to 1 if left unspecified meaning events will not be batched and purges
# # of large ranges may take a very long time. If this value is negative
# # then it defaults to 1.
# purgeBatchSize: 1000
# # This setting configures how long devicedb should let history logs queue up
# # before forwarding them to the cloud. If the number of queued history logs
# # exceeds the threshold indicated by forwardThreshold before the forwardInterval
# # has passed, then the log forwarding will be triggered before that time.
# # This value is represented in milliseconds. It must be at least 1000 (once a second)
# forwardInterval: 60000
# # This setting configures how many history logs can queue up before a forwarding
# # session is triggered. Must be >= 0. Zero indicates no threshold. In other
# # words if this value is set to 0 then only forwardInterval is used to
# # determine when to forward history logs to the cloud
# forwardThreshold: 100
# # When a forwarding session has been triggered this setting configures how
# # many logs are included in the uploaded history log batches. If there are
# # 1000 history logs and forwardBatchSize is 100 then there would be 10 batches
# # of 100 logs uploaded to the cloud. It must be >= 0. If the batch size is 0
# # then there is no limit on the batch size.
# forwardBatchSize: 1000
# The merkle depth adjusts how efficiently the sync process resolves
# differences between database nodes. A rule of thumb is to set this as high
# as memory constraints allow. Estimated memory overhead for a given depth is
# calculated with the formula: M = 3*(2^(d + 4)). The following table gives a
# quick reference to choose an appropriate depth.
#
# depth | memory overhead
# 2 | 192 bytes (0.1 KiB)
# 4 | 768 bytes (0.8 KiB)
# 6 | 3072 bytes (3.0 KiB)
# 8 | 12288 bytes (12 KiB)
# 10 | 49152 bytes (48 KiB)
# 12 | 196608 bytes (192 KiB) (0.2 MiB)
# 14 | 786432 bytes (768 KiB) (0.7 MiB)
# 16 | 3145728 bytes (3072 KiB) (3.0 MiB)
# 18 | 12582912 bytes (12288 KiB) (12 MiB)
# 20 | 50331648 bytes (49152 KiB) (48 MiB)
# 22 | 201326592 bytes (196608 KiB) (192 MiB) (0.2 GiB)
# 24 | 805306368 bytes (786432 KiB) (768 MiB) (0.8 GiB)
# 26 | 3221225472 bytes (3145728 KiB) (3072 MiB) (3 GiB)
# 28 | 12884901888 bytes (12582912 KiB) (12288 MiB) (12 GiB)
#
# A larger merkle depth also allows more concurrency when processing many
# concurrent updates
# **REQUIRED**
merkleDepth: 19
# The peer list specifies a list of other database nodes that are in the same
# cluster as this node. This database node will continually try to connect to
# and sync with the nodes in this list. Alternatively peers can be added at
# runtime if an authorized client requests that the node connect to another
# node.
# **REQUIRED**
peers:
# Uncomment these next lines if there are other peers in the cluster to connect
# to and edit accordingly
# - id: WWRL000001
# host: 127.0.0.1
# port: 9191
# - id: WWRL000002
# host: 127.0.0.1
# port: 9292
# These are the possible log levels in order from lowest to highest level.
# Specifying a particular log level means you will see all messages at that
# level and below. For example, if debug is specified, all log messages will
# be seen. If no level is specified or if the log level specified is not valid
# then the level defaults to the error level
# critical
# error
# warning
# notice
# info
# debug
logLevel: info
# This field can be used to specify a devicedb cloud node to which to connect
# If omitted then no cloud connection is established.
# cloud:
# # noValidate is a flag specifying whether or not to validate the cloud
# # node's TLS certificate chain. If omitted this field defaults to false
# # Setting this field to true is not recommended in production. It can
# # be useful, however, when running against a test cloud where self-signed
# # certificates are used.
# noValidate: true
# # The id field is used to verify the host name that the cloud server provides
# # in its TLS certificate chain. If this field is omitted then the host field
# # will be used as the expected host name in the cloud's certificate. If
# # noValidate is true then no verification is performed either way so this
# # effectively ignored. In this example, the TLS certificate uses a wildcard
# # certificate so the server name provided in the certificate will not
# # match the domain name of the host to which this node is connecting.
# id: *.wigwag.com
# host: devicedb.wigwag.com
# port: 443
# # Starting in version 1.3.0 of devicedb a separate host name, port, and certificate
# # name can be specified for historical data forwarding. This is to allow decoupling
# # between the devicedb cloud service and historical data processing by putting
# # historical data logging and gathering sync into a standalone cloud service
# #
# # The historyID field is used to verify the host name that the cloud server provides
# # in its TLS certificate chain. If this field is omitted then the historyHost field
# # will be used as the expected host name in the cloud's certificate. If
# # noValidate is true then no verification is performed either way so this
# # effectively ignored. In this example, the TLS certificate uses a wildcard
# # certificate so the server name provided in the certificate will not
# # match the domain name of the host to which this node is connecting.
# historyID: *.wigwag.com
# # The URI of the history service that collects history logs
# historyURI: https://history.wigwag.com/history
# alertsID: *.wigwag.com
# alertsURI: https://alerts.wigwag.com/alerts
# The TLS options specify file paths to PEM encoded SSL certificates and keys
# All connections between database nodes use TLS to identify and authenticate
# each other. A single certificate and key can be used if that certificate has
# the server and client extended key usage options enabled. If separate
# certificates are used for the client and server certificates then the common
# name on the client and server certificate must match. The common name of the
# certificate is used to identify this database node with other database nodes
# The rootCA file is the root certificate chain that was used to generate these
# certificates and is shared between nodes in a cluster. A database client does
# not need to provide a client certificate when sending a request to a database
# node but does need to verify the database node's server certificate against
# the same root certificate chain.
# **REQUIRED**
tls:
# If using a single certificate for both client and server authentication
# then it is specified using the certificate and key options as shown below
# If using separate client and server certificates then uncomment the options
# below for clientCertificate, clientKey, serverCertificate, and serverKey
# A PEM encoded certificate with the 'server' and 'client' extendedKeyUsage
# options set
certificate: path/to/cert.pem
# A PEM encoded key corresponding to the specified certificate
key: path/to/key.pem
# A PEM encoded 'client' type certificate
# clientCertificate: path/to/clientCert.pem
# A PEM encoded key corresponding to the specified client certificate
# clientKey: path/to/clientKey.pem
# A PEM encoded 'server' type certificate
# serverCertificate: path/to/serverCert.pem
# A PEM encoded key corresponding to the specified server certificate
# serverKey: path/to/serverKey.pem
# A PEM encoded certificate chain that can be used to verify the previous
# certificates
rootCA: path/to/ca-chain.pem
`
// usage lists the top-level devicedb CLI commands. It is the help text shown
// for the bare "devicedb" invocation and the "help" command.
var usage string =
`Usage: devicedb <command> <arguments> | -version
Commands:
start Start a devicedb relay server
conf Generate a template config file for a relay server
upgrade Upgrade an old database to the latest format on a relay
benchmark Benchmark devicedb performance on a relay
compact Compact underlying disk storage
cluster Manage a devicedb cloud cluster
Use devicedb help <command> for more usage information about a command.
`
// clusterUsage lists the subcommands of "devicedb cluster". It is the help
// text shown for the bare "devicedb cluster" invocation.
var clusterUsage string =
`Usage: devicedb cluster <cluster_command> <arguments>
Cluster Commands:
start Start a devicedb cloud node
remove Force a node to be removed from the cluster
decommission Migrate data away from a node then remove it from the cluster
replace Replace a dead node with a new node
overview Show an overview of the nodes in the cluster
add_site Add a site to the cluster
remove_site Remove a site from the cluster
add_relay Add a relay to the cluster
remove_relay Remove a relay from the cluster
move_relay Move a relay to a site
relay_status Get the connection and site membership status of a relay
get Get an entry in a site database with a certain key
get_matches Get all entries in a site database whose keys match some prefix
put Put a value in a site database with a certain key
delete Delete an entry in a site database with a certain key
log_dump Print the replicated log state of the specified node
snapshot Tell the cluster to create a consistent snapshot
get_snapshot Check if snapshot has been completed at a particular node
download_snapshot Download a piece of the cluster snapshot from a particular node
Use devicedb cluster help <cluster_command> for more usage information about a cluster command.
`
// commandUsage is a fmt template for a single command's usage line; the %s
// placeholder is filled with the command name.
var commandUsage string = "Usage: devicedb %s <arguments>\n"
// isValidPartitionCount reports whether p is a power of two that lies within
// the inclusive range [cluster.MinPartitionCount, cluster.MaxPartitionCount].
func isValidPartitionCount(p uint64) bool {
	// Zero is rejected explicitly; p&(p-1) == 0 identifies powers of two.
	if p == 0 || p&(p-1) != 0 {
		return false
	}

	return p >= cluster.MinPartitionCount && p <= cluster.MaxPartitionCount
}
// isValidJoinAddress reports whether s can be parsed as a host:port join
// address by parseJoinAddress.
func isValidJoinAddress(s string) bool {
	if _, _, err := parseJoinAddress(s); err != nil {
		return false
	}

	return true
}
// parseJoinAddress splits an address of the form "host:port" into its
// hostname and port components. The port must be a decimal unsigned integer
// that fits in 16 bits. On malformed input a descriptive error is returned
// (the original implementation returned errors.New(""), which produced an
// empty, useless message for callers that print the error).
func parseJoinAddress(s string) (hostname string, port int, err error) {
	parts := strings.Split(s, ":")

	// Exactly one ":" separator is required. Note: this intentionally does
	// not accept bracketed IPv6 literals, matching the original behavior.
	if len(parts) != 2 {
		err = errors.New("join address must be of the form <host>:<port>")
		return
	}

	// ParseUint with bitSize 16 also enforces the valid port range 0-65535.
	port64, parseErr := strconv.ParseUint(parts[1], 10, 16)

	if parseErr != nil {
		err = fmt.Errorf("invalid port %q in join address: %v", parts[1], parseErr)
		return
	}

	hostname = parts[0]
	port = int(port64)

	return
}
func main() {
startCommand := flag.NewFlagSet("start", flag.ExitOnError)
confCommand := flag.NewFlagSet("conf", flag.ExitOnError)
upgradeCommand := flag.NewFlagSet("upgrade", flag.ExitOnError)
benchmarkCommand := flag.NewFlagSet("benchmark", flag.ExitOnError)
compactCommand := flag.NewFlagSet("compact", flag.ExitOnError)
helpCommand := flag.NewFlagSet("help", flag.ExitOnError)
clusterStartCommand := flag.NewFlagSet("start", flag.ExitOnError)
clusterBenchmarkCommand := flag.NewFlagSet("benchmark", flag.ExitOnError)
clusterRemoveCommand := flag.NewFlagSet("remove", flag.ExitOnError)
clusterDecommissionCommand := flag.NewFlagSet("decommission", flag.ExitOnError)
clusterReplaceCommand := flag.NewFlagSet("replace", flag.ExitOnError)
clusterOverviewCommand := flag.NewFlagSet("overview", flag.ExitOnError)
clusterAddSiteCommand := flag.NewFlagSet("add_site", flag.ExitOnError)
clusterRemoveSiteCommand := flag.NewFlagSet("remove_site", flag.ExitOnError)
clusterAddRelayCommand := flag.NewFlagSet("add_relay", flag.ExitOnError)
clusterRemoveRelayCommand := flag.NewFlagSet("remove_relay", flag.ExitOnError)
clusterMoveRelayCommand := flag.NewFlagSet("move_relay", flag.ExitOnError)
clusterRelayStatusCommand := flag.NewFlagSet("relay_status", flag.ExitOnError)
clusterGetCommand := flag.NewFlagSet("get", flag.ExitOnError)
clusterGetMatchesCommand := flag.NewFlagSet("get_matches", flag.ExitOnError)
clusterPutCommand := flag.NewFlagSet("put", flag.ExitOnError)
clusterDeleteCommand := flag.NewFlagSet("delete", flag.ExitOnError)
clusterHelpCommand := flag.NewFlagSet("help", flag.ExitOnError)
clusterLogDumpCommand := flag.NewFlagSet("log_dump", flag.ExitOnError)
clusterSnapshotCommand := flag.NewFlagSet("snapshot", flag.ExitOnError)
clusterGetSnapshotCommand := flag.NewFlagSet("get_snapshot", flag.ExitOnError)
clusterDownloadSnapshotCommand := flag.NewFlagSet("download_snapshot", flag.ExitOnError)
startConfigFile := startCommand.String("conf", "", "The config file for this server")
upgradeLegacyDB := upgradeCommand.String("legacy", "", "The path to the legacy database data directory")
upgradeConfigFile := upgradeCommand.String("conf", "", "The config file for this server. If this argument is used then don't use the -db and -merkle options.")
upgradeNewDB := upgradeCommand.String("db", "", "The directory to use for the upgraded data directory")
upgradeNewDBMerkleDepth := upgradeCommand.Uint64("merkle", uint64(0), "The merkle depth to use for the upgraded data directory. Default value will be used if omitted.")
benchmarkDB := benchmarkCommand.String("db", "", "The directory to use for the benchark test scratch space")
benchmarkMerkle := benchmarkCommand.Uint64("merkle", uint64(0), "The merkle depth to use with the benchmark databases")
benchmarkStress := benchmarkCommand.Bool("stress", false, "Perform test that continuously writes historical log events as fast as it can")
benchmarkHistoryEventFloor := benchmarkCommand.Uint64("event_floor", 400000, "Event floor for history log")
benchmarkHistoryEventLimit := benchmarkCommand.Uint64("event_limit", 500000, "Event limit for history log")
benchmarkHistoryPurgeBatchSize := benchmarkCommand.Int("purge_batch_size", 1000, "Purge batch size for history log")
compactDB := compactCommand.String("db", "", "The directory containing the database data to compact")
clusterStartHost := clusterStartCommand.String("host", "localhost", "HTTP The hostname or ip to listen on. This is the advertised host address for this node.")
clusterStartPort := clusterStartCommand.Uint("port", defaultPort, "HTTP This is the intra-cluster port used for communication between nodes and between secure clients and the cluster.")
clusterStartRelayHost := clusterStartCommand.String("relay_host", "localhost", "HTTPS The hostname or ip to listen on for incoming relay connections. Applies only if TLS is terminated by devicedb itself")
clusterStartRelayPort := clusterStartCommand.Uint("relay_port", uint(443), "HTTPS This is the port used for incoming relay connections. Applies only if TLS is terminated by devicedb itself.")
clusterStartTLSCertificate := clusterStartCommand.String("cert", "", "PEM encoded x509 certificate to be used by relay connections. Applies only if TLS is terminated by devicedb. (Required) (Ex: /path/to/certs/cert.pem)")
clusterStartTLSKey := clusterStartCommand.String("key", "", "PEM encoded x509 key corresponding to the specified 'cert'. Applies only if TLS is terminated by devicedb. (Required) (Ex: /path/to/certs/key.pem)")
clusterStartTLSRelayCA := clusterStartCommand.String("relay_ca", "", "PEM encoded x509 certificate authority used to validate relay client certs for incoming relay connections. Applies only if TLS is terminated by devicedb. (Required) (Ex: /path/to/certs/relays.ca.pem)")
clusterStartPartitions := clusterStartCommand.Uint64("partitions", uint64(cluster.DefaultPartitionCount), "The number of hash space partitions in the cluster. Must be a power of 2. (Only specified when starting a new cluster)")
clusterStartReplicationFactor := clusterStartCommand.Uint64("replication_factor", uint64(3), "The number of replcas required for every database key. (Only specified when starting a new cluster)")
clusterStartStore := clusterStartCommand.String("store", "", "The path to the storage. (Required) (Ex: /tmp/devicedb)")
clusterStartJoin := clusterStartCommand.String("join", "", "Join the cluster that the node listening at this address belongs to. Ex: 10.10.102.8:80")
clusterStartReplacement := clusterStartCommand.Bool("replacement", false, "Specify this flag if this node is being added to replace some other node in the cluster.")
clusterStartMerkleDepth := clusterStartCommand.Uint("merkle", 4, "Use this flag to adjust the merkle depth used for site merkle trees.")
clusterStartSyncMaxSessions := clusterStartCommand.Uint("sync_max_sessions", 10, "The number of sync sessions to allow at the same time.")
clusterStartSyncPathLimit := clusterStartCommand.Uint("sync_path_limit", 10, "The number of exploration paths to allow in a sync session.")
clusterStartSyncPeriod := clusterStartCommand.Uint("sync_period", 1000, "The period in milliseconds between sync sessions with individual relays.")
clusterStartLogLevel := clusterStartCommand.String("log_level", "info", "The log level configures how detailed the output produced by devicedb is. Must be one of { critical, error, warning, notice, info, debug }")
clusterStartNoValidate := clusterStartCommand.Bool("no_validate", false, "This flag enables relays connecting to this node to decide their own relay ID. It only applies to TLS enabled servers and should only be used for testing.")
clusterStartSnapshotDirectory := clusterStartCommand.String("snapshot_store", "", "To enable snapshots set this to some directory where database snapshots can be stored")
clusterBenchmarkExternalAddresses := clusterBenchmarkCommand.String("external_addresses", "", "A comma separated list of cluster node addresses. Ex: wss://localhost:9090,wss://localhost:8080")
clusterBenchmarkInternalAddresses := clusterBenchmarkCommand.String("internal_addresses", "", "A comma separated list of cluster node addresses. Ex: localhost:9090,localhost:8080")
clusterBenchmarkName := clusterBenchmarkCommand.String("name", "multiple_relays", "The name of the benchmark to run")
clusterBenchmarkNSites := clusterBenchmarkCommand.Uint("n_sites", 100, "The number of sites to simulate")
clusterBenchmarkNRelays := clusterBenchmarkCommand.Uint("n_relays", 1, "The number of relays to simulate per site")
clusterBenchmarkUpdatesPerSecond := clusterBenchmarkCommand.Uint("updates_per_second", 5, "The number of updates per second to simulate per relay")
clusterBenchmarkSyncPeriod := clusterBenchmarkCommand.Uint("sync_period", 1000, "The sync period in milliseconds per relay")
clusterRemoveHost := clusterRemoveCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact to initiate the node removal.")
clusterRemovePort := clusterRemoveCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterRemoveNodeID := clusterRemoveCommand.Uint64("node", uint64(0), "The ID of the node that should be removed from the cluster. Defaults to the ID of the node being contacted.")
clusterDecommissionHost := clusterDecommissionCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact to initiate the node decommissioning.")
clusterDecommissionPort := clusterDecommissionCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterDecommissionNodeID := clusterDecommissionCommand.Uint64("node", uint64(0), "The ID of the node that should be decommissioned from the cluster. Defaults to the ID of the node being contacted.")
clusterReplaceHost := clusterReplaceCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact to initiate the node decommissioning.")
clusterReplacePort := clusterReplaceCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterReplaceNodeID := clusterReplaceCommand.Uint64("node", uint64(0), "The ID of the node that is being replaced. Defaults the the ID of the node being contacted.")
clusterReplaceReplacementNodeID := clusterReplaceCommand.Uint64("replacement_node", uint64(0), "The ID of the node that is replacing the other node.")
clusterOverviewHost := clusterOverviewCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact to query the cluster state.")
clusterOverviewPort := clusterOverviewCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterAddSiteHost := clusterAddSiteCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about adding the site.")
clusterAddSitePort := clusterAddSiteCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterAddSiteSiteID := clusterAddSiteCommand.String("site", "", "The ID of the site to add. (Required)")
clusterRemoveSiteHost := clusterRemoveSiteCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about removing the site.")
clusterRemoveSitePort := clusterRemoveSiteCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterRemoveSiteSiteID := clusterRemoveSiteCommand.String("site", "", "The ID of the site to remove. (Required)")
clusterAddRelayHost := clusterAddRelayCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about adding the relay.")
clusterAddRelayPort := clusterAddRelayCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterAddRelayRelayID := clusterAddRelayCommand.String("relay", "", "The ID of the relay to add. (Required)")
clusterRemoveRelayHost := clusterRemoveRelayCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about removing the relay.")
clusterRemoveRelayPort := clusterRemoveRelayCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterRemoveRelayRelayID := clusterRemoveRelayCommand.String("relay", "", "The ID of the relay to remove. (Required)")
clusterMoveRelayHost := clusterMoveRelayCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about removing the relay.")
clusterMoveRelayPort := clusterMoveRelayCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterMoveRelayRelayID := clusterMoveRelayCommand.String("relay", "", "The ID of the relay to move. (Required)")
clusterMoveRelaySiteID := clusterMoveRelayCommand.String("site", "", "The ID of the site to move the relay to. If left blank the relay is removed from its current site.")
clusterRelayStatusHost := clusterRelayStatusCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about getting the relay status.")
clusterRelayStatusPort := clusterRelayStatusCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterRelayStatusRelayID := clusterRelayStatusCommand.String("relay", "", "The ID of the relay to query. (Required)")
clusterGetHost := clusterGetCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about getting this key.")
clusterGetPort := clusterGetCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterGetSiteID := clusterGetCommand.String("site", "", "The ID of the site. (Required)")
clusterGetBucket := clusterGetCommand.String("bucket", "default", "The bucket to query in the site.")
clusterGetKey := clusterGetCommand.String("key", "", "The key to get from the bucket. (Required)")
clusterGetMatchesHost := clusterGetMatchesCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about getting these keys.")
clusterGetMatchesPort := clusterGetMatchesCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterGetMatchesSiteID := clusterGetMatchesCommand.String("site", "", "The ID of the site. (Required)")
clusterGetMatchesBucket := clusterGetMatchesCommand.String("bucket", "default", "The bucket to query in the site.")
clusterGetMatchesPrefix := clusterGetMatchesCommand.String("prefix", "", "The prefix of keys to get from the bucket. (Required)")
clusterPutHost := clusterPutCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about updating this key.")
clusterPutPort := clusterPutCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterPutSiteID := clusterPutCommand.String("site", "", "The ID of the site. (Required)")
clusterPutBucket := clusterPutCommand.String("bucket", "default", "The bucket in the site where this key goes.")
clusterPutKey := clusterPutCommand.String("key", "", "The key to update in the bucket. (Required)")
clusterPutValue := clusterPutCommand.String("value", "", "The value to put at this key. (Required)")
clusterPutContext := clusterPutCommand.String("context", "", "The causal context of this put operation")
clusterDeleteHost := clusterDeleteCommand.String("host", "localhost", "The hostname or ip of some cluster member to contact about updating this key.")
clusterDeletePort := clusterDeleteCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterDeleteSiteID := clusterDeleteCommand.String("site", "", "The ID of the site. (Required)")
clusterDeleteBucket := clusterDeleteCommand.String("bucket", "default", "The bucket in the site where this key goes.")
clusterDeleteKey := clusterDeleteCommand.String("key", "", "The key to update in the bucket. (Required)")
clusterDeleteContext := clusterDeleteCommand.String("context", "", "The causal context of this put operation")
clusterLogDumpHost := clusterLogDumpCommand.String("host", "localhost", "The hostname or ip of some cluster member whose raft state to print.")
clusterLogDumpPort := clusterLogDumpCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterSnapshotHost := clusterSnapshotCommand.String("host", "localhost", "The hostname or ip of some cluster member that should take a snapshot.")
clusterSnapshotPort := clusterSnapshotCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterGetSnapshotHost := clusterGetSnapshotCommand.String("host", "localhost", "The hostname or ip of some cluster member to get a snapshot from.")
clusterGetSnapshotPort := clusterGetSnapshotCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterGetSnapshotSnapshotId := clusterGetSnapshotCommand.String("uuid", "", "The UUID of the snapshot to check for")
clusterDownloadSnapshotHost := clusterDownloadSnapshotCommand.String("host", "localhost", "The hostname or ip of some cluster member to download a snapshot from.")
clusterDownloadSnapshotPort := clusterDownloadSnapshotCommand.Uint("port", defaultPort, "The port of the cluster member to contact.")
clusterDownloadSnapshotSnapshotId := clusterDownloadSnapshotCommand.String("uuid", "", "The UUID of the snapshot to download")
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "Error: %s", "No command specified\n\n")
fmt.Fprintf(os.Stderr, "%s", usage)
os.Exit(1)
}
switch os.Args[1] {
case "cluster":
if len(os.Args) < 3 {
fmt.Fprintf(os.Stderr, "Error: %s", "No cluster command specified\n\n")
fmt.Fprintf(os.Stderr, "%s", clusterUsage)
os.Exit(1)
}
switch os.Args[2] {
case "start":
clusterStartCommand.Parse(os.Args[3:])
case "benchmark":
clusterBenchmarkCommand.Parse(os.Args[3:])
case "remove":
clusterRemoveCommand.Parse(os.Args[3:])
case "decommission":
clusterDecommissionCommand.Parse(os.Args[3:])
case "replace":
clusterReplaceCommand.Parse(os.Args[3:])
case "overview":
clusterOverviewCommand.Parse(os.Args[3:])
case "add_site":
clusterAddSiteCommand.Parse(os.Args[3:])
case "remove_site":
clusterRemoveSiteCommand.Parse(os.Args[3:])
case "add_relay":
clusterAddRelayCommand.Parse(os.Args[3:])
case "remove_relay":
clusterRemoveRelayCommand.Parse(os.Args[3:])
case "move_relay":
clusterMoveRelayCommand.Parse(os.Args[3:])
case "relay_status":
clusterRelayStatusCommand.Parse(os.Args[3:])
case "get":
clusterGetCommand.Parse(os.Args[3:])
case "get_matches":
clusterGetMatchesCommand.Parse(os.Args[3:])
case "put":
clusterPutCommand.Parse(os.Args[3:])
case "delete":
clusterDeleteCommand.Parse(os.Args[3:])
case "log_dump":
clusterLogDumpCommand.Parse(os.Args[3:])
case "snapshot":
clusterSnapshotCommand.Parse(os.Args[3:])
case "get_snapshot":
clusterGetSnapshotCommand.Parse(os.Args[3:])
case "download_snapshot":
clusterDownloadSnapshotCommand.Parse(os.Args[3:])
case "help":
clusterHelpCommand.Parse(os.Args[3:])
case "-help":
fmt.Fprintf(os.Stderr, "%s", clusterUsage)
default:
fmt.Fprintf(os.Stderr, "Error: \"%s\" is not a recognized cluster command\n\n", os.Args[2])
fmt.Fprintf(os.Stderr, "%s", clusterUsage)
os.Exit(1)
}
case "start":
startCommand.Parse(os.Args[2:])
case "conf":
confCommand.Parse(os.Args[2:])
case "upgrade":
upgradeCommand.Parse(os.Args[2:])
case "benchmark":
benchmarkCommand.Parse(os.Args[2:])
case "compact":
compactCommand.Parse(os.Args[2:])
case "help":
helpCommand.Parse(os.Args[2:])
case "-help":
fmt.Fprintf(os.Stderr, "%s", usage)
os.Exit(0)
case "-version":
fmt.Fprintf(os.Stdout, "%s\n", DEVICEDB_VERSION)
os.Exit(0)
default:
fmt.Fprintf(os.Stderr, "Error: \"%s\" is not a recognized command\n\n", os.Args[1])
fmt.Fprintf(os.Stderr, "%s", usage)
os.Exit(1)
}
if startCommand.Parsed() {
if *startConfigFile == "" {
fmt.Fprintf(os.Stderr, "Error: No config file specified\n")
os.Exit(1)
}
start(*startConfigFile)
}
if confCommand.Parsed() {
fmt.Fprintf(os.Stderr, "%s", templateConfig)
os.Exit(0)
}
if upgradeCommand.Parsed() {
var serverConfig YAMLServerConfig
if len(*upgradeConfigFile) != 0 {
err := serverConfig.LoadFromFile(*upgradeConfigFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Unable to load configuration file: %v\n", err)
os.Exit(1)
}
} else {
if *upgradeNewDBMerkleDepth < uint64(MerkleMinDepth) || *upgradeNewDBMerkleDepth > uint64(MerkleMaxDepth) {
fmt.Fprintf(os.Stderr, "No valid merkle depth specified. Defaulting to %d\n", MerkleDefaultDepth)
serverConfig.MerkleDepth = MerkleDefaultDepth
} else {
serverConfig.MerkleDepth = uint8(*upgradeNewDBMerkleDepth)
}
if len(*upgradeNewDB) == 0 {
fmt.Fprintf(os.Stderr, "Error: No database directory (-db) specified\n")
os.Exit(1)
}
serverConfig.DBFile = *upgradeNewDB
}
if len(*upgradeLegacyDB) == 0 {
fmt.Fprintf(os.Stderr, "Error: No legacy database directory (-legacy) specified\n")
os.Exit(1)
}
err := UpgradeLegacyDatabase(*upgradeLegacyDB, serverConfig)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Unable to migrate legacy database: %v\n", err)
os.Exit(1)
}
}
if benchmarkCommand.Parsed() {
var benchmarkMagnitude int = 10000
var serverConfig ServerConfig
var server *Server
if len(*benchmarkDB) == 0 {
fmt.Fprintf(os.Stderr, "Error: No database directory (-db) specified\n")
os.Exit(1)
}
if *benchmarkMerkle < uint64(MerkleMinDepth) || *benchmarkMerkle > uint64(MerkleMaxDepth) {
fmt.Fprintf(os.Stderr, "No valid merkle depth specified. Defaulting to %d\n", MerkleDefaultDepth)
serverConfig.MerkleDepth = MerkleDefaultDepth
} else {
serverConfig.MerkleDepth = uint8(*benchmarkMerkle)
}
err := os.RemoveAll(*benchmarkDB)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Unable to initialized benchmark workspace at %s: %v\n", *benchmarkDB, err)
os.Exit(1)
}
SetLoggingLevel("debug")
serverConfig.DBFile = *benchmarkDB
serverConfig.HistoryEventLimit = *benchmarkHistoryEventLimit
serverConfig.HistoryEventFloor = *benchmarkHistoryEventFloor
serverConfig.HistoryPurgeBatchSize = *benchmarkHistoryPurgeBatchSize
server, err = NewServer(serverConfig)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Unable to initialize test database: %v\n", err)
os.Exit(1)
}
if *benchmarkStress {
fmt.Fprintf(os.Stderr, "Start stressing disk by writing to the history log indefinitely...\n")
err = benchmarkHistoryStress(server)
fmt.Fprintf(os.Stderr, "Error: While running history stress benchmark: %v\n", err.Error())
os.Exit(1)
}
err = benchmarkSequentialReads(benchmarkMagnitude, server)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Failed at sequential reads benchmark: %v\n", err)
os.Exit(1)
}
err = benchmarkRandomReads(benchmarkMagnitude, server)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Failed at random reads benchmark: %v\n", err)
os.Exit(1)
}
err = benchmarkWrites(benchmarkMagnitude, server)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Failed at writes benchmark: %v\n", err)
os.Exit(1)
}
}
if compactCommand.Parsed() {
if len(*compactDB) == 0 {
fmt.Fprintf(os.Stderr, "Error: No database directory (-db) specified\n")
os.Exit(1)
}
// compact here
storageDriver := storage.NewLevelDBStorageDriver(*compactDB, nil)
if err := storageDriver.Open(); err != nil {
fmt.Fprintf(os.Stderr, "Error: Unable to open storage: %v\n", err.Error())
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "Compacting database...\n")
if err := storageDriver.Compact(); err != nil {
fmt.Fprintf(os.Stderr, "Error: Compaction failed: %v\n", err.Error())
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "Compacted database!\n")
os.Exit(0)
}
if helpCommand.Parsed() {
if len(os.Args) < 3 {
fmt.Fprintf(os.Stderr, "Error: No command specified for help\n")
os.Exit(1)
}
var flagSet *flag.FlagSet
switch os.Args[2] {
case "start":
flagSet = startCommand
case "conf":
fmt.Fprintf(os.Stderr, "Usage: devicedb conf\n")
os.Exit(0)
case "upgrade":
flagSet = upgradeCommand
case "benchmark":
flagSet = benchmarkCommand
case "cluster":
fmt.Fprintf(os.Stderr, commandUsage, "cluster <cluster_command>")
os.Exit(0)
default:
fmt.Fprintf(os.Stderr, "Error: \"%s\" is not a valid command.\n", os.Args[2])
os.Exit(1)
}
fmt.Fprintf(os.Stderr, commandUsage + "\n", os.Args[2])
flagSet.PrintDefaults()
os.Exit(0)
}
if clusterStartCommand.Parsed() {
if *clusterStartJoin != "" && !isValidJoinAddress(*clusterStartJoin) {
fmt.Fprintf(os.Stderr, "Error: -join must specify a valid address of some host in an existing cluster formatted like: host:port Ex: 10.10.102.89:80.\n")
os.Exit(1)
}
if *clusterStartStore == "" {
fmt.Fprintf(os.Stderr, "Error: -store is a required parameter of the devicedb cluster start command. It must specify a valid file system path.\n")
os.Exit(1)
}
if *clusterStartJoin == "" {
if !isValidPartitionCount(*clusterStartPartitions) {
fmt.Fprintf(os.Stderr, "Error: -partitions must be a power of 2 and be in the range [%d, %d]\n", cluster.MinPartitionCount, cluster.MaxPartitionCount)
os.Exit(1)
}
if *clusterStartReplicationFactor == 0 {
fmt.Fprintf(os.Stderr, "Error: -replication_factor must be a positive value\n")
os.Exit(1)
}
}
if *clusterStartLogLevel != "critical" && *clusterStartLogLevel != "error" && *clusterStartLogLevel != "warning" && *clusterStartLogLevel != "notice" && *clusterStartLogLevel != "info" && *clusterStartLogLevel != "debug" {
fmt.Fprintf(os.Stderr, "Error: -log_level must be one of { critical, error, warning, notice, info, debug }\n")
os.Exit(1)
}
var certificate []byte
var key []byte
var rootCAs *x509.CertPool
var cert tls.Certificate
var httpOnly bool
var err error
if *clusterStartTLSCertificate == "" && *clusterStartTLSKey == "" && *clusterStartTLSRelayCA == "" {
// http only mode
httpOnly = true
} else if *clusterStartTLSCertificate != "" && *clusterStartTLSKey != "" && *clusterStartTLSRelayCA != "" {
// https mode
certificate, err = ioutil.ReadFile(*clusterStartTLSCertificate)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Could not load the TLS certificate from %s: %v\n", *clusterStartTLSCertificate, err.Error())
os.Exit(1)
}
key, err = ioutil.ReadFile(*clusterStartTLSKey)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Could not load the TLS key from %s: %v\n", *clusterStartTLSKey, err.Error())
os.Exit(1)
}
relayCA, err := ioutil.ReadFile(*clusterStartTLSRelayCA)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Could not load the TLS Relay CA from %s: %v\n", *clusterStartTLSRelayCA, err.Error())
os.Exit(1)
}
cert, err = tls.X509KeyPair([]byte(certificate), []byte(key))
if err != nil {
fmt.Fprintf(os.Stderr, "Error: The specified certificate and key represent an invalid public/private key pair\n")
os.Exit(1)
}
rootCAs = x509.NewCertPool()
if !rootCAs.AppendCertsFromPEM([]byte(relayCA)) {
fmt.Fprintf(os.Stderr, "Error: The specified TLS Relay CA was not valid\n")
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stderr, "Error: -cert, -key, and -relay_ca must all be provided if one is provided to enable TLS mode\n")
os.Exit(1)
}
if *clusterStartMerkleDepth < uint(MerkleMinDepth) || *clusterStartMerkleDepth > uint(MerkleMaxDepth) {
fmt.Fprintf(os.Stderr, "Error: The specified merkle depth is not valid. It must be between %d and %d inclusive\n", MerkleMinDepth, MerkleMaxDepth)
os.Exit(1)
}
if *clusterStartSyncMaxSessions == 0 {
fmt.Fprintf(os.Stderr, "Error: The specified sync sessions max is not valid. It must be a positive integer\n")
os.Exit(1)
}
if *clusterStartSyncPathLimit == 0 {
fmt.Fprintf(os.Stderr, "Error: The specified sync path limit is not valid. It must be a positive integer\n")
os.Exit(1)
}
if *clusterStartSyncPeriod == 0 {
fmt.Fprintf(os.Stderr, "Error: The specified sync period is not valid. It must be a positive integer\n")
os.Exit(1)
}
var seedHost string
var seedPort int
var startOptions node.NodeInitializationOptions
if *clusterStartJoin != "" {
seedHost, seedPort, _ = parseJoinAddress(*clusterStartJoin)
startOptions.JoinCluster = true
startOptions.SeedNodeHost = seedHost
startOptions.SeedNodePort = seedPort
} else {
startOptions.StartCluster = true
startOptions.ClusterSettings.Partitions = *clusterStartPartitions
startOptions.ClusterSettings.ReplicationFactor = *clusterStartReplicationFactor
}
startOptions.ClusterHost = *clusterStartHost
startOptions.ClusterPort = int(*clusterStartPort)
if !httpOnly {
startOptions.ExternalHost = *clusterStartRelayHost
startOptions.ExternalPort = int(*clusterStartRelayPort)
}
startOptions.SyncMaxSessions = *clusterStartSyncMaxSessions
startOptions.SyncPathLimit = uint32(*clusterStartSyncPathLimit)
startOptions.SyncPeriod = *clusterStartSyncPeriod
startOptions.SnapshotDirectory = *clusterStartSnapshotDirectory
SetLoggingLevel(*clusterStartLogLevel)
cloudNodeStorage := storage.NewLevelDBStorageDriver(*clusterStartStore, nil)
var cloudServerConfig CloudServerConfig = CloudServerConfig{
InternalHost: *clusterStartHost,
InternalPort: int(*clusterStartPort),
NodeID: 1,
RelayTLSConfig: &tls.Config{
Certificates: []tls.Certificate{ cert },
ClientCAs: rootCAs,
ClientAuth: tls.RequireAndVerifyClientCert,
},
}
if !httpOnly {
cloudServerConfig.ExternalHost = *clusterStartRelayHost
cloudServerConfig.ExternalPort = int(*clusterStartRelayPort)
}
cloudServer := NewCloudServer(cloudServerConfig)
var capacity uint64 = 1
if *clusterStartReplacement {
capacity = 0
}
cloudNode := node.New(node.ClusterNodeConfig{
CloudServer: cloudServer,
StorageDriver: cloudNodeStorage,
MerkleDepth: uint8(*clusterStartMerkleDepth),
Capacity: capacity,
NoValidate: *clusterStartNoValidate,
})
if err := cloudNode.Start(startOptions); err != nil {
os.Exit(1)
}
os.Exit(0)
}
if clusterRemoveCommand.Parsed() {
fmt.Fprintf(os.Stderr, "Removing node %d from the cluster...\n", *clusterRemoveNodeID)
client := NewClient(ClientConfig{ })
err := client.ForceRemoveNode(context.TODO(), PeerAddress{ Host: *clusterRemoveHost, Port: int(*clusterRemovePort) }, *clusterRemoveNodeID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Unable to remove node %d from the cluster: %v\n", *clusterRemoveNodeID, err.Error())
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "Removed node %d from the cluster.\n", *clusterRemoveNodeID)
os.Exit(0)
}
if clusterDecommissionCommand.Parsed() {
fmt.Fprintf(os.Stderr, "Decommissioning node %d...\n", *clusterDecommissionNodeID)
client := NewClient(ClientConfig{ })
err := client.DecommissionNode(context.TODO(), PeerAddress{ Host: *clusterDecommissionHost, Port: int(*clusterDecommissionPort) }, *clusterDecommissionNodeID)