309 lines
6.7 KiB
Go
309 lines
6.7 KiB
Go
package raft
|
|
|
|
import (
|
|
"crypto/tls"
|
|
"crypto/x509"
|
|
"encoding/json"
|
|
"fmt"
|
|
"log"
|
|
"os"
|
|
"path/filepath"
|
|
"regexp"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
transport "deevirt.fr/compute/pkg/raft/transport"
|
|
"github.com/hashicorp/raft"
|
|
raftboltdb "github.com/hashicorp/raft-boltdb/v2"
|
|
raftwal "github.com/hashicorp/raft-wal"
|
|
"google.golang.org/grpc"
|
|
"google.golang.org/grpc/credentials"
|
|
|
|
"deevirt.fr/compute/pkg/config"
|
|
etcd_client "deevirt.fr/compute/pkg/etcd"
|
|
)
|
|
|
|
const (
	// retainSnapshotCount is the number of Raft snapshots to keep on disk.
	// NOTE(review): Open currently passes a hard-coded 3 to
	// raft.NewFileSnapshotStore instead of this constant — reconcile the two.
	retainSnapshotCount = 2
	// raftTimeout bounds leader-side Apply calls (see Set and Delete).
	raftTimeout = 10 * time.Second
)
|
|
|
|
// Domain describes one guest (VM) entry tracked by the store.
type Domain struct {
	// State is a numeric state code; its semantics are defined by the FSM,
	// which is not visible in this file.
	State int
	// NOTE(review): a single byte cannot hold a configuration blob or a CPU
	// map — these two fields look like they were meant to be []byte. Confirm
	// against the FSM / serialization code before relying on them.
	Config byte
	CPUMAP byte
}
|
|
|
|
// Node groups the domains hosted on a single cluster node.
type Node struct {
	Domains []Domain
}
|
|
|
|
// command is the JSON-encoded operation replicated through the Raft log.
// Set and Delete marshal one of these and hand it to Raft.Apply; the FSM
// decodes it on every node.
type command struct {
	Op    string `json:"op,omitempty"`    // "set" or "delete"
	Key   string `json:"key,omitempty"`   // target key in Store.m
	Value []byte `json:"value,omitempty"` // payload for "set"; unused for "delete"
}
|
|
|
|
// Store is a replicated key-value store built on hashicorp/raft. Writes go
// through the Raft log (Set/Delete); reads are served from the local map m
// after a read barrier (Get/Ls).
type Store struct {
	mu   sync.Mutex     // guards m
	conf *config.Config // general configuration

	m map[string][]byte // The key-value store for the system.

	Raft *raft.Raft // The consensus mechanism

	// lastIndex is the last log index found in the WAL at Open time.
	lastIndex uint64

	logger *log.Logger
}
|
|
|
|
// Peers identifies one Raft peer by its server ID and network address.
// NOTE(review): Go convention would name the field ID, but renaming would
// break external callers, so it is left as-is.
type Peers struct {
	Id      string
	Address string
}
|
|
|
|
func getTLSCredentials(conf *config.Config) credentials.TransportCredentials {
|
|
cert, err := tls.LoadX509KeyPair(conf.Manager.TlsCert, conf.Manager.TlsKey)
|
|
if err != nil {
|
|
log.Fatalf("Erreur chargement du certificat: %v", err)
|
|
}
|
|
|
|
// Charger la CA (facultatif, pour la vérification des clients)
|
|
caCert, err := os.ReadFile(conf.Manager.TlsCert)
|
|
if err != nil {
|
|
log.Fatalf("Erreur chargement CA: %v", err)
|
|
}
|
|
certPool := x509.NewCertPool()
|
|
certPool.AppendCertsFromPEM(caCert)
|
|
|
|
// Créer les credentials TLS
|
|
creds := credentials.NewTLS(&tls.Config{
|
|
Certificates: []tls.Certificate{cert},
|
|
ClientCAs: certPool,
|
|
InsecureSkipVerify: true,
|
|
})
|
|
|
|
return creds
|
|
}
|
|
|
|
func New(conf *config.Config) *Store {
|
|
return &Store{
|
|
conf: conf,
|
|
m: make(map[string][]byte),
|
|
logger: log.New(os.Stderr, "[store] ", log.LstdFlags),
|
|
}
|
|
}
|
|
|
|
// Open initializes the on-disk state (WAL, BoltDB stable store, snapshot
// store), starts the Raft node over the gRPC transport, and — on a fresh,
// never-bootstrapped single-segment address — bootstraps the cluster from
// the peer list discovered in etcd. It returns the store itself and the
// transport manager so the caller can register the gRPC services.
func (s *Store) Open() (*Store, *transport.Manager, error) {
	// Create the data directory for this node.
	baseDir := filepath.Join("/var/lib/deevirt/mgr/", s.conf.NodeID)
	err := os.MkdirAll(baseDir, 0740)
	if err != nil {
		return nil, nil, err
	}

	walDir := filepath.Join(baseDir, "/wal")
	err = os.MkdirAll(walDir, 0740)
	if err != nil {
		return nil, nil, err
	}

	c := raft.DefaultConfig()
	c.SnapshotInterval = 60 * time.Second
	c.SnapshotThreshold = 500
	c.HeartbeatTimeout = 2 * time.Second
	c.ElectionTimeout = 3 * time.Second

	c.LocalID = raft.ServerID(s.conf.NodeID)

	// Create the LogStore backed by Raft-WAL.
	// NOTE(review): failures below call log.Fatalf while earlier failures
	// return an error — inconsistent error handling worth unifying.
	logStore, err := raftwal.Open(walDir)
	if err != nil {
		log.Fatalf("Erreur lors de la création du LogStore Raft-WAL : %v", err)
	}

	s.lastIndex, err = logStore.LastIndex()
	if err != nil {
		log.Fatalf("Erreur lors de la récupération de l'index de la dernière entrée: %v", err)
	}

	stableStore, err := raftboltdb.NewBoltStore(filepath.Join(baseDir, "logs.dat"))
	if err != nil {
		return nil, nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "logs.dat"), err)
	}

	// NOTE(review): 3 retained snapshots is hard-coded here although the file
	// declares retainSnapshotCount = 2 — one of the two is stale.
	fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
	if err != nil {
		return nil, nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
	}

	dialOption := []grpc.DialOption{}

	// TLS is enabled only when a key is configured.
	if s.conf.Manager.TlsKey != "" {
		dialOption = append(dialOption, grpc.WithTransportCredentials(getTLSCredentials(s.conf)))
	}

	tm := transport.New(raft.ServerAddress(s.conf.AddressPrivate), dialOption)

	fsm, err := NewFSM(strings.Split(s.conf.EtcdURI, ","), s)
	if err != nil {
		log.Fatalf("%v", err)
	}

	r, err := raft.NewRaft(c, fsm, logStore, stableStore, fss, tm.Transport())
	if err != nil {
		return nil, nil, fmt.Errorf("raft.NewRaft: %v", err)
	}
	s.Raft = r

	// Error deliberately ignored: absence of readable state is treated the
	// same as "no state".
	hasState, _ := checkIfStateExists(logStore)

	// NOTE(review): this condition is true only when AddressPrivate contains
	// no ":" at all (split[0] == whole string). If the intent was "address
	// without port" or "first peer", this looks wrong — confirm.
	if strings.Split(s.conf.AddressPrivate, ":")[0] == s.conf.AddressPrivate && !hasState {
		println("Démarrage du bootstrap ! ")
		//node.Bootstrap = true

		// Fetch node IDs from etcd to build the initial peer set.
		etcd, err := etcd_client.New(s.conf.EtcdURI)
		if err != nil {
			return nil, nil, err
		}
		defer etcd.Close()

		peers := []raft.Server{}

		// Keep only the configured peers whose host part matches a node's
		// management IP registered in etcd.
		for key, value := range etcd_client.GetNodes(etcd, s.conf.ClusterID) {
			for _, peer := range s.conf.Manager.Peers {
				addressPort := strings.Split(peer, ":")
				if addressPort[0] == value.IpManagement {
					peers = append(peers, raft.Server{
						ID:      raft.ServerID(key),
						Address: raft.ServerAddress(peer),
					})
				}
			}
		}

		cfg := raft.Configuration{
			Servers: peers,
		}
		f := r.BootstrapCluster(cfg)
		if err := f.Error(); err != nil {
			return nil, nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
		}
	}

	return s, tm, nil
}
|
|
|
|
// LsOptions controls how Store.Ls matches and returns entries.
type LsOptions struct {
	// Recursive matches two path segments under the key ("key/a/b") instead
	// of exactly one ("key/a").
	Recursive bool
	// Data includes the stored values in the result; when false, keys map
	// to nil.
	Data bool
}
|
|
|
|
// Retourne le contenu de la clé
|
|
func (s *Store) Ls(key string, options LsOptions) (map[string][]byte, error) {
|
|
barrier := s.Raft.Barrier(10 * time.Second)
|
|
if err := barrier.Error(); err != nil {
|
|
return nil, fmt.Errorf("barrier timeout: %v", err)
|
|
}
|
|
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
|
|
dir := map[string][]byte{}
|
|
|
|
for k, v := range s.m {
|
|
if options.Recursive {
|
|
re := regexp.MustCompile(fmt.Sprintf("^%s/([^/]+)/([^/]+)", key))
|
|
matches := re.FindStringSubmatch(k)
|
|
if matches != nil {
|
|
if options.Data {
|
|
dir[strings.Join(matches[1:], "/")] = v
|
|
} else {
|
|
dir[strings.Join(matches[1:], "/")] = nil
|
|
}
|
|
}
|
|
} else {
|
|
re := regexp.MustCompile(fmt.Sprintf("^%s/([^/]+)$", key))
|
|
matches := re.FindStringSubmatch(k)
|
|
if matches != nil {
|
|
if options.Data {
|
|
dir[matches[1]] = v
|
|
} else {
|
|
dir[matches[1]] = nil
|
|
}
|
|
}
|
|
}
|
|
|
|
}
|
|
|
|
return dir, nil
|
|
}
|
|
|
|
// Get returns the value for the given key.
|
|
func (s *Store) Get(key string) ([]byte, error) {
|
|
barrier := s.Raft.Barrier(10 * time.Second)
|
|
if err := barrier.Error(); err != nil {
|
|
return nil, fmt.Errorf("barrier timeout: %v", err)
|
|
}
|
|
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
return s.m[key], nil
|
|
}
|
|
|
|
// Set sets the value for the given key.
|
|
func (s *Store) Set(key string, value []byte) error {
|
|
if s.Raft.State() != raft.Leader {
|
|
return fmt.Errorf("not leader")
|
|
}
|
|
|
|
c := &command{
|
|
Op: "set",
|
|
Key: key,
|
|
Value: value,
|
|
}
|
|
b, err := json.Marshal(c)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
f := s.Raft.Apply(b, raftTimeout)
|
|
return f.Error()
|
|
}
|
|
|
|
// Delete deletes the given key.
|
|
func (s *Store) Delete(key string) error {
|
|
if s.Raft.State() != raft.Leader {
|
|
return fmt.Errorf("not leader")
|
|
}
|
|
|
|
c := &command{
|
|
Op: "delete",
|
|
Key: key,
|
|
}
|
|
b, err := json.Marshal(c)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
f := s.Raft.Apply(b, raftTimeout)
|
|
return f.Error()
|
|
}
|
|
|
|
// Vérifie si l'état Raft existe déjà
|
|
func checkIfStateExists(logStore *raftwal.WAL) (bool, error) {
|
|
// Vérifier les logs Raft
|
|
firstIndex, err := logStore.FirstIndex()
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
|
|
if firstIndex > 0 {
|
|
return true, nil
|
|
}
|
|
|
|
return false, nil
|
|
}
|