package domain
import (
	"go.uber.org/zap"

	"deevirt.fr/compute/pkg/api/proto"
	"deevirt.fr/compute/pkg/config"
	"deevirt.fr/compute/pkg/raft"
)
type Domain struct {
|
|
Config *config.Config
|
|
Store *raft.Store
|
|
Logger *zap.Logger
|
|
proto.UnimplementedDomainServer
|
|
}
/*func (d *Domain) connectNode(NodeId string) (*libvirt.Connect, error) {
|
|
var jCluster schema.NodeStore
|
|
cluster, _ := d.Store.Get("/etc/libvirt/cluster")
|
|
json.Unmarshal(cluster, &jCluster)
|
|
|
|
var libvirt_uri string
|
|
if d.Config.LibvirtTLS {
|
|
libvirt_uri = fmt.Sprintf("qemu+tls://%s/system", jCluster[NodeId].IpManagement)
|
|
} else {
|
|
libvirt_uri = fmt.Sprintf("qemu+tcp://%s/system", jCluster[NodeId].IpManagement)
|
|
}
|
|
|
|
c, err := libvirt.NewConnect(libvirt_uri)
|
|
if err != nil {
|
|
log.Fatalf("Erreur %v", err)
|
|
}
|
|
|
|
return c, nil
|
|
}
|
|
|
|
func (d *Domain) connectDomain(ctx context.Context, domainID string) (string, *libvirt.Connect, error) {
|
|
dom, _ := d.Get(ctx, &proto.DomainListRequest{
|
|
DomainId: domainID,
|
|
})
|
|
|
|
var jCluster schema.NodeStore
|
|
cluster, _ := d.Store.Get("/etc/libvirt/cluster")
|
|
json.Unmarshal(cluster, &jCluster)
|
|
|
|
c, err := d.connectNode(dom.NodeId)
|
|
|
|
return dom.NodeId, c, err
|
|
}
|
|
|
|
func (d *Domain) List(ctx context.Context, in *proto.DomainListAllRequest) (*proto.DomainListAllResponse, error) {
|
|
domainsListResponse := []*proto.DomainListResponse{}
|
|
|
|
domains, err := d.Store.Ls("/etc/libvirt/domain", raft.LsOptions{
|
|
Recursive: false,
|
|
Data: true,
|
|
})
|
|
if err != nil {
|
|
return nil, status.Errorf(codes.Internal, "Error read a store %v", err)
|
|
}
|
|
|
|
for domId, data := range domains {
|
|
domData := schema.Domain{}
|
|
json.Unmarshal(data, &domData)
|
|
|
|
nodeData, _ := d.Store.Get(fmt.Sprintf("/etc/libvirt/%s/%s/%s", domData.Type, domData.NodeId, domId))
|
|
domNodeData := schema.DomainToNode{}
|
|
json.Unmarshal(nodeData, &domNodeData)
|
|
|
|
domainsListResponse = append(domainsListResponse, &proto.DomainListResponse{
|
|
//NodeId: domData.NodeId,
|
|
DomainId: domId,
|
|
Config: string(domData.Config),
|
|
State: int64(domNodeData.State),
|
|
})
|
|
}
|
|
|
|
return &proto.DomainListAllResponse{
|
|
Domains: domainsListResponse,
|
|
}, nil
|
|
}
|
|
|
|
func (d *Domain) Get(ctx context.Context, req *proto.DomainListRequest) (*proto.DomainListResponse, error) {
|
|
domainsListResponse := proto.DomainListResponse{}
|
|
|
|
domain, err := d.Store.Get(fmt.Sprintf("/etc/libvirt/domain/%s", req.DomainId))
|
|
if err != nil {
|
|
return nil, status.Errorf(codes.Internal, "Error read a store %v", err)
|
|
}
|
|
|
|
domData := deevirt_schema.Domain{}
|
|
json.Unmarshal(domain, &domData)
|
|
|
|
nodeData, _ := d.Store.Get(fmt.Sprintf("/etc/libvirt/%s/%s/%s", domData.Type, domData.NodeId, req.DomainId))
|
|
domNodeData := deevirt_schema.DomainToNode{}
|
|
json.Unmarshal(nodeData, &domNodeData)
|
|
|
|
domainsListResponse = proto.DomainListResponse{
|
|
NodeId: domData.NodeId,
|
|
DomainId: req.DomainId,
|
|
Config: string(domData.Config),
|
|
State: int64(domNodeData.State),
|
|
}
|
|
|
|
return &domainsListResponse, nil
|
|
}
|
|
|
|
func (d *Domain) Migrate(in *proto.DomainMigrateRequest, stream proto.Domain_MigrateServer) error {
|
|
ctx := context.Background()
|
|
|
|
nodeID, c, err := d.connectDomain(ctx, in.DomainId)
|
|
if err != nil {
|
|
return status.Errorf(codes.Internal, "Connexion error to libvirt")
|
|
}
|
|
defer c.Close()
|
|
|
|
dom, err := c.LookupDomainByUUIDString(in.DomainId)
|
|
if err != nil {
|
|
return status.Errorf(codes.Internal, "Domain unknown")
|
|
}
|
|
|
|
s, err := scheduler.New()
|
|
if err != nil {
|
|
return status.Errorf(codes.Internal, "Connexion error to libvirt %v", err)
|
|
}
|
|
|
|
topNode, err := s.GetTopNode(1)
|
|
if err != nil {
|
|
return status.Errorf(codes.Internal, "Connexion error to libvirt %v", err)
|
|
}
|
|
newNode := topNode[0]
|
|
|
|
if nodeID == newNode.NodeID {
|
|
d.Logger.Sugar().Errorf("Attempt to migrate guest to the same host %v", newNode.NodeID)
|
|
return status.Errorf(codes.Internal, "Attempt to migrate guest to the same host %v", newNode.NodeID)
|
|
}
|
|
|
|
ctx1, cancel := context.WithCancel(context.Background())
|
|
|
|
migrate := func(cancel context.CancelFunc) {
|
|
defer cancel()
|
|
|
|
c_new, err := d.connectNode(newNode.NodeID)
|
|
if err != nil {
|
|
d.Store.Delete(fmt.Sprintf("/etc/libvirt/qemu/%s/%s", newNode.NodeID, in.DomainId))
|
|
d.Logger.Sugar().Infof("Connexion error to libvirt %v", err.Error())
|
|
return
|
|
}
|
|
defer c_new.Close()
|
|
_, err = dom.Migrate(c_new, libvirt.MIGRATE_LIVE|libvirt.MIGRATE_PERSIST_DEST|libvirt.MIGRATE_UNDEFINE_SOURCE, "", "", 0)
|
|
if err != nil {
|
|
d.Logger.Sugar().Infof("Migration error %v", err.Error())
|
|
return
|
|
}
|
|
}
|
|
|
|
go migrate(cancel)
|
|
|
|
for {
|
|
select {
|
|
case <-ctx1.Done():
|
|
return nil
|
|
default:
|
|
var queryMigrate struct {
|
|
Return struct {
|
|
RAM struct {
|
|
Total float64 `json:"total"`
|
|
Remaining float64 `json:"remaining"`
|
|
} `json:"ram"`
|
|
} `json:"return"`
|
|
}
|
|
|
|
t, _ := dom.QemuMonitorCommand("{\"execute\": \"query-migrate\"}", libvirt.DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT)
|
|
if err := json.Unmarshal([]byte(t), &queryMigrate); err == nil {
|
|
progress := (1 - (queryMigrate.Return.RAM.Remaining / queryMigrate.Return.RAM.Total)) * 100
|
|
|
|
if progress > 0 {
|
|
stream.Send(&proto.DomainMigrateResponse{
|
|
Percentage: float32(progress),
|
|
})
|
|
|
|
d.Logger.Sugar().Infof("%s Progression: %.2f%%\n", nodeID, progress)
|
|
}
|
|
}
|
|
|
|
time.Sleep(500 * time.Millisecond)
|
|
}
|
|
}
|
|
}*/