This change only moves packages and updates import paths.

Goal: expose `internal/pkg/provision` as `pkg/provision` so that other projects can import the Talos provisioning library. As cluster checks are almost always required as part of the provisioning process, package `internal/pkg/cluster` was also made public as `pkg/cluster`. The remaining changes update direct dependencies discovered by `importvet`.

Public packages (useful, general-purpose packages with a stable API):

* `internal/pkg/conditions` -> `pkg/conditions`
* `internal/pkg/tail` -> `pkg/tail`

Private packages (used only by the provisioning library internally):

* `internal/pkg/inmemhttp` -> `pkg/provision/internal/inmemhttp`
* `internal/pkg/kernel/vmlinuz` -> `pkg/provision/internal/vmlinuz`
* `internal/pkg/cniutils` -> `pkg/provision/internal/cniutils`

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
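With `pkg/provision` public, an out-of-tree project can drive cluster creation through the same `Create` API that the firecracker provider below implements. The following is a minimal sketch, not code from the repository: the `clusterCreator` interface, the `createCluster` helper, and all literal values are assumptions for illustration; only the `provision.ClusterRequest` field names, `provision.Option`, and `provision.Cluster` come from the file that follows.

```go
// Sketch of an external caller using the now-public provisioning API.
package main

import (
	"context"
	"log"

	"github.com/talos-systems/talos/pkg/provision"
)

// clusterCreator mirrors the Create method implemented by the firecracker
// provisioner below; it is a stand-in for illustration, not a type exported
// by pkg/provision.
type clusterCreator interface {
	Create(ctx context.Context, request provision.ClusterRequest, opts ...provision.Option) (provision.Cluster, error)
}

// createCluster builds a minimal request and hands it to a provisioner.
func createCluster(ctx context.Context, p clusterCreator) (provision.Cluster, error) {
	request := provision.ClusterRequest{
		Name:           "demo",                 // cluster name; state lands in <StateDirectory>/<Name>
		StateDirectory: "/tmp/talos-provision", // hypothetical path
		KernelPath:     "/tmp/vmlinuz",         // compressed kernel; Create uncompresses it into the state dir
		// Network and Nodes must also be populated; their full shape is not
		// visible in this file, so they are omitted from the sketch.
	}

	return p.Create(ctx, request)
}

func main() {
	// Obtaining a concrete provisioner is provider-specific and not shown here.
	_ = createCluster
	log.Println("provisioning sketch; see createCluster above")
}
```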
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package firecracker

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"

	"github.com/talos-systems/talos/pkg/provision"
	"github.com/talos-systems/talos/pkg/provision/providers/vm"
)

// Create Talos cluster as a set of firecracker micro-VMs.
//
//nolint: gocyclo
func (p *provisioner) Create(ctx context.Context, request provision.ClusterRequest, opts ...provision.Option) (provision.Cluster, error) {
	options := provision.DefaultOptions()

	for _, opt := range opts {
		if err := opt(&options); err != nil {
			return nil, err
		}
	}

	if options.TargetArch != runtime.GOARCH {
		return nil, fmt.Errorf("firecracker is supported only on native arch: %q != %q", options.TargetArch, runtime.GOARCH)
	}

	statePath := filepath.Join(request.StateDirectory, request.Name)

	fmt.Fprintf(options.LogWriter, "creating state directory in %q\n", statePath)

	state, err := vm.NewState(
		statePath,
		p.Name,
		request.Name,
	)
	if err != nil {
		return nil, err
	}

	fmt.Fprintln(options.LogWriter, "uncompressing kernel")

	tempKernelPath := state.GetRelativePath("vmlinux")

	if err = uncompressKernel(request.KernelPath, tempKernelPath); err != nil {
		return nil, err
	}

	request.KernelPath = tempKernelPath

	fmt.Fprintln(options.LogWriter, "creating network", request.Network.Name)

	if err = p.CreateNetwork(ctx, state, request.Network); err != nil {
		return nil, fmt.Errorf("unable to provision CNI network: %w", err)
	}

	fmt.Fprintln(options.LogWriter, "creating load balancer")

	if err = p.CreateLoadBalancer(state, request); err != nil {
		return nil, fmt.Errorf("error creating loadbalancer: %w", err)
	}

	var nodeInfo []provision.NodeInfo

	fmt.Fprintln(options.LogWriter, "creating master nodes")

	if nodeInfo, err = p.createNodes(state, request, request.Nodes.MasterNodes(), &options); err != nil {
		return nil, err
	}

	fmt.Fprintln(options.LogWriter, "creating worker nodes")

	var workerNodeInfo []provision.NodeInfo

	if workerNodeInfo, err = p.createNodes(state, request, request.Nodes.WorkerNodes(), &options); err != nil {
		return nil, err
	}

	nodeInfo = append(nodeInfo, workerNodeInfo...)

	state.ClusterInfo = provision.ClusterInfo{
		ClusterName: request.Name,
		Network: provision.NetworkInfo{
			Name:        request.Network.Name,
			CIDR:        request.Network.CIDR,
			GatewayAddr: request.Network.GatewayAddr,
			MTU:         request.Network.MTU,
		},
		Nodes: nodeInfo,
	}

	err = state.Save()
	if err != nil {
		return nil, err
	}

	return state, nil
}