chore: move docs to a dedicated repo (#484)

Signed-off-by: Andrew Rynhard <andrew@andrewrynhard.com>
Andrew Rynhard 2019-04-03 22:34:37 -07:00 committed by GitHub
parent 6b9fa1762e
commit a992efd88c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 0 additions and 788 deletions

.gitmodules

@ -1,3 +0,0 @@
[submodule "docs/themes/hugo-theme-talos"]
path = docs/themes/hugo-theme-talos
url = https://github.com/talos-systems/hugo-theme-talos.git


@ -1,31 +0,0 @@
baseURL = "https://docs.talos-systems.com/"
languageCode = "en-us"
title = "Talos"
theme = "hugo-theme-talos"
sectionPagesMenu = "main"
[outputs]
home = ["HTML", "RSS", "JSON"]
[params]
Copyright = "Released under Mozilla Public License 2.0"
GitHubLink = "https://github.com/talos-systems/talos"
DockerHubLink = "https://hub.docker.com/u/talos-systems"
[[menu.main]]
identifier = "components"
name = "Components"
url = "/components/"
weight = 10
[[menu.main]]
identifier = "configuration"
name = "Configuration"
url = "/configuration/"
weight = 20
[[menu.main]]
identifier = "examples"
name = "Examples"
url = "/examples/"
weight = 30


@ -1,7 +0,0 @@
---
title: "Components"
date: 2018-10-29T19:40:55-07:00
draft: false
---
In this section, we will discuss the various components that make up Talos.


@ -1,20 +0,0 @@
---
title: "blockd"
date: 2018-10-30T09:16:35-07:00
draft: false
weight: 80
menu:
main:
parent: 'components'
weight: 80
---
Talos comes with a reserved block device with three partitions:
- an EFI System Partition (`ESP`)
- a `ROOT` partition mounted as read-only that contains the minimal set of binaries to operate system services
- and a `DATA` partition that is mounted as read/write at `/var/run`
These partitions are reserved and cannot be modified.
The one exception is that the `DATA` partition is automatically resized to the maximum possible size by the `init` process.
Managing any other block device can be done via the `blockd` service.
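As a rough illustration, the reserved layout described above might appear as follows when inspected with `lsblk` (the device name, ordering, and any mountpoint other than `/var/run` are assumptions made for the example):
```bash
# Illustrative only: the device name and details below are hypothetical.
lsblk -o NAME,PARTLABEL,MOUNTPOINT /dev/sda
# NAME   PARTLABEL MOUNTPOINT
# sda
# ├─sda1 ESP                  <- EFI System Partition
# ├─sda2 ROOT                 <- read-only system binaries
# └─sda3 DATA      /var/run   <- read/write, resized by init
```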


@ -1,27 +0,0 @@
---
title: "init"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 20
menu:
main:
parent: 'components'
weight: 20
---
A common theme throughout the design of Talos is minimalism.
We believe strongly in the UNIX philosophy that each program should do one job well.
The `init` included in Talos is one example of this.
We wanted to create a focused `init` that had one job - run Kubernetes.
There simply is no mechanism in place to do anything else.
To accomplish this, we must address real-world operational needs such as:
- Orchestration around creating a highly available control plane
- Log retrieval
- Restarting system services
- Rebooting a node
- and more
In the following sections we will take a closer look at how these needs are addressed, and how services managed by `init` are designed to enhance the Kubernetes experience.


@ -1,12 +0,0 @@
---
title: "kernel"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 10
menu:
main:
parent: 'components'
weight: 10
---
The kernel included with Talos is configured according to the recommendations outlined in the Kernel Self Protection Project ([KSPP](http://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project)).


@ -1,13 +0,0 @@
---
title: "kubeadm"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 30
menu:
main:
parent: 'components'
weight: 30
---
At the heart of Talos is [`kubeadm`](https://github.com/kubernetes/kubernetes/tree/master/cmd/kubeadm), allowing it to harness the power of the official upstream bootstrap tool.
By integrating with `kubeadm` natively, Talos stands to gain a strong community of users and developers already familiar with `kubeadm`.


@ -1,21 +0,0 @@
---
title: "osctl"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 70
menu:
main:
parent: 'components'
weight: 70
---
The `osctl` CLI is the client to the `osd` service running on every node.
With it you can do things like:
- retrieve container logs
- restart a service
- reset a node
- reboot a node
- retrieve kernel logs
- generate PKI resources
- inject data into node configuration files
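For illustration, a session against a node might look like the following. The subcommand names here are assumptions based on the capabilities listed above, not an authoritative reference; consult `osctl --help` for the actual commands.
```bash
# Hypothetical invocations; exact subcommands and arguments may differ.
osctl logs kubelet      # retrieve container logs for a service
osctl restart kubelet   # restart a service
osctl dmesg             # retrieve kernel logs
osctl reboot            # reboot the node
```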


@ -1,24 +0,0 @@
---
title: "osd"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 60
menu:
main:
parent: 'components'
weight: 60
---
Talos is unique in that it has no concept of host-level access.
There are no shells installed.
No SSH daemon.
Only what is required to run Kubernetes.
Furthermore, there is no way to run any custom processes at the host level.
To make this work, we needed an out-of-band tool for managing the nodes.
In an ideal world, the system would be self-healing and we would never have to touch it.
But, in the real world, this does not happen.
We still need a way to handle operational scenarios that may arise.
The `osd` daemon provides a way to do just that.
Based on the Principle of Least Privilege, `osd` provides operational value to cluster administrators by exposing an API for node management.


@ -1,13 +0,0 @@
---
title: "proxyd"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 50
menu:
main:
parent: 'components'
weight: 50
---
High availability is crucial for production-quality Kubernetes clusters.
The `proxyd` component is a simple yet powerful reverse proxy that adapts to where Talos is employed and provides load balancing across all API servers.


@ -1,21 +0,0 @@
---
title: "trustd"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 40
menu:
main:
parent: 'components'
weight: 40
---
Security is one of the highest priorities within Talos.
Operating a Kubernetes cluster requires a certain level of trust.
For example, orchestrating the bootstrap of a highly available control plane requires the distribution of sensitive PKI data.
To that end, we created `trustd`.
Based on the concept of a Root of Trust, `trustd` is a simple daemon responsible for establishing trust within the system.
Once trust is established, various methods become available to the trustee.
It can, for example, accept a write request from another node to place a file on disk.
We expect the number of available methods to grow as Talos is tested in the real world.


@ -1,12 +0,0 @@
---
title: "Configuration"
date: 2018-10-29T19:40:55-07:00
draft: false
---
In this section, we will step through the configuration of a Talos based Kubernetes cluster.
There are three major components we will configure:
- `osd` and `osctl`
- the master nodes
- the worker nodes


@ -1,173 +0,0 @@
---
title: "Masters"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 20
menu:
main:
parent: 'configuration'
weight: 20
---
Configuring master nodes in a Talos Kubernetes cluster is a two-part process:
- configuring the Talos specific options
- and configuring the Kubernetes specific options
To get started, create a YAML file we will use in the following steps:
```bash
touch <node-name>.yaml
```
## Configuring Talos
### Injecting the Talos PKI
Using `osctl`, and our output from the `osd` configuration [documentation]({{< ref "osd.md" >}}), inject the generated PKI into the configuration file:
```bash
osctl inject os --crt <organization>.crt --key <organization>.key <node-name>.yaml
```
You should see the following fields populated:
```yaml
security:
os:
ca:
crt: <base 64 encoded root public certificate>
key: <base 64 encoded root private key>
...
```
This process only needs to be performed on your initial node's configuration file.
### Configuring `trustd`
Each master node participates as a Root of Trust in the cluster.
The responsibilities of `trustd` include:
- certificate as a service
- and Kubernetes PKI distribution amongst master nodes
Authentication between `trustd` and a client is, for now, a simple username and password combination.
Having these credentials gives a client the ability to request a certificate that identifies it.
In `<node-name>.yaml`, add the following:
```yaml
security:
...
services:
...
trustd:
username: '<username>'
password: '<password>'
...
```
## Configuring Kubernetes
### Generating the Root CA
To create the root CA for the Kubernetes cluster, run:
```bash
osctl gen ca --rsa --hours <hours> --organization <kubernetes-organization>
```
{{% note %}}The `--rsa` flag is required for the generation of the Kubernetes CA. {{% /note %}}
### Injecting the Kubernetes PKI
Using `osctl`, inject the generated PKI into the configuration file:
```bash
osctl inject kubernetes --crt <kubernetes-organization>.crt --key <kubernetes-organization>.key <node-name>.yaml
```
You should see the following fields populated:
```yaml
security:
...
kubernetes:
ca:
crt: <base 64 encoded root public certificate>
key: <base 64 encoded root private key>
...
```
### Configuring Kubeadm
The configuration of the `kubeadm` service is done in two parts:
- supplying the Talos specific options
- supplying the `kubeadm` `InitConfiguration`
#### Talos Specific Options
```yaml
services:
...
kubeadm:
init:
cni: <flannel|calico>
...
```
#### Kubeadm Specific Options
```yaml
services:
...
kubeadm:
...
configuration: |
apiVersion: kubeadm.k8s.io/v1alpha3
kind: InitConfiguration
...
...
```
> See the official [documentation](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/) for the options available in `InitConfiguration`.
In the end, you should have something similar to the following:
```yaml
version: ""
security:
os:
ca:
crt: <base 64 encoded root public certificate>
key: <base 64 encoded root private key>
kubernetes:
ca:
crt: <base 64 encoded root public certificate>
key: <base 64 encoded root private key>
services:
init:
cni: <flannel|calico>
kubeadm:
configuration: |
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
apiEndpoint:
advertiseAddress: <master ip>
bindPort: 6443
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- token: '<kubeadm token>'
ttl: 0s
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
controlPlaneEndpoint: <master ip>:443
networking:
dnsDomain: cluster.local
podSubnet: <pod subnet>
serviceSubnet: <service subnet>
trustd:
username: '<username>'
password: '<password>'
```


@ -1,101 +0,0 @@
---
title: "osd"
date: 2018-11-03T17:14:49-07:00
draft: false
weight: 10
menu:
main:
identifier: "osd-configuration"
parent: 'configuration'
weight: 10
---
The `osd` service enforces a high level of security by utilizing mutual TLS for authentication and authorization.
In this section we will configure mutual TLS by generating the certificates for the servers (`osd`) and clients (`osctl`).
### Cluster Owners
We recommend that the configuration of `osd` be performed by a cluster owner.
A cluster owner should be a person of authority within an organization, such as a director, manager, or senior member of a team.
They are responsible for storing the root CA and distributing the PKI to authorized cluster administrators.
### Cluster Administrators
The authorization to use `osctl` should be granted to a person fit for cluster administration.
As a cluster administrator, the user gains access to the out-of-band management tools offered by Talos.
## Configuring `osd`
To configure `osd`, we will need:
- static IP addresses for each node that will participate as a master
- and a root CA
The following steps should be performed by a cluster owner.
### Generating the Root CA
The root CA can be generated by running:
```bash
osctl gen ca --hours <hours> --organization <organization>
```
The cluster owner should store the generated private key (`<organization>.key`) in a safe place that only other cluster owners have access to.
The public certificate (`<organization>.crt`) should be made available to cluster administrators because, as we will see shortly, it is required to configure `osctl`.
{{% note %}}The `--rsa` flag should _not_ be specified for the generation of the `osd` CA.{{% /note %}}
### Generating the Identity Certificates
Talos provides automation for generating each node's certificate.
## Configuring `osctl`
To configure `osctl`, we will need:
- the root CA we generated above
- and a certificate signed by the root CA specific to the user
Setting up `osctl` is a joint effort between a cluster owner and a user requesting to become a cluster administrator.
### Generating the User Certificate
The user requesting cluster administration access runs the following:
```bash
osctl gen key --name <user>
osctl gen csr --ip 127.0.0.1 --key <user>.key
```
Now, the cluster owner must generate a certificate from the above CSR.
To do this, the user requesting access submits the CSR generated above to the cluster owner, and the cluster owner runs the following:
```bash
osctl gen crt --hours <hours> --ca <organization> --csr <user>.csr --name <user>
```
The generated certificate is then sent to the requesting user using a secure channel.
### The Configuration File
With all the above steps done, the new cluster administrator can now create the configuration file for `osctl`.
```bash
cat <organization>.crt | base64
cat <user>.crt | base64
cat <user>.key | base64
```
Now, create `~/.talos/config` with the following contents:
```yaml
context: <context>
contexts:
<context>:
target: <node-ip>
ca: <base 64 encoded root public certificate>
crt: <base 64 encoded user public certificate>
key: <base 64 encoded user private key>
```
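The file can also be assembled in one step. This is only a convenience sketch; it assumes GNU coreutils `base64`, whose `-w 0` flag disables line wrapping so each encoded value stays on a single line:
```bash
# Sketch only: <context>, <node-ip>, and the file names are placeholders from the steps above.
mkdir -p ~/.talos
cat > ~/.talos/config <<EOF
context: <context>
contexts:
  <context>:
    target: <node-ip>
    ca: $(base64 -w 0 <organization>.crt)
    crt: $(base64 -w 0 <user>.crt)
    key: $(base64 -w 0 <user>.key)
EOF
```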


@ -1,40 +0,0 @@
---
title: "Workers"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 30
menu:
main:
parent: 'configuration'
weight: 30
---
Configuring worker nodes is much simpler than configuring master nodes.
Using the `trustd` API, worker nodes submit a `CSR`, and, if authenticated, receive a valid `osd` certificate.
Similarly, using a `kubeadm` token, the node joins an existing cluster.
We need to specify:
- the `osd` public certificate
- `trustd` credentials and endpoints
- and a `kubeadm` `JoinConfiguration`
```yaml
version: ""
...
services:
kubeadm:
configuration: |
apiVersion: kubeadm.k8s.io/v1alpha3
kind: JoinConfiguration
...
trustd:
username: <username>
password: <password>
endpoints:
- <master-1>
...
- <master-n>
```
> See the official [documentation](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/) for the options available in `JoinConfiguration`.
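For reference, a minimal `JoinConfiguration` might look like the sketch below. Note that it uses the `v1beta1` API rather than the `v1alpha3` version shown above; verify the field names against the kubeadm release in use.
```yaml
# Sketch only; field names follow the upstream v1beta1 API.
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
  bootstrapToken:
    token: '<kubeadm token>'
    apiServerEndpoint: <master ip>:6443
    caCertHashes:
      - 'sha256:<hash of the cluster CA certificate>'
```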


@ -1,7 +0,0 @@
---
title: "Examples"
date: 2018-10-29T19:40:55-07:00
draft: false
---
One of the primary goals of Talos is a consistent experience regardless of _where_ you are operating.
In the following sections we will cover how to deploy Talos to well known platforms.


@ -1,26 +0,0 @@
---
title: "AWS"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 10
menu:
main:
parent: 'examples'
weight: 10
---
First, create the AMI:
```bash
docker run \
--rm \
--volume $HOME/.aws/credentials:/root/.aws/credentials \
--env AWS_DEFAULT_PROFILE=${PROFILE} \
--env AWS_DEFAULT_REGION=${REGION} \
talos-systems/talos:latest ami -var regions=${COMMA_SEPARATED_LIST_OF_REGIONS}
```
Once the AMI is created, you can start an EC2 instance using the AMI ID.
Provide the proper configuration as the instance's user data.
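For example, a hedged sketch using the AWS CLI (the AMI ID, instance type, and networking options are placeholders to adjust for your environment):
```bash
# Sketch only: values in angle brackets are placeholders.
aws ec2 run-instances \
  --image-id <ami-id> \
  --instance-type m5.large \
  --count 1 \
  --subnet-id <subnet-id> \
  --security-group-ids <security-group-id> \
  --user-data file://master.yaml
```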
> An official Terraform module is currently being developed; stay tuned!


@ -1,32 +0,0 @@
---
title: "Google Cloud"
date: 2019-2-19
draft: false
weight: 20
menu:
main:
parent: 'examples'
weight: 20
---
First, create the Google Cloud-compatible image:
```bash
make image-gcloud
```
Upload the image with:
```bash
gsutil cp /path/to/talos/build/gcloud/talos.tar.gz gs://<gcloud bucket name>
```
Create a custom Google Cloud image with:
```bash
gcloud compute images create talos --source-uri=gs://<gcloud bucket name>/talos.tar.gz --guest-os-features=VIRTIO_SCSI_MULTIQUEUE
```
Create an instance in Google Cloud, making sure to create a `user-data` key in the "Metadata" section with the value of your full Talos node configuration.
{{% note %}} Further exploration is needed to see if we can use the "Startup script" section instead. {{% /note %}}
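The instance can also be created from the command line; a hedged sketch with the `gcloud` CLI follows (the instance name, machine type, and zone are placeholders):
```bash
# Sketch only: adjust the name, machine type, and zone for your environment.
gcloud compute instances create talos-master \
  --image talos \
  --machine-type n1-standard-2 \
  --zone us-central1-a \
  --metadata-from-file user-data=master.yaml
```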


@ -1,76 +0,0 @@
---
title: "KVM"
date: 2018-10-29T19:40:55-07:00
draft: false
weight: 30
menu:
main:
parent: 'examples'
weight: 30
---
## Creating a Master Node
On the KVM host, install a master node to an available block device:
```bash
docker run \
--rm \
--privileged \
--volume /dev:/dev \
talos-systems/talos:latest image -b /dev/sdb -f -p bare-metal -u http://${IP}:8080/master.yaml
```
{{% note %}}`http://${IP}:8080/master.yaml` should be reachable by the VM and contain a valid master configuration file.{{% /note %}}
Now, create the VM:
```bash
virt-install \
-n master \
--description "Kubernetes master node." \
--os-type=Linux \
--os-variant=generic \
--virt-type=kvm \
--cpu=host \
--vcpus=2 \
--ram=4096 \
--disk path=/dev/sdb \
--network bridge=br0,model=e1000,mac=52:54:00:A8:4C:E1 \
--graphics none \
--boot hd \
--rng /dev/random
```
## Creating a Worker Node
On the KVM host, install a worker node to an available block device:
```bash
docker run \
--rm \
--privileged \
--volume /dev:/dev \
talos-systems/talos:latest image -b /dev/sdc -f -p bare-metal -u http://${IP}:8080/worker.yaml
```
{{% note %}}`http://${IP}:8080/worker.yaml` should be reachable by the VM and contain a valid worker configuration file.{{% /note %}}
Now, create the VM:
```bash
virt-install \
-n worker \
--description "Kubernetes worker node." \
--os-type=Linux \
--os-variant=generic \
--virt-type=kvm \
--cpu=host \
--vcpus=2 \
--ram=4096 \
--disk path=/dev/sdc \
--network bridge=br0,model=e1000,mac=52:54:00:B9:5D:F2 \
--graphics none \
--boot hd \
--rng /dev/random
```


@ -1,90 +0,0 @@
---
title: "Xen"
date: 2018-11-06T06:25:46-08:00
draft: false
weight: 40
menu:
main:
parent: 'examples'
weight: 40
---
## Creating a Master Node
On `Dom0`, install Talos to an available block device:
```bash
docker run \
--rm \
--privileged \
--volume /dev:/dev \
talos-systems/talos:latest image -b /dev/sdb
```
Save the following as `/etc/xen/master.cfg`:
```python
name = "master"
builder='hvm'
bootloader = "/bin/pygrub"
firmware_override = "/usr/lib64/xen/boot/hvmloader"
vcpus=2
memory = 4096
serial = "pty"
kernel = "/var/lib/xen/talos/vmlinuz"
ramdisk = "/var/lib/xen/talos/initramfs.xz"
disk = [ 'phy:/dev/sdb,xvda,w', ]
vif = [ 'mac=52:54:00:A8:4C:E1,bridge=xenbr0,model=e1000', ]
extra = "consoleblank=0 console=hvc0 console=tty0 console=ttyS0,9600 talos.platform=bare-metal talos.userdata=http://${IP}:8080/master.yaml"
```
{{% note %}}`http://${IP}:8080/master.yaml` should be reachable by the VM and contain a valid master configuration file.{{% /note %}}
Now, create the VM:
```bash
xl create /etc/xen/master.cfg
```
## Creating a Worker Node
On `Dom0`, install Talos to an available block device:
```bash
docker run \
--rm \
--privileged \
--volume /dev:/dev \
talos-systems/talos:latest image -b /dev/sdc
```
Save the following as `/etc/xen/worker.cfg`:
```python
name = "worker"
builder='hvm'
bootloader = "/bin/pygrub"
firmware_override = "/usr/lib64/xen/boot/hvmloader"
vcpus=2
memory = 4096
serial = "pty"
kernel = "/var/lib/xen/talos/vmlinuz"
ramdisk = "/var/lib/xen/talos/initramfs.xz"
disk = [ 'phy:/dev/sdc,xvda,w', ]
vif = [ 'mac=52:54:00:B9:5D:F2,bridge=xenbr0,model=e1000', ]
extra = "consoleblank=0 console=hvc0 console=tty0 console=ttyS0,9600 talos.platform=bare-metal talos.userdata=http://${IP}:8080/worker.yaml"
```
{{% note %}}`http://${IP}:8080/worker.yaml` should be reachable by the VM and contain a valid worker configuration file.{{% /note %}}
Now, create the VM:
```bash
xl create /etc/xen/worker.cfg
```


@ -1,7 +0,0 @@
---
title: "Talos"
date: 2018-10-29T19:40:55-07:00
type: index
---
Talos is a modern Linux distribution designed to be secure, immutable, and minimal.


@ -1,31 +0,0 @@
[build]
publish = "public"
command = "hugo --gc --minify"
[context.production.environment]
HUGO_VERSION = "0.54.0"
HUGO_ENV = "production"
HUGO_ENABLEGITINFO = "true"
[context.split1]
command = "hugo --gc --minify --enableGitInfo"
[context.split1.environment]
HUGO_VERSION = "0.54.0"
HUGO_ENV = "production"
[context.deploy-preview]
command = "hugo --gc --minify --buildFuture -b $DEPLOY_PRIME_URL"
[context.deploy-preview.environment]
HUGO_VERSION = "0.54.0"
[context.branch-deploy]
command = "hugo --gc --minify -b $DEPLOY_PRIME_URL"
[context.branch-deploy.environment]
HUGO_VERSION = "0.54.0"
[context.next.environment]
HUGO_ENABLEGITINFO = "true"

@ -1 +0,0 @@
Subproject commit 7d4bb97020b4aebc47aa828aae77c7a0aeaf78e5