first-commit
This commit is contained in:
42
modules/k8s/controller/devcontainer/controller-wrapper.go
Normal file
42
modules/k8s/controller/devcontainer/controller-wrapper.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package devcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
)
|
||||
|
||||
// Controller implements the controller.Controller interface for the
// DevContainer module.
type Controller struct{}

// Name reports the identifier under which this controller is registered.
func (c *Controller) Name() string {
	return "devcontainer"
}
|
||||
|
||||
// Init 初始化控制器
|
||||
func (c *Controller) Init(mgr manager.Manager) error {
|
||||
// 添加 API 到 scheme
|
||||
klog.InfoS("Adding DevContainer API to scheme")
|
||||
if err := devcontainerv1.AddToScheme(mgr.GetScheme()); err != nil {
|
||||
return fmt.Errorf("unable to add DevContainer API to scheme: %w", err)
|
||||
}
|
||||
|
||||
// 创建 DevContainer reconciler
|
||||
klog.InfoS("Creating DevContainer reconciler")
|
||||
reconciler := &DevcontainerAppReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}
|
||||
|
||||
// 设置 reconciler 与 manager
|
||||
klog.InfoS("Setting up DevContainer with manager")
|
||||
if err := reconciler.SetupWithManager(mgr); err != nil {
|
||||
return fmt.Errorf("failed to setup DevContainer controller: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@@ -0,0 +1,449 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devcontainer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
devcontainer_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
devcontainer_controller_utils "code.gitea.io/gitea/modules/k8s/controller/devcontainer/utils"
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
k8s_sigs_controller_runtime_utils "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
// DevcontainerAppReconciler reconciles a DevcontainerApp object, driving the
// cluster toward the desired state by managing the StatefulSet and NodePort
// Service created for each DevcontainerApp.
type DevcontainerAppReconciler struct {
	// Embedded client used for all reads and writes of cluster objects.
	client.Client
	// Scheme maps Go types to GroupVersionKinds; needed to set owner references.
	Scheme *runtime.Scheme
}
|
||||
|
||||
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;delete;get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=services,verbs=create;delete;get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;delete
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// Modify the Reconcile function to compare the state specified by
// the DevcontainerApp object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)
	var err error

	// 1. Read the cached DevcontainerApp.
	app := &devcontainer_v1.DevcontainerApp{}
	err = r.Get(ctx, req.NamespacedName, app)
	if err != nil {
		// If the DevcontainerApp CR has been deleted, return an empty result
		// and skip the remaining steps.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Finalizer handling: guarantees PVC cleanup runs before deletion.
	finalizerName := "devcontainer.devstar.cn/finalizer"

	// Check whether the object is being deleted.
	if !app.ObjectMeta.DeletionTimestamp.IsZero() {
		// Object is being deleted — run finalizer logic.
		if k8s_sigs_controller_runtime_utils.ContainsFinalizer(app, finalizerName) {
			// Perform cleanup before the object disappears.
			logger.Info("Cleaning up resources before deletion", "name", app.Name)

			// Find and delete associated PVCs.
			if err := r.cleanupPersistentVolumeClaims(ctx, app); err != nil {
				logger.Error(err, "Failed to clean up PVCs")
				return ctrl.Result{}, err
			}

			// Cleanup done — remove the finalizer so deletion can proceed.
			k8s_sigs_controller_runtime_utils.RemoveFinalizer(app, finalizerName)
			if err := r.Update(ctx, app); err != nil {
				logger.Error(err, "Failed to remove finalizer")
				return ctrl.Result{}, err
			}
		}

		// Marked for deletion and fully processed — allow deletion to continue.
		return ctrl.Result{}, nil
	}

	// If the object does not carry the finalizer yet, add it.
	if !k8s_sigs_controller_runtime_utils.ContainsFinalizer(app, finalizerName) {
		logger.Info("Adding finalizer", "name", app.Name)
		k8s_sigs_controller_runtime_utils.AddFinalizer(app, finalizerName)
		if err := r.Update(ctx, app); err != nil {
			logger.Error(err, "Failed to add finalizer")
			return ctrl.Result{}, err
		}
	}

	// Check the stop-container annotation ("0" means scale down to zero).
	if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas == "0" {
		logger.Info("DevContainer stop requested via annotation", "name", app.Name)

		// Fetch the current StatefulSet (same name/namespace as the app).
		statefulSetInNamespace := &apps_v1.StatefulSet{}
		err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
		if err == nil {
			// Scale replicas down to zero.
			replicas := int32(0)
			statefulSetInNamespace.Spec.Replicas = &replicas
			if err := r.Update(ctx, statefulSetInNamespace); err != nil {
				logger.Error(err, "Failed to scale down StatefulSet replicas to 0")
				return ctrl.Result{}, err
			}
			logger.Info("StatefulSet scaled down to 0 replicas due to stop request")

			// Mark the container as not ready.
			app.Status.Ready = false
			if err := r.Status().Update(ctx, app); err != nil {
				logger.Error(err, "Failed to update DevcontainerApp status")
				return ctrl.Result{}, err
			}

			// Fall through so the Service handling below still runs.
		}
	}

	// 2. Reconcile the objects derived from the DevcontainerApp spec.
	// 2.1 Render the desired StatefulSet from the template and own it.
	statefulSet := devcontainer_controller_utils.NewStatefulSet(app)
	err = k8s_sigs_controller_runtime_utils.SetControllerReference(app, statefulSet, r.Scheme)
	if err != nil {
		return ctrl.Result{}, err
	}

	// 2.2 Look up the StatefulSet with the same name in the cluster.
	statefulSetInNamespace := &apps_v1.StatefulSet{}
	err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
	if err != nil {
		if !errors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		// Not found — create it from the rendered template.
		err = r.Create(ctx, statefulSet)
		if err != nil && !errors.IsAlreadyExists(err) {
			logger.Error(err, "Failed to create StatefulSet")
			return ctrl.Result{}, err
		}
	} else {
		// Handle the restart annotation.
		if restartedAt, exists := app.Annotations["devstar.io/restartedAt"]; exists {
			// Check whether this restart timestamp was already applied.
			needsRestart := true

			if statefulSetInNamespace.Spec.Template.Annotations != nil {
				if currentRestartTime, exists := statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"]; exists && currentRestartTime == restartedAt {
					needsRestart = false
				}
			} else {
				statefulSetInNamespace.Spec.Template.Annotations = make(map[string]string)
			}

			if needsRestart {
				logger.Info("DevContainer restart requested", "name", app.Name, "time", restartedAt)

				// Propagate the restart annotation into the pod template to
				// trigger a rolling restart.
				statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"] = restartedAt

				// Ensure at least one replica (in case it was stopped before).
				replicas := int32(1)
				if statefulSetInNamespace.Spec.Replicas != nil && *statefulSetInNamespace.Spec.Replicas > 0 {
					replicas = *statefulSetInNamespace.Spec.Replicas
				}
				statefulSetInNamespace.Spec.Replicas = &replicas

				if err := r.Update(ctx, statefulSetInNamespace); err != nil {
					logger.Error(err, "Failed to update StatefulSet for restart")
					return ctrl.Result{}, err
				}
				logger.Info("StatefulSet restarted successfully")
			}
		}

		// If StatefulSet.Status.readyReplicas changed, sync
		// DevcontainerApp.Status.Ready accordingly.
		if statefulSetInNamespace.Status.ReadyReplicas > 0 {
			app.Status.Ready = true
			if err := r.Status().Update(ctx, app); err != nil {
				logger.Error(err, "Failed to update DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
				return ctrl.Result{}, err
			}
			logger.Info("DevContainer is READY", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
		} else if app.Status.Ready {
			// Only update when the status is currently Ready but the
			// StatefulSet no longer is, to avoid redundant writes.
			app.Status.Ready = false
			if err := r.Status().Update(ctx, app); err != nil {
				logger.Error(err, "Failed to un-mark DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
				return ctrl.Result{}, err
			}
			logger.Info("DevContainer is NOT ready", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
		}

		// Only issue an update when something actually changed, to avoid
		// triggering the reconcile loop on every pass.
		needsUpdate := false

		// Check whether the image changed.
		// NOTE(review): this indexes Containers[0] unconditionally — confirm
		// the template always renders at least one container.
		if app.Spec.StatefulSet.Image != statefulSetInNamespace.Spec.Template.Spec.Containers[0].Image {
			needsUpdate = true
		}

		// Check replicas — when desiredReplicas is set but non-zero
		// (the "0"/stop case was already handled above).
		if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas != "0" {
			replicas, err := strconv.ParseInt(desiredReplicas, 10, 32)
			if err == nil {
				currentReplicas := int32(1) // default
				if statefulSetInNamespace.Spec.Replicas != nil {
					currentReplicas = *statefulSetInNamespace.Spec.Replicas
				}

				if currentReplicas != int32(replicas) {
					r32 := int32(replicas)
					statefulSet.Spec.Replicas = &r32
					needsUpdate = true
				}
			}
		}

		if needsUpdate {
			// NOTE(review): the update targets the freshly rendered
			// statefulSet (not the object read from the cluster), so it
			// carries no resourceVersion — verify this does not fail with a
			// conflict/invalid error on real clusters.
			if err := r.Update(ctx, statefulSet); err != nil {
				return ctrl.Result{}, err
			}
			logger.Info("StatefulSet updated", "name", statefulSet.Name)
		}
	}

	// 2.3 Reconcile the NodePort Service.
	service := devcontainer_controller_utils.NewService(app)
	if err := k8s_sigs_controller_runtime_utils.SetControllerReference(app, service, r.Scheme); err != nil {
		return ctrl.Result{}, err
	}
	serviceInCluster := &core_v1.Service{}
	err = r.Get(ctx, types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, serviceInCluster)
	if err != nil {
		if !errors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		err = r.Create(ctx, service)
		if err == nil {
			// The NodePort Service was created (this branch runs only once):
			// record the allocated NodePorts in app.Status.
			logger.Info("[DevStar][DevContainer] NodePort Assigned", "nodePortAssigned", service.Spec.Ports[0].NodePort)

			// Record the primary SSH port's NodePort.
			app.Status.NodePortAssigned = uint16(service.Spec.Ports[0].NodePort)

			// Collect the extra ports.
			extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}

			// Start from index 1: index 0 is the SSH port.
			for i := 1; i < len(service.Spec.Ports); i++ {
				port := service.Spec.Ports[i]

				// Find the matching port spec.
				var containerPort uint16 = 0

				// If extra port specs exist, try to match by name or port.
				if app.Spec.Service.ExtraPorts != nil {
					for _, ep := range app.Spec.Service.ExtraPorts {
						if (ep.Name != "" && ep.Name == port.Name) ||
							(uint16(port.Port) == ep.ServicePort) {
							containerPort = ep.ContainerPort
							break
						}
					}
				}

				// No match found — fall back to the target port.
				if containerPort == 0 && port.TargetPort.IntVal > 0 {
					containerPort = uint16(port.TargetPort.IntVal)
				}

				// Append to the extra-ports list.
				extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
					Name:          port.Name,
					ServicePort:   uint16(port.Port),
					ContainerPort: containerPort,
					NodePort:      uint16(port.NodePort),
				})

				logger.Info("[DevStar][DevContainer] Extra Port NodePort Assigned",
					"name", port.Name,
					"servicePort", port.Port,
					"nodePort", port.NodePort)
			}

			// Update the CR status, including the extra ports.
			app.Status.ExtraPortsAssigned = extraPortsAssigned

			if err := r.Status().Update(ctx, app); err != nil {
				logger.Error(err, "Failed to update NodePorts of DevcontainerApp",
					"nodePortAssigned", service.Spec.Ports[0].NodePort,
					"extraPortsCount", len(extraPortsAssigned))
				return ctrl.Result{}, err
			}
		} else if !errors.IsAlreadyExists(err) {
			logger.Error(err, "Failed to create DevcontainerApp NodePort Service", "nodePortServiceName", service.Name)
			return ctrl.Result{}, err
		}
	} else {
		// The Service already exists — inspect its ports and backfill any
		// NodePort info missing from the CR status.
		needStatusUpdate := false

		// Record the primary NodePort if it is not recorded yet.
		if app.Status.NodePortAssigned == 0 && len(serviceInCluster.Spec.Ports) > 0 {
			app.Status.NodePortAssigned = uint16(serviceInCluster.Spec.Ports[0].NodePort)
			needStatusUpdate = true
			logger.Info("[DevStar][DevContainer] Found existing main NodePort",
				"nodePort", serviceInCluster.Spec.Ports[0].NodePort)
		}

		// Handle the extra ports.
		if len(serviceInCluster.Spec.Ports) > 1 {
			// Rebuild when the extra-ports status is empty or out of sync.
			if app.Status.ExtraPortsAssigned == nil ||
				len(app.Status.ExtraPortsAssigned) != len(serviceInCluster.Spec.Ports)-1 {

				extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}

				// Start from index 1, skipping the primary port.
				for i := 1; i < len(serviceInCluster.Spec.Ports); i++ {
					port := serviceInCluster.Spec.Ports[i]

					// Find the matching port spec.
					var containerPort uint16 = 0

					// If extra port specs exist, try to match by name or port.
					if app.Spec.Service.ExtraPorts != nil {
						for _, ep := range app.Spec.Service.ExtraPorts {
							if (ep.Name != "" && ep.Name == port.Name) ||
								(uint16(port.Port) == ep.ServicePort) {
								containerPort = ep.ContainerPort
								break
							}
						}
					}

					// No match found — fall back to the target port.
					if containerPort == 0 && port.TargetPort.IntVal > 0 {
						containerPort = uint16(port.TargetPort.IntVal)
					}

					// Append to the extra-ports list.
					extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
						Name:          port.Name,
						ServicePort:   uint16(port.Port),
						ContainerPort: containerPort,
						NodePort:      uint16(port.NodePort),
					})

					logger.Info("[DevStar][DevContainer] Found existing extra NodePort",
						"name", port.Name,
						"nodePort", port.NodePort)
				}

				// Update the extra-ports status.
				app.Status.ExtraPortsAssigned = extraPortsAssigned
				needStatusUpdate = true
			}
		}

		// Push the status update if anything changed.
		if needStatusUpdate {
			if err := r.Status().Update(ctx, app); err != nil {
				logger.Error(err, "Failed to update NodePorts status for existing service")
				return ctrl.Result{}, err
			}
			logger.Info("[DevStar][DevContainer] Updated NodePorts status for existing service",
				"mainNodePort", app.Status.NodePortAssigned,
				"extraPortsCount", len(app.Status.ExtraPortsAssigned))
		}
	}
	return ctrl.Result{}, nil
}
|
||||
|
||||
// cleanupPersistentVolumeClaims 查找并删除与 DevcontainerApp 关联的所有 PVC
|
||||
func (r *DevcontainerAppReconciler) cleanupPersistentVolumeClaims(ctx context.Context, app *devcontainer_v1.DevcontainerApp) error {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
// 查找关联的 PVC
|
||||
pvcList := &core_v1.PersistentVolumeClaimList{}
|
||||
|
||||
// 按标签筛选
|
||||
labelSelector := client.MatchingLabels{
|
||||
"app": app.Name,
|
||||
}
|
||||
if err := r.List(ctx, pvcList, client.InNamespace(app.Namespace), labelSelector); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 如果按标签没找到,尝试按名称模式查找
|
||||
if len(pvcList.Items) == 0 {
|
||||
if err := r.List(ctx, pvcList, client.InNamespace(app.Namespace)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 筛选出名称包含 DevcontainerApp 名称的 PVC
|
||||
var filteredItems []core_v1.PersistentVolumeClaim
|
||||
for _, pvc := range pvcList.Items {
|
||||
// StatefulSet PVC 命名格式通常为: <volumeClaimTemplate名称>-<StatefulSet名称>-<序号>
|
||||
// 检查是否包含 app 名称作为名称的一部分
|
||||
if strings.Contains(pvc.Name, app.Name+"-") {
|
||||
filteredItems = append(filteredItems, pvc)
|
||||
logger.Info("Found PVC to delete", "name", pvc.Name)
|
||||
}
|
||||
}
|
||||
pvcList.Items = filteredItems
|
||||
}
|
||||
|
||||
// 删除找到的 PVC
|
||||
for i := range pvcList.Items {
|
||||
logger.Info("Deleting PVC", "name", pvcList.Items[i].Name)
|
||||
if err := r.Delete(ctx, &pvcList.Items[i]); err != nil && !errors.IsNotFound(err) {
|
||||
logger.Error(err, "Failed to delete PVC", "name", pvcList.Items[i].Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *DevcontainerAppReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&devcontainer_v1.DevcontainerApp{}).
|
||||
Owns(&apps_v1.StatefulSet{}).
|
||||
Owns(&core_v1.Service{}).
|
||||
Complete(r)
|
||||
}
|
24
modules/k8s/controller/devcontainer/templates/service.yaml
Normal file
24
modules/k8s/controller/devcontainer/templates/service.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Go template for the NodePort Service that exposes a DevContainer.
# Rendered by utils.parseTemplate with a DevcontainerApp as the data context.
apiVersion: v1
kind: Service
metadata:
  # NOTE(review): the reconciler fetches the Service by the bare app name,
  # while this template appends a "-svc" suffix — verify the lookup matches.
  name: {{.ObjectMeta.Name}}-svc
  namespace: {{.ObjectMeta.Namespace}}
spec:
  selector:
    app: {{.ObjectMeta.Name}}
    devstar-resource-type: devstar-devcontainer
  type: NodePort
  ports:
    # Primary SSH port: service port 22 forwards to the app's container port.
    - name: ssh-port
      protocol: TCP
      port: 22
      targetPort: {{.Spec.StatefulSet.ContainerPort}}
      # Pin the NodePort only when one was explicitly requested in the spec.
      {{ if .Spec.Service.NodePort}}
      nodePort: {{.Spec.Service.NodePort}}
      {{ end }}
    # Extra user-declared ports; names default to "port-<servicePort>".
    {{- range .Spec.Service.ExtraPorts }}
    - name: {{ .Name | default (printf "port-%d" .ServicePort) }}
      protocol: TCP
      port: {{ .ServicePort }}
      targetPort: {{ .ContainerPort }}
    {{- end }}
|
248
modules/k8s/controller/devcontainer/templates/statefulset.yaml
Normal file
248
modules/k8s/controller/devcontainer/templates/statefulset.yaml
Normal file
@@ -0,0 +1,248 @@
|
||||
# Go template for the StatefulSet backing a DevContainer. Rendered by
# utils.parseTemplate with a DevcontainerApp as the data context.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{.ObjectMeta.Name}}
  namespace: {{.ObjectMeta.Namespace}}
  labels:
    app: {{.ObjectMeta.Name}}
    devstar-resource-type: devstar-devcontainer
spec:
  podManagementPolicy: OrderedReady
  replicas: 1
  selector:
    matchLabels:
      app: {{.ObjectMeta.Name}}
      devstar-resource-type: devstar-devcontainer
  template:
    metadata:
      labels:
        app: {{.ObjectMeta.Name}}
        devstar-resource-type: devstar-devcontainer
    spec:
      # Security policy: do not mount the ServiceAccount token into the pod.
      automountServiceAccountToken: false
      volumes:
        # Shared scratch volume used to hand the ttyd binary to the main container.
        - name: ttyd-shared
          emptyDir: {}
      initContainers:
        # Seed the user's home directory from the image defaults on first run.
        - name: init-user-config
          image: {{.Spec.StatefulSet.Image}}
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - |
              echo "=== Checking /target-root directory ==="
              ls -la /target-root/ 2>/dev/null || echo "Directory not found"

              # 检查是否为空目录或首次初始化
              file_count=$(find /target-root -maxdepth 1 \( -type f -o -type d \) ! -name '.' ! -name '..' 2>/dev/null | wc -l)
              echo "Found $file_count items in /target-root"

              if [ "$file_count" -lt 2 ]; then
                echo "Empty or minimal directory detected - initializing user home..."
                cp -a /root/. /target-root/
                echo "User config initialized from image defaults"
              else
                echo "User config already exists - skipping initialization to preserve user data"
                echo "Current contents:"
                ls -la /target-root/
              fi
          volumeMounts:
            - name: pvc-devcontainer
              mountPath: /target-root
              subPath: user-home

        # Initialize the SSH server config and install the user's public keys.
        - name: init-root-ssh-dir
          image: devstar.cn/public/busybox:27a71e19c956
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - |
              # 确保目录存在
              mkdir -p /root/.ssh
              mkdir -p /etc/ssh

              # 创建标准的 sshd_config 文件(如果不存在)
              if [ ! -f /etc/ssh/sshd_config ]; then
              cat > /etc/ssh/sshd_config << 'EOF'
              # OpenSSH Server Configuration
              Port 22
              AddressFamily any
              ListenAddress 0.0.0.0

              # Host Keys
              HostKey /etc/ssh/ssh_host_rsa_key
              HostKey /etc/ssh/ssh_host_ecdsa_key
              HostKey /etc/ssh/ssh_host_ed25519_key

              # Logging
              SyslogFacility AUTH
              LogLevel INFO

              # Authentication
              LoginGraceTime 2m
              PermitRootLogin yes
              StrictModes yes
              MaxAuthTries 6
              MaxSessions 10

              PubkeyAuthentication yes
              AuthorizedKeysFile .ssh/authorized_keys

              PasswordAuthentication no
              PermitEmptyPasswords no
              ChallengeResponseAuthentication no

              # Forwarding
              X11Forwarding yes
              X11DisplayOffset 10
              PrintMotd no
              PrintLastLog yes
              TCPKeepAlive yes

              # Environment
              AcceptEnv LANG LC_*

              # Subsystem
              Subsystem sftp /usr/lib/openssh/sftp-server

              # PAM
              UsePAM yes
              EOF
              echo "Created sshd_config"
              fi

              # 导入 SSH 公钥(如果不存在)
              {{range .Spec.StatefulSet.SSHPublicKeyList}}
              if ! grep -q "{{.}}" /root/.ssh/authorized_keys 2>/dev/null; then
                echo "{{.}}" >> /root/.ssh/authorized_keys
              fi
              {{end}}

              # 设置正确的权限
              chmod 755 /root
              chmod 700 /root/.ssh/
              chmod 600 /root/.ssh/authorized_keys 2>/dev/null || true
              chmod 644 /etc/ssh/sshd_config 2>/dev/null || true

              # 确保文件所有者正确
              chown -R root:root /root/.ssh/

              echo 'SSH configuration and keys initialized.'
          volumeMounts:
            - name: pvc-devcontainer
              mountPath: /root
              subPath: user-home
            - name: pvc-devcontainer
              mountPath: /etc/ssh
              subPath: ssh-host-keys

        # Clone the project repository into the workspace on first run.
        - name: init-git-repo-dir
          image: {{.Spec.StatefulSet.Image}}
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - if [ ! -d '/data/workspace' ]; then git clone {{.Spec.StatefulSet.GitRepositoryURL}} /data/workspace && echo "Git Repository cloned."; else echo "Folder already exists."; fi
          volumeMounts:
            - name: pvc-devcontainer
              mountPath: /data
              subPath: user-data

        # Copy the ttyd binary into the shared volume for the main container.
        - name: init-ttyd
          image: tsl0922/ttyd:latest
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - |
              echo "Copying ttyd binary to shared volume..."
              cp /usr/bin/ttyd /ttyd-shared/ttyd
              chmod +x /ttyd-shared/ttyd
              echo "ttyd binary copied successfully"
              ls -la /ttyd-shared/ttyd
          volumeMounts:
            - name: ttyd-shared
              mountPath: /ttyd-shared

      containers:
        - name: {{.ObjectMeta.Name}}
          image: {{.Spec.StatefulSet.Image}}
          command:
            {{range .Spec.StatefulSet.Command}}
            - {{.}}
            {{end}}
          imagePullPolicy: IfNotPresent
          # securityContext: TODO: define the DevContainer security policy
          ports:
            - name: ssh-port
              protocol: TCP
              containerPort: {{.Spec.StatefulSet.ContainerPort}}
            {{- range .Spec.Service.ExtraPorts }}
            - name: {{ .Name | default (printf "port-%d" .ContainerPort) }}
              protocol: TCP
              containerPort: {{ .ContainerPort }}
            {{- end }}
          volumeMounts:
            - name: pvc-devcontainer
              mountPath: /data
              subPath: user-data
            - name: pvc-devcontainer
              mountPath: /root
              subPath: user-home
            - name: pvc-devcontainer
              mountPath: /etc/ssh
              subPath: ssh-host-keys
            # Mount the shared ttyd volume.
            - name: ttyd-shared
              mountPath: /ttyd-shared
          # Remaining probe/resource configuration below.
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - exec ls ~
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - exec cat /etc/ssh/ssh_host*.pub
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 300m
              ephemeral-storage: 8Gi
              memory: 512Mi
            requests:
              cpu: 100m
              ephemeral-storage: 50Mi
              memory: 128Mi
  volumeClaimTemplates:
    - apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: pvc-devcontainer
      spec:
        storageClassName: openebs-hostpath
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
|
60
modules/k8s/controller/devcontainer/utils/template_utils.go
Normal file
60
modules/k8s/controller/devcontainer/utils/template_utils.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"text/template"
|
||||
|
||||
devcontainer_apps_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
app_v1 "k8s.io/api/apps/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
yaml_util "k8s.io/apimachinery/pkg/util/yaml"
|
||||
)
|
||||
|
||||
const (
	// TemplatePath is the directory (relative to the process working
	// directory) that holds the Go-template YAML manifests rendered by
	// parseTemplate.
	TemplatePath = "modules/k8s/controller/devcontainer/templates/"
)
|
||||
|
||||
// parseTemplate 解析 Go Template 模板文件
|
||||
func parseTemplate(templateName string, app *devcontainer_apps_v1.DevcontainerApp) []byte {
|
||||
tmpl, err := template.
|
||||
New(templateName + ".yaml").
|
||||
Funcs(template.FuncMap{"default": DefaultFunc}).
|
||||
ParseFiles(TemplatePath + templateName + ".yaml")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b := new(bytes.Buffer)
|
||||
err = tmpl.Execute(b, app)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
// NewStatefulSet 创建 StatefulSet
|
||||
func NewStatefulSet(app *devcontainer_apps_v1.DevcontainerApp) *app_v1.StatefulSet {
|
||||
statefulSet := &app_v1.StatefulSet{}
|
||||
err := yaml_util.Unmarshal(parseTemplate("statefulset", app), statefulSet)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return statefulSet
|
||||
}
|
||||
|
||||
// NewService 创建 Service
|
||||
func NewService(app *devcontainer_apps_v1.DevcontainerApp) *core_v1.Service {
|
||||
service := &core_v1.Service{}
|
||||
err := yaml_util.Unmarshal(parseTemplate("service", app), service)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return service
|
||||
}
|
||||
|
||||
// DefaultFunc implements the "default" template helper: it yields
// defaultValue when value is nil or the empty string, and value otherwise.
func DefaultFunc(value interface{}, defaultValue interface{}) interface{} {
	switch value {
	case nil, "":
		return defaultValue
	}
	return value
}
|
Reference in New Issue
Block a user