Integrating the Cluster's Resource Objects
node: provider/k8s/node.go
import (
"context"
v1 "k8s.io/api/core/v1"
)
func (c *Client) ListNode(ctx context.Context, req *ListRequest) (*v1.NodeList, error) {
return c.client.CoreV1().Nodes().List(ctx, req.Opts)
}
func (c *Client) GetNode(ctx context.Context, req *GetRequest) (*v1.Node, error) {
return c.client.CoreV1().Nodes().Get(ctx, req.Name, req.Opts)
}
namespace: provider/k8s/namespace.go
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (c *Client) ListNamespace(ctx context.Context, req *ListRequest) (*v1.NamespaceList, error) {
set, err := c.client.CoreV1().Namespaces().List(ctx, req.Opts)
if err != nil {
return nil, err
}
if req.SkipManagedFields {
for i := range set.Items {
set.Items[i].ManagedFields = nil
}
}
return set, nil
}
func (c *Client) CreateNamespace(ctx context.Context, req *v1.Namespace) (*v1.Namespace, error) {
return c.client.CoreV1().Namespaces().Create(ctx, req, metav1.CreateOptions{})
}
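// ListResourceQuota passes an empty namespace, which lists quotas across all namespaces.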
func (c *Client) ListResourceQuota(ctx context.Context) (*v1.ResourceQuotaList, error) {
return c.client.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{})
}
func (c *Client) CreateResourceQuota(ctx context.Context, req *v1.ResourceQuota) (*v1.ResourceQuota, error) {
return c.client.CoreV1().ResourceQuotas(req.Namespace).Create(ctx, req, metav1.CreateOptions{})
}
pod: provider/k8s/pod.go
import (
"context"
"io"
"github.com/go-playground/validator/v10"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/remotecommand"
)
var (
validate = validator.New()
)
func NewCreateRequest() *CreateRequest {
return &CreateRequest{
Opts: metav1.CreateOptions{},
}
}
type CreateRequest struct {
Namespace string
Opts metav1.CreateOptions
}
func (c *Client) CreatePod(ctx context.Context, pod *v1.Pod, req *CreateRequest) (*v1.Pod, error) {
return c.client.CoreV1().Pods(req.Namespace).Create(ctx, pod, req.Opts)
}
func NewListRequest() *ListRequest {
return &ListRequest{}
}
type ListRequest struct {
Namespace string
SkipManagedFields bool
Opts metav1.ListOptions
}
func (c *Client) ListPod(ctx context.Context, req *ListRequest) (*v1.PodList, error) {
return c.client.CoreV1().Pods(req.Namespace).List(ctx, req.Opts)
}
func NewGetRequest(name string) *GetRequest {
return &GetRequest{
Namespace: DEFAULT_NAMESPACE,
Name: name,
}
}
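// Note: DEFAULT_NAMESPACE is referenced above but not shown in this excerpt;
// presumably it is declared elsewhere in the package, along the lines of:
// const DEFAULT_NAMESPACE = "default"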
type GetRequest struct {
Namespace string
Name string
Opts metav1.GetOptions
}
func (c *Client) GetPod(ctx context.Context, req *GetRequest) (*v1.Pod, error) {
return c.client.CoreV1().Pods(req.Namespace).Get(ctx, req.Name, req.Opts)
}
func NewDeleteRequest(name string) *DeleteRequest {
return &DeleteRequest{
Namespace: DEFAULT_NAMESPACE,
Name: name,
}
}
type DeleteRequest struct {
Namespace string
Name string
Opts metav1.DeleteOptions
}
func (c *Client) DeletePod(ctx context.Context, req *DeleteRequest) error {
return c.client.CoreV1().Pods(req.Namespace).Delete(ctx, req.Name, req.Opts)
}
func NewLoginContainerRequest(cmd []string, ce ContainerExecutor) *LoginContainerRequest {
return &LoginContainerRequest{
Command: cmd,
Executor: ce,
}
}
type LoginContainerRequest struct {
Namespace string `json:"namespace" validate:"required"`
PodName string `json:"pod_name" validate:"required"`
ContainerName string `json:"container_name"`
Command []string `json:"command"`
Executor ContainerExecutor `json:"-"`
}
func (req *LoginContainerRequest) Validate() error {
return validate.Struct(req)
}
// LoginContainer opens an interactive exec session into a container
func (c *Client) LoginContainer(req *LoginContainerRequest) error {
restReq := c.client.CoreV1().RESTClient().Post().
Resource("pods").
Name(req.PodName).
Namespace(req.Namespace).
SubResource("exec")
restReq.VersionedParams(&v1.PodExecOptions{
Container: req.ContainerName,
Command: req.Command,
Stdin: true,
Stdout: true,
Stderr: true,
TTY: true,
}, scheme.ParameterCodec)
executor, err := remotecommand.NewSPDYExecutor(c.restconf, "POST", restReq.URL())
if err != nil {
return err
}
return executor.Stream(remotecommand.StreamOptions{
Stdin: req.Executor,
Stdout: req.Executor,
Stderr: req.Executor,
Tty: true,
TerminalSizeQueue: req.Executor,
})
}
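// The ContainerExecutor type is not defined in this excerpt. Since it is passed to
// remotecommand.StreamOptions as Stdin, Stdout, Stderr, and TerminalSizeQueue all at
// once, it must satisfy all of those interfaces. A minimal sketch (an assumption,
// not necessarily the project's actual definition); a websocket-backed terminal
// session would typically implement this:
type ContainerExecutor interface {
io.Reader // terminal input (e.g. read from a websocket)
io.Writer // terminal output
remotecommand.TerminalSizeQueue // terminal resize events
}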
func NewWatchContainerLogRequest() *WatchContainerLogRequest {
return &WatchContainerLogRequest{
TailLines: 100,
Follow: false,
Previous: false,
}
}
type WatchContainerLogRequest struct {
Namespace string `json:"namespace" validate:"required"`
PodName string `json:"pod_name" validate:"required"`
ContainerName string `json:"container_name"`
TailLines int64 `json:"tail_lines"`
Follow bool `json:"follow"`
Previous bool `json:"previous"`
}
func (req *WatchContainerLogRequest) Validate() error {
return validate.Struct(req)
}
// WatchContainerLog streams a container's logs
func (c *Client) WatchContainerLog(ctx context.Context, req *WatchContainerLogRequest) (io.ReadCloser, error) {
opt := &v1.PodLogOptions{
Container: req.ContainerName,
Follow: req.Follow,
TailLines: &req.TailLines,
Previous: req.Previous,
InsecureSkipTLSVerifyBackend: true,
}
restReq := c.client.CoreV1().Pods(req.Namespace).GetLogs(req.PodName, opt)
return restReq.Stream(ctx)
}
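The returned io.ReadCloser can then be copied wherever the logs should go. A minimal usage sketch (the pod name is a placeholder, and os must be imported):

// Stream the last 100 lines of a container's logs to stdout.
req := NewWatchContainerLogRequest()
req.Namespace = "kube-system"
req.PodName = "coredns-xxx" // placeholder
logs, err := c.WatchContainerLog(ctx, req)
if err != nil {
return err
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)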
deployment: provider/k8s/deployment.go
import (
"context"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
watch "k8s.io/apimachinery/pkg/watch"
)
func (c *Client) ListDeployment(ctx context.Context, req *ListRequest) (*appsv1.DeploymentList, error) {
ds, err := c.client.AppsV1().Deployments(req.Namespace).List(ctx, req.Opts)
if err != nil {
return nil, err
}
if req.SkipManagedFields {
for i := range ds.Items {
ds.Items[i].ManagedFields = nil
}
}
return ds, nil
}
func (c *Client) GetDeployment(ctx context.Context, req *GetRequest) (*appsv1.Deployment, error) {
return c.client.AppsV1().Deployments(req.Namespace).Get(ctx, req.Name, req.Opts)
}
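// Note: this watches every Deployment in req.Namespace, not just the one passed in;
// to follow a single object, set a FieldSelector such as "metadata.name=<name>" in the ListOptions.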
func (c *Client) WatchDeployment(ctx context.Context, req *appsv1.Deployment) (watch.Interface, error) {
return c.client.AppsV1().Deployments(req.Namespace).Watch(ctx, metav1.ListOptions{})
}
func (c *Client) CreateDeployment(ctx context.Context, req *appsv1.Deployment) (*appsv1.Deployment, error) {
return c.client.AppsV1().Deployments(req.Namespace).Create(ctx, req, metav1.CreateOptions{})
}
func (c *Client) UpdateDeployment(ctx context.Context, req *appsv1.Deployment) (*appsv1.Deployment, error) {
return c.client.AppsV1().Deployments(req.Namespace).Update(ctx, req, metav1.UpdateOptions{})
}
func (c *Client) ScaleDeployment(ctx context.Context, req *ScaleRequest) (*v1.Scale, error) {
return c.client.AppsV1().Deployments(req.Scale.Namespace).UpdateScale(ctx, req.Scale.Name, req.Scale, req.Options)
}
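// ScaleRequest and NewScaleRequest are not shown in this section. A hypothetical
// definition consistent with their usage here and in TestScaleDeployment below
// (using the aliases from this file's imports):
type ScaleRequest struct {
Scale *v1.Scale
Options metav1.UpdateOptions
}
func NewScaleRequest() *ScaleRequest {
return &ScaleRequest{
Scale: &v1.Scale{},
Options: metav1.UpdateOptions{},
}
}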
// Kubernetes has no native "redeploy" operation; bumping a timestamp annotation on the
// Pod template changes the spec and triggers a new rollout. Rancher uses the same trick:
// dpObj.Spec.Template.Annotations["cattle.io/timestamp"] = time.Now().Format(time.RFC3339)
func (c *Client) ReDeploy(ctx context.Context, req *GetRequest) (*appsv1.Deployment, error) {
// fetch the Deployment
d, err := c.GetDeployment(ctx, req)
if err != nil {
return nil, err
}
// bump a timestamp annotation so the Deployment object actually changes
// (guard against a nil map: assigning into it would panic)
if d.Spec.Template.Annotations == nil {
d.Spec.Template.Annotations = map[string]string{}
}
d.Spec.Template.Annotations["mpaas/timestamp"] = time.Now().Format(time.RFC3339)
return c.client.AppsV1().Deployments(req.Namespace).Update(ctx, d, metav1.UpdateOptions{})
}
func (c *Client) DeleteDeployment(ctx context.Context, req *DeleteRequest) error {
return c.client.AppsV1().Deployments(req.Namespace).Delete(ctx, req.Name, req.Opts)
}
service: provider/k8s/service.go
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (c *Client) CreateService(ctx context.Context, req *v1.Service) (*v1.Service, error) {
return c.client.CoreV1().Services(req.Namespace).Create(ctx, req, metav1.CreateOptions{})
}
func (c *Client) ListService(ctx context.Context, req *ListRequest) (*v1.ServiceList, error) {
return c.client.CoreV1().Services(req.Namespace).List(ctx, req.Opts)
}
func (c *Client) GetService(ctx context.Context, req *GetRequest) (*v1.Service, error) {
return c.client.CoreV1().Services(req.Namespace).Get(ctx, req.Name, req.Opts)
}
pv: provider/k8s/storage.go
import (
"context"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
)
func (c *Client) ListPersistentVolume(ctx context.Context, req *ListRequest) (*v1.PersistentVolumeList, error) {
return c.client.CoreV1().PersistentVolumes().List(ctx, req.Opts)
}
func (c *Client) GetPersistentVolume(ctx context.Context, req *GetRequest) (*v1.PersistentVolume, error) {
return c.client.CoreV1().PersistentVolumes().Get(ctx, req.Name, req.Opts)
}
func (c *Client) ListPersistentVolumeClaims(ctx context.Context, req *ListRequest) (*v1.PersistentVolumeClaimList, error) {
return c.client.CoreV1().PersistentVolumeClaims(req.Namespace).List(ctx, req.Opts)
}
func (c *Client) GetPersistentVolumeClaims(ctx context.Context, req *GetRequest) (*v1.PersistentVolumeClaim, error) {
return c.client.CoreV1().PersistentVolumeClaims(req.Namespace).Get(ctx, req.Name, req.Opts)
}
func (c *Client) ListStorageClass(ctx context.Context, req *ListRequest) (*storagev1.StorageClassList, error) {
return c.client.StorageV1().StorageClasses().List(ctx, req.Opts)
}
func (c *Client) GetStorageClass(ctx context.Context, req *GetRequest) (*storagev1.StorageClass, error) {
return c.client.StorageV1().StorageClasses().Get(ctx, req.Name, req.Opts)
}
configmap: provider/k8s/configmap.go
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (c *Client) ListConfigMap(ctx context.Context, req *ListRequest) (*v1.ConfigMapList, error) {
return c.client.CoreV1().ConfigMaps(req.Namespace).List(ctx, req.Opts)
}
func (c *Client) GetConfigMap(ctx context.Context, req *GetRequest) (*v1.ConfigMap, error) {
return c.client.CoreV1().ConfigMaps(req.Namespace).Get(ctx, req.Name, req.Opts)
}
func (c *Client) CreateConfigMap(ctx context.Context, req *v1.ConfigMap) (*v1.ConfigMap, error) {
return c.client.CoreV1().ConfigMaps(req.Namespace).Create(ctx, req, metav1.CreateOptions{})
}
secret: provider/k8s/secret.go
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (c *Client) CreateSecret(ctx context.Context, req *v1.Secret) (*v1.Secret, error) {
return c.client.CoreV1().Secrets(req.Namespace).Create(ctx, req, metav1.CreateOptions{})
}
func (c *Client) ListSecret(ctx context.Context, req *ListRequest) (*v1.SecretList, error) {
return c.client.CoreV1().Secrets(req.Namespace).List(ctx, req.Opts)
}
func (c *Client) GetSecret(ctx context.Context, req *GetRequest) (*v1.Secret, error) {
return c.client.CoreV1().Secrets(req.Namespace).Get(ctx, req.Name, req.Opts)
}
ingress: provider/k8s/ingress.go
package k8s
import (
"context"
v1 "k8s.io/api/networking/v1"
)
func (c *Client) ListIngress(ctx context.Context, req *ListRequest) (*v1.IngressList, error) {
return c.client.NetworkingV1().Ingresses(req.Namespace).List(ctx, req.Opts)
}
The methods provided by client-go's Ingress interface (options are metav1 types; the apply configurations come from k8s.io/client-go/applyconfigurations):
type IngressInterface interface {
Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error)
Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error)
Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
IngressExpansion
}
daemonset: provider/k8s/daemonset.go
package k8s
import (
"context"
appsv1 "k8s.io/api/apps/v1"
)
func (c *Client) ListDaemonSet(ctx context.Context, req *ListRequest) (*appsv1.DaemonSetList, error) {
return c.client.AppsV1().DaemonSets(req.Namespace).List(ctx, req.Opts)
}
The methods provided by client-go's DaemonSet interface:
type DaemonSetInterface interface {
Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error)
Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error)
Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
DaemonSetExpansion
}
statefulset: provider/k8s/statefulset.go
import (
"context"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (c *Client) CreateStatefulSet(ctx context.Context, req *appsv1.StatefulSet) (*appsv1.StatefulSet, error) {
return c.client.AppsV1().StatefulSets(req.Namespace).Create(ctx, req, metav1.CreateOptions{})
}
func (c *Client) ListStatefulSet(ctx context.Context, req *ListRequest) (*appsv1.StatefulSetList, error) {
return c.client.AppsV1().StatefulSets(req.Namespace).List(ctx, req.Opts)
}
func (c *Client) GetStatefulSet(ctx context.Context, req *GetRequest) (*appsv1.StatefulSet, error) {
return c.client.AppsV1().StatefulSets(req.Namespace).Get(ctx, req.Name, req.Opts)
}
The methods provided by client-go's StatefulSet interface:
type StatefulSetInterface interface {
Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error)
Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error)
Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
StatefulSetExpansion
}
job: provider/k8s/job.go
import (
"context"
v1 "k8s.io/api/batch/v1"
)
func (c *Client) ListJob(ctx context.Context, req *ListRequest) (*v1.JobList, error) {
return c.client.BatchV1().Jobs(req.Namespace).List(ctx, req.Opts)
}
The methods provided by client-go's Job interface:
type JobInterface interface {
Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error)
Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error)
Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error)
ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error)
JobExpansion
}
cronjob: provider/k8s/cronjob.go
import (
"context"
v1 "k8s.io/api/batch/v1"
)
func (c *Client) ListCronJob(ctx context.Context, req *ListRequest) (*v1.CronJobList, error) {
return c.client.BatchV1().CronJobs(req.Namespace).List(ctx, req.Opts)
}
The methods provided by client-go's CronJob interface:
type CronJobInterface interface {
Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error)
Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error)
UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CronJob, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.CronJobList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error)
Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error)
ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error)
CronJobExpansion
}
event: provider/k8s/event.go
import (
"context"
v1 "k8s.io/api/events/v1"
)
func (c *Client) ListEvent(ctx context.Context, req *ListRequest) (*v1.EventList, error) {
return c.client.EventsV1().Events(req.Namespace).List(ctx, req.Opts)
}
The methods provided by client-go's Event interface:
type EventInterface interface {
Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error)
Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error)
Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error)
EventExpansion
}
Unit tests for the cluster operations
provider/k8s/client_test.go
Load the test dependencies:
var (
client *k8s.Client
ctx = context.Background()
)
func init() {
zap.DevelopmentSetup()
// load a kubeconfig from disk and build the client
kc, err := os.ReadFile(filepath.Join("C:\\Users\\zengz\\.kube", "config"))
if err != nil {
panic(err)
}
client, err = k8s.NewClient(string(kc))
if err != nil {
panic(err)
}
}
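k8s.NewClient itself is not listed in this section. A minimal sketch of what it presumably does, turning raw kubeconfig bytes into the typed clientset plus the REST config that the exec code needs:

// Hypothetical constructor, assuming the Client fields used earlier
// (client: *kubernetes.Clientset, restconf: *rest.Config); imports
// k8s.io/client-go/kubernetes and k8s.io/client-go/tools/clientcmd.
func NewClient(kubeconfig string) (*Client, error) {
restconf, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig))
if err != nil {
return nil, err
}
cs, err := kubernetes.NewForConfig(restconf)
if err != nil {
return nil, err
}
return &Client{client: cs, restconf: restconf}, nil
}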
Test listing the API resources exposed by the server:
func TestServerResources(t *testing.T) {
rs, err := client.ServerResources()
if err != nil {
t.Fatal(err)
}
for i := range rs {
t.Log(rs[i].GroupVersion, rs[i].APIVersion)
for _, r := range rs[i].APIResources {
t.Log(r.Name)
}
}
}
=== RUN TestServerResources
client_test.go:39: v1
client_test.go:41: bindings
client_test.go:41: componentstatuses
client_test.go:41: configmaps
client_test.go:41: endpoints
client_test.go:41: events
client_test.go:41: limitranges
client_test.go:41: namespaces
client_test.go:41: namespaces/finalize
client_test.go:41: namespaces/status
client_test.go:41: nodes
client_test.go:41: nodes/proxy
client_test.go:41: nodes/status
client_test.go:41: persistentvolumeclaims
client_test.go:41: persistentvolumeclaims/status
client_test.go:41: persistentvolumes
client_test.go:41: persistentvolumes/status
client_test.go:41: pods
client_test.go:41: pods/attach
client_test.go:41: pods/binding
client_test.go:41: pods/ephemeralcontainers
client_test.go:41: pods/eviction
client_test.go:41: pods/exec
client_test.go:41: pods/log
client_test.go:41: pods/portforward
client_test.go:41: pods/proxy
client_test.go:41: pods/status
client_test.go:41: podtemplates
client_test.go:41: replicationcontrollers
client_test.go:41: replicationcontrollers/scale
client_test.go:41: replicationcontrollers/status
client_test.go:41: resourcequotas
client_test.go:41: resourcequotas/status
client_test.go:41: secrets
client_test.go:41: serviceaccounts
client_test.go:41: serviceaccounts/token
client_test.go:41: services
client_test.go:41: services/proxy
client_test.go:41: services/status
client_test.go:39: apiregistration.k8s.io/v1 v1
client_test.go:41: apiservices
client_test.go:41: apiservices/status
client_test.go:39: apps/v1 v1
client_test.go:41: controllerrevisions
client_test.go:41: daemonsets
client_test.go:41: daemonsets/status
client_test.go:41: deployments
client_test.go:41: deployments/scale
client_test.go:41: deployments/status
client_test.go:41: replicasets
client_test.go:41: replicasets/scale
client_test.go:41: replicasets/status
client_test.go:41: statefulsets
client_test.go:41: statefulsets/scale
client_test.go:41: statefulsets/status
client_test.go:39: events.k8s.io/v1 v1
client_test.go:41: events
client_test.go:39: authentication.k8s.io/v1 v1
client_test.go:41: tokenreviews
client_test.go:39: authorization.k8s.io/v1 v1
client_test.go:41: localsubjectaccessreviews
client_test.go:41: selfsubjectaccessreviews
client_test.go:41: selfsubjectrulesreviews
client_test.go:41: subjectaccessreviews
client_test.go:39: autoscaling/v2 v1
client_test.go:41: horizontalpodautoscalers
client_test.go:41: horizontalpodautoscalers/status
client_test.go:39: autoscaling/v1 v1
client_test.go:41: horizontalpodautoscalers
client_test.go:41: horizontalpodautoscalers/status
client_test.go:39: autoscaling/v2beta2 v1
client_test.go:41: horizontalpodautoscalers
client_test.go:41: horizontalpodautoscalers/status
client_test.go:39: batch/v1 v1
client_test.go:41: cronjobs
client_test.go:41: cronjobs/status
client_test.go:41: jobs
client_test.go:41: jobs/status
client_test.go:39: certificates.k8s.io/v1 v1
client_test.go:41: certificatesigningrequests
client_test.go:41: certificatesigningrequests/approval
client_test.go:41: certificatesigningrequests/status
client_test.go:39: networking.k8s.io/v1 v1
client_test.go:41: ingressclasses
client_test.go:41: ingresses
client_test.go:41: ingresses/status
client_test.go:41: networkpolicies
client_test.go:41: networkpolicies/status
client_test.go:39: policy/v1 v1
client_test.go:41: poddisruptionbudgets
client_test.go:41: poddisruptionbudgets/status
client_test.go:39: rbac.authorization.k8s.io/v1 v1
client_test.go:41: clusterrolebindings
client_test.go:41: clusterroles
client_test.go:41: rolebindings
client_test.go:41: roles
client_test.go:39: storage.k8s.io/v1 v1
client_test.go:41: csidrivers
client_test.go:41: csinodes
client_test.go:41: csistoragecapacities
client_test.go:41: storageclasses
client_test.go:41: volumeattachments
client_test.go:41: volumeattachments/status
client_test.go:39: storage.k8s.io/v1beta1 v1
client_test.go:41: csistoragecapacities
client_test.go:39: admissionregistration.k8s.io/v1 v1
client_test.go:41: mutatingwebhookconfigurations
client_test.go:41: validatingwebhookconfigurations
client_test.go:39: apiextensions.k8s.io/v1 v1
client_test.go:41: customresourcedefinitions
client_test.go:41: customresourcedefinitions/status
client_test.go:39: scheduling.k8s.io/v1 v1
client_test.go:41: priorityclasses
client_test.go:39: coordination.k8s.io/v1 v1
client_test.go:41: leases
client_test.go:39: node.k8s.io/v1 v1
client_test.go:41: runtimeclasses
client_test.go:39: discovery.k8s.io/v1 v1
client_test.go:41: endpointslices
client_test.go:39: flowcontrol.apiserver.k8s.io/v1beta2 v1
client_test.go:41: flowschemas
client_test.go:41: flowschemas/status
client_test.go:41: prioritylevelconfigurations
client_test.go:41: prioritylevelconfigurations/status
client_test.go:39: flowcontrol.apiserver.k8s.io/v1beta1 v1
client_test.go:41: flowschemas
client_test.go:41: flowschemas/status
client_test.go:41: prioritylevelconfigurations
client_test.go:41: prioritylevelconfigurations/status
client_test.go:39: helm.cattle.io/v1 v1
client_test.go:41: helmchartconfigs
client_test.go:41: helmcharts
client_test.go:39: k3s.cattle.io/v1 v1
client_test.go:41: addons
client_test.go:39: traefik.containo.us/v1alpha1 v1
client_test.go:41: middlewaretcps
client_test.go:41: tlsstores
client_test.go:41: ingressroutetcps
client_test.go:41: serverstransports
client_test.go:41: ingressroutes
client_test.go:41: tlsoptions
client_test.go:41: middlewares
client_test.go:41: ingressrouteudps
client_test.go:41: traefikservices
client_test.go:39: metrics.k8s.io/v1beta1 v1
client_test.go:41: nodes
client_test.go:41: pods
--- PASS: TestServerResources (0.09s)
PASS
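ServerResources is also not shown above; judging by the output, it is a thin wrapper over client-go's discovery API, roughly:

// Hypothetical sketch: list every resource the API server advertises.
func (c *Client) ServerResources() ([]*metav1.APIResourceList, error) {
_, resources, err := c.client.Discovery().ServerGroupsAndResources()
return resources, err
}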
Test listing nodes:
func TestListNode(t *testing.T) {
v, err := client.ListNode(ctx, k8s.NewListRequest())
if err != nil {
t.Fatal(err)
}
for i := range v.Items {
t.Log(v.Items[i].Name)
}
}
=== RUN TestListNode
client_test.go:52: vm-4-9-centos
--- PASS: TestListNode (0.04s)
PASS
Test listing namespaces:
func TestListNamespace(t *testing.T) {
v, err := client.ListNamespace(ctx, k8s.NewListRequest())
if err != nil {
t.Fatal(err)
}
for i := range v.Items {
t.Log(v.Items[i].Name)
}
}
Test creating a namespace:
func TestCreateNamespace(t *testing.T) {
ns := &corev1.Namespace{}
ns.Name = "wendao"
v, err := client.CreateNamespace(ctx, ns)
if err != nil {
t.Fatal(err)
}
t.Log(v.Name)
}
=== RUN TestCreateNamespace
client_test.go:73: wendao
--- PASS: TestCreateNamespace (0.04s)
PASS
Test listing Deployments:
func TestListDeployment(t *testing.T) {
req := k8s.NewListRequest()
req.Namespace = "kube-system"
v, err := client.ListDeployment(ctx, req)
if err != nil {
t.Fatal(err)
}
for i := range v.Items {
item := v.Items[i]
t.Log(item.Namespace, item.Name)
}
}
=== RUN TestListDeployment
client_test.go:95: kube-system coredns
client_test.go:95: kube-system local-path-provisioner
client_test.go:95: kube-system metrics-server
client_test.go:95: kube-system traefik
--- PASS: TestListDeployment (0.05s)
PASS
Test getting the details of a Deployment:
func TestGetDeployment(t *testing.T) {
req := k8s.NewGetRequest("coredns")
req.Namespace = "kube-system"
v, err := client.GetDeployment(ctx, req)
if err != nil {
t.Fatal(err)
}
// serialize to YAML
yd, err := yaml.Marshal(v)
if err != nil {
t.Fatal(err)
}
t.Log(string(yd))
}
=== RUN TestGetDeployment
client_test.go:112: metadata:
annotations:
deployment.kubernetes.io/revision: "1"
objectset.rio.cattle.io/applied: H4sIAAAAAAAA/6xV3XLbNhN9lW/2mtSPncQKZ76L1HKbTBpVE9m5yXgyK3ApogKxKADKZj18986SlCwnjp10eiUK2D04e7BncQfo9CfyQbOFDNC5MN5NIYGttjlkMCdnuKnIRkigoog5RoTsDtBajhg12yB/ef0nqRgojrzmkcIYDY00j7WAQPLdfb6x5NPNbgsZbE/D0c5umvzvvbb5/9/kOdtnISxWBBko9pTb8EPhwaGSnG29pjQ0IVIFbQIG12S6orazkKJz+5AeVz69pUhB0IZjz9nTfLF64tgSQwkZrBWdzE5PXs9m0+nZi1OcnM5e4frldFKcFK/OqDh7cfJiol6eCZFvSnqCdHCkhHIgQyqyl+8Koyp/f6qaVjKjx0ibRkI8G6Pt5srlGKmHuL2yuENtcG0IsmmbQGycEPj4IFbWqXJmn3fUKU/q2R5xV2wjaks+QPb5DtBv5ANSxbaABMYU1XgQYyyCF9oQXCegK9wII08bHaJvRsqmJdrN3yXXIzS6qa0KI8XV2KNVJflxpb0XoHSA639hgFrWxizZaNVABu+KBcelp9BbwOgdWQph6XndVVqgNrWny9JTKNnkkJ0mUMbofqMo+w6j3Pu4JDSxhAQc+wjZbDKbJBBUSd0dv728XIqG2uqo0czJYLMixTYPkL2aJODIa84PS1NJrpWiEI5OniYQdUVcx/vAx/pIKPQaHyRfdqxenh6ih0jPkRUbyOBqLgyfSUmjcg/TLs8fTXs9PUqsKHqtwiOJ1wl4wlz/K8kls7lXfDqb/qji3wp+8hN6ewpce0Vdzxtd6Rh6T1TspaWmZ5MPGrrAv2oK/a5ytWxNJlU3aIfQPlI8Qqr2OjbnbCPddmWiMXyz9HqnDW3oIig03TyGrEATKAGFDtfa6Kh7Kpjn4qfFxeWXX94t5l9WFx8/vTu/EAvlnp3soTFw3fai/2FN85E5/qoNDYMmi76mNoEdm7qiD1zboY8q+VwOuh/5FI66zxZ6k/aZcH/CHvP7GGNVh8jVEVT3P30G8VqaJ7fh4OQ5FVgbMbHlnFZHg3JNEUcP5zoHyMBoW9/KRTmvuVPfYAiLnkUvSapMHSL5VHkdtUIDcld+pxW9UUoqWnztvsiG/P7l/HwHWxJ250N+99qFro4E2EmkkISLWy2dIkJRUZCKkMGCV6qkvDZSfg8jpaWeDX1Vj9jPs0mdQUv/KXKFUv/jkNdSrWPDm2bl5H7O2cp7o/d9070Nq59+syq8XW3ppnfgcMD7juVDbiWH2DVNAjcl2SsbMOpQ6P4xgzkvOB4KFbZ9Mx1mY6E3H9AJER2penBd+/cn2Y+bw4oI2QctOKe3LEocou6X5LivJnP7HbcM8/OezcO89GAQdtJWaA5Gfcox7XXbtu0/AQAA///KZUuFAQoAAA
objectset.rio.cattle.io/id: ""
objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
objectset.rio.cattle.io/owner-name: coredns
objectset.rio.cattle.io/owner-namespace: kube-system
creationTimestamp: "2023-01-13T04:46:15Z"
generation: 1
labels:
k8s-app: kube-dns
kubernetes.io/name: CoreDNS
objectset.rio.cattle.io/hash: bce283298811743a0386ab510f2f67ef74240c57
managedFields:
- apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.: {}
f:objectset.rio.cattle.io/applied: {}
f:objectset.rio.cattle.io/id: {}
f:objectset.rio.cattle.io/owner-gvk: {}
f:objectset.rio.cattle.io/owner-name: {}
f:objectset.rio.cattle.io/owner-namespace: {}
f:labels:
.: {}
f:k8s-app: {}
f:kubernetes.io/name: {}
f:objectset.rio.cattle.io/hash: {}
f:spec:
f:progressDeadlineSeconds: {}
f:replicas: {}
f:revisionHistoryLimit: {}
f:selector: {}
f:strategy:
f:rollingUpdate:
.: {}
f:maxSurge: {}
f:maxUnavailable: {}
f:type: {}
f:template:
f:metadata:
f:labels:
.: {}
f:k8s-app: {}
f:spec:
f:containers:
k:{"name":"coredns"}:
.: {}
f:args: {}
f:image: {}
f:imagePullPolicy: {}
f:livenessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
f:scheme: {}
f:initialDelaySeconds: {}
f:periodSeconds: {}
f:successThreshold: {}
f:timeoutSeconds: {}
f:name: {}
f:ports:
.: {}
k:{"containerPort":53,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
k:{"containerPort":53,"protocol":"UDP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
k:{"containerPort":9153,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
f:readinessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
f:scheme: {}
f:periodSeconds: {}
f:successThreshold: {}
f:timeoutSeconds: {}
f:resources:
.: {}
f:limits:
.: {}
f:memory: {}
f:requests:
.: {}
f:cpu: {}
f:memory: {}
f:securityContext:
.: {}
f:allowPrivilegeEscalation: {}
f:capabilities:
.: {}
f:add: {}
f:drop: {}
f:readOnlyRootFilesystem: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:volumeMounts:
.: {}
k:{"mountPath":"/etc/coredns"}:
.: {}
f:mountPath: {}
f:name: {}
f:readOnly: {}
k:{"mountPath":"/etc/coredns/custom"}:
.: {}
f:mountPath: {}
f:name: {}
f:readOnly: {}
f:dnsPolicy: {}
f:nodeSelector: {}
f:priorityClassName: {}
f:restartPolicy: {}
f:schedulerName: {}
f:securityContext: {}
f:serviceAccount: {}
f:serviceAccountName: {}
f:terminationGracePeriodSeconds: {}
f:tolerations: {}
f:topologySpreadConstraints:
.: {}
k:{"topologyKey":"kubernetes.io/hostname","whenUnsatisfiable":"DoNotSchedule"}:
.: {}
f:labelSelector: {}
f:maxSkew: {}
f:topologyKey: {}
f:whenUnsatisfiable: {}
f:volumes:
.: {}
k:{"name":"config-volume"}:
.: {}
f:configMap:
.: {}
f:defaultMode: {}
f:items: {}
f:name: {}
f:name: {}
k:{"name":"custom-config-volume"}:
.: {}
f:configMap:
.: {}
f:defaultMode: {}
f:name: {}
f:optional: {}
f:name: {}
manager: deploy@vm-4-9-centos
operation: Update
time: "2023-01-13T04:46:15Z"
- apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
f:deployment.kubernetes.io/revision: {}
f:status:
f:availableReplicas: {}
f:conditions:
.: {}
k:{"type":"Available"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"Progressing"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:updatedReplicas: {}
manager: k3s
operation: Update
subresource: status
time: "2023-01-13T04:46:32Z"
name: coredns
namespace: kube-system
resourceVersion: "458"
uid: 786a3f23-6dad-49fc-b990-48dbc69be5c5
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.cn-hangzhou.aliyuncs.com/rancher/mirrored-coredns-coredns:1.9.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: 8181
scheme: HTTP
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
- mountPath: /etc/coredns/custom
name: custom-config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
beta.kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
- key: NodeHosts
path: NodeHosts
name: coredns
name: config-volume
- configMap:
defaultMode: 420
name: coredns-custom
optional: true
name: custom-config-volume
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2023-01-13T04:46:26Z"
lastUpdateTime: "2023-01-13T04:46:26Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
- lastTransitionTime: "2023-01-13T04:46:26Z"
lastUpdateTime: "2023-01-13T04:46:32Z"
message: ReplicaSet "coredns-fd7f5dc55" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
--- PASS: TestGetDeployment (0.05s)
PASS
Test creating a Deployment:
func TestCreateDeployment(t *testing.T) {
req := &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "nginx",
Namespace: "wendao",
},
Spec: v1.DeploymentSpec{
Replicas: tea.Int32(2),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"k8s-app": "nginx"},
},
Strategy: v1.DeploymentStrategy{
Type: v1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &v1.RollingUpdateDeployment{
MaxSurge: k8s.NewIntStr(1),
MaxUnavailable: k8s.NewIntStr(0),
},
},
// Pod template parameters
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
Labels: map[string]string{
"k8s-app": "nginx",
},
},
Spec: corev1.PodSpec{
// Pod-level parameters
DNSPolicy: corev1.DNSClusterFirst,
RestartPolicy: corev1.RestartPolicyAlways,
SchedulerName: "default-scheduler",
TerminationGracePeriodSeconds: tea.Int64(30),
// container parameters
Containers: []corev1.Container{
{
Name: "nginx",
Image: "nginx:latest",
ImagePullPolicy: corev1.PullAlways,
Env: []corev1.EnvVar{
{Name: "APP_NAME", Value: "nginx"},
{Name: "APP_VERSION", Value: "v1"},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("500m"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
corev1.ResourceMemory: resource.MustParse("50Mi"),
},
},
},
},
},
},
},
}
yamlReq, err := yaml.Marshal(req)
if err != nil {
t.Fatal(err)
}
fmt.Println(string(yamlReq))
d, err := client.CreateDeployment(ctx, req)
if err != nil {
t.Fatal(err)
}
t.Log(d)
}
=== RUN TestCreateDeployment
metadata:
creationTimestamp: null
name: nginx
namespace: wendao
spec:
replicas: 2
selector:
matchLabels:
k8s-app: nginx
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: nginx
spec:
containers:
- env:
- name: APP_NAME
value: nginx
- name: APP_VERSION
value: v1
image: nginx:latest
imagePullPolicy: Always
name: nginx
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
status: {}
client_test.go:185: &Deployment{ObjectMeta:{nginx wendao fd89001f-7def-425d-a4e0-cea4f4a85407 265132 1 2023-01-28 17:26:18 +0800 CST <nil> <nil> map[] map[] [] [] [{Go-http-client Update apps/v1 2023-01-28 17:26:18 +0800 CST FieldsV1 {"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:k8s-app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"nginx\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"APP_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"APP_VERSION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{k8s-app: nginx,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[k8s-app:nginx] map[] [] [] []} {[] [] [{nginx nginx:latest [] [] [] [] [{APP_NAME nginx nil} {APP_VERSION v1 nil}] {map[cpu:{{500 -3} {<nil>} 500m DecimalSI} memory:{{1073741824 0} {<nil>} 1Gi BinarySI}] map[cpu:{{50 -3} {<nil>} 50m DecimalSI} memory:{{52428800 0} {<nil>} 50Mi BinarySI}]} [] [] nil nil nil nil /dev/termination-log File Always nil false false false}] [] Always 0xc0003d4570 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[]DeploymentCondition{},ReadyReplicas:0,CollisionCount:nil,},}
--- PASS: TestCreateDeployment (0.06s)
PASS
[root@VM-4-9-centos ~]# kubectl get deployment -n wendao -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx 2/2 2 2 81s nginx nginx:latest k8s-app=nginx
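The k8s.NewIntStr helper used above for MaxSurge and MaxUnavailable is not shown in this section; presumably it is a small wrapper around intstr.FromInt, along these lines:

// Hypothetical helper in package k8s, wrapping k8s.io/apimachinery/pkg/util/intstr.
func NewIntStr(i int) *intstr.IntOrString {
v := intstr.FromInt(i)
return &v
}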
Test scaling a Deployment's replica count:
func TestScaleDeployment(t *testing.T) {
req := k8s.NewScaleRequest()
req.Scale.Namespace = "wendao"
req.Scale.Name = "nginx"
req.Scale.Spec.Replicas = 1
v, err := client.ScaleDeployment(ctx, req)
if err != nil {
t.Fatal(err)
}
// serialize to YAML
yd, err := yaml.Marshal(v)
if err != nil {
t.Fatal(err)
}
t.Log(string(yd))
}
=== RUN TestScaleDeployment
client_test.go:203: metadata:
creationTimestamp: "2023-01-28T09:26:18Z"
name: nginx
namespace: wendao
resourceVersion: "265186"
uid: fd89001f-7def-425d-a4e0-cea4f4a85407
spec:
replicas: 1
status:
replicas: 2
selector: k8s-app=nginx
--- PASS: TestScaleDeployment (0.04s)
PASS
[root@VM-4-9-centos ~]# kubectl get deployment -n wendao -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx 1/1 1 1 2m27s nginx nginx:latest k8s-app=nginx
Test listing Pods:
func TestListPod(t *testing.T) {
req := k8s.NewListRequest()
req.Namespace = "kube-system"
req.Opts.LabelSelector = "k8s-app=kube-dns"
pods, err := client.ListPod(ctx, req)
if err != nil {
t.Fatal(err)
}
// print namespace and name
for _, v := range pods.Items {
t.Log(v.Namespace, v.Name)
}
}
=== RUN TestListPod
client_test.go:233: kube-system coredns-fd7f5dc55-74d5d
--- PASS: TestListPod (0.05s)
PASS
Test getting a Pod's details:
func TestGetPod(t *testing.T) {
req := k8s.NewGetRequest("coredns-fd7f5dc55-74d5d")
req.Namespace = "kube-system"
pods, err := client.GetPod(ctx, req)
if err != nil {
t.Fatal(err)
}
// serialize to YAML
yd, err := yaml.Marshal(pods)
if err != nil {
t.Fatal(err)
}
t.Log(string(yd))
}
=== RUN TestGetPod
client_test.go:250: metadata:
creationTimestamp: "2023-01-13T04:46:26Z"
generateName: coredns-fd7f5dc55-
labels:
k8s-app: kube-dns
pod-template-hash: fd7f5dc55
managedFields:
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:generateName: {}
f:labels:
.: {}
f:k8s-app: {}
f:pod-template-hash: {}
f:ownerReferences:
.: {}
k:{"uid":"2ec085ae-9d9d-4fa6-8780-3106656025ea"}: {}
f:spec:
f:containers:
k:{"name":"coredns"}:
.: {}
f:args: {}
f:image: {}
f:imagePullPolicy: {}
f:livenessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
Test deleting a Deployment:
func TestDeleteDeployment(t *testing.T) {
req := k8s.NewDeleteRequest("nginx")
req.Namespace = "wendao"
err := client.DeleteDeployment(ctx, req)
if err != nil {
t.Fatal(err)
}
}
=== RUN TestDeleteDeployment
--- PASS: TestDeleteDeployment (0.03s)
PASS
[root@VM-4-9-centos ~]# kubectl get deployment -n wendao -o wide
No resources found in wendao namespace.
Summary
The CRUD tests against the cluster's resource objects all pass. The operations themselves are fairly simple: for the most part, each provider method is a thin wrapper around the corresponding client-go call.