28 changes: 18 additions & 10 deletions cmd/nginx-supportpkg.go
@@ -31,17 +31,17 @@ import (

func Execute() {

var namespaces []string
var product string
var jobList []jobs.Job
collector := data_collector.DataCollector{}

var rootCmd = &cobra.Command{
Use: "nginx-supportpkg",
Short: "nginx-supportpkg - a tool to create Ingress Controller diagnostics package",
Long: `nginx-supportpkg - a tool to create Ingress Controller diagnostics package`,
Run: func(cmd *cobra.Command, args []string) {

collector, err := data_collector.NewDataCollector(namespaces...)
err := data_collector.NewDataCollector(&collector)
if err != nil {
fmt.Println(fmt.Errorf("unable to start data collector: %s", err))
os.Exit(1)
@@ -57,21 +57,25 @@ func Execute() {
jobList = slices.Concat(jobs.CommonJobList(), jobs.NGFJobList())
case "ngx":
jobList = slices.Concat(jobs.CommonJobList(), jobs.NGXJobList())
case "nim":
jobList = slices.Concat(jobs.CommonJobList(), jobs.NIMJobList())
default:
fmt.Printf("Error: product must be in the following list: [nic, ngf, ngx]\n")
fmt.Printf("Error: product must be in the following list: [nic, ngf, ngx, nim]\n")
os.Exit(1)
}

if collector.AllNamespacesExist() {
failedJobs := 0
for _, job := range jobList {
fmt.Printf("Running job %s...", job.Name)
err = job.Collect(collector)
if err != nil {
fmt.Printf(" Error: %s\n", err)
err, Skipped := job.Collect(&collector)
if Skipped {
fmt.Print(" SKIPPED\n")
} else if err != nil {
fmt.Printf(" FAILED: %s\n", err)
failedJobs++
} else {
fmt.Print(" OK\n")
fmt.Print(" COMPLETED\n")
}
}

@@ -94,7 +98,7 @@ func Execute() {
},
}

rootCmd.Flags().StringSliceVarP(&namespaces, "namespace", "n", []string{}, "list of namespaces to collect information from")
rootCmd.Flags().StringSliceVarP(&collector.Namespaces, "namespace", "n", []string{}, "list of namespaces to collect information from")
if err := rootCmd.MarkFlagRequired("namespace"); err != nil {
fmt.Println(err)
os.Exit(1)
@@ -106,6 +110,9 @@ func Execute() {
os.Exit(1)
}

rootCmd.Flags().BoolVarP(&collector.ExcludeDBData, "exclude-db-data", "d", false, "exclude DB data collection")
rootCmd.Flags().BoolVarP(&collector.ExcludeTimeSeriesData, "exclude-time-series-data", "t", false, "exclude time series data collection")

versionStr := "nginx-supportpkg - version: " + version.Version + " - build: " + version.Build + "\n"
rootCmd.SetVersionTemplate(versionStr)
rootCmd.Version = versionStr
@@ -115,8 +122,9 @@ func Execute() {
"Usage:" +
"\n nginx-supportpkg -h|--help" +
"\n nginx-supportpkg -v|--version" +
"\n nginx-supportpkg [-n|--namespace] ns1 [-n|--namespace] ns2 [-p|--product] [nic,ngf,ngx]" +
"\n nginx-supportpkg [-n|--namespace] ns1,ns2 [-p|--product] [nic,ngf,ngx] \n")
"\n nginx-supportpkg [-n|--namespace] ns1 [-n|--namespace] ns2 [-p|--product] [nic,ngf,ngx,nim]" +
"\n nginx-supportpkg [-n|--namespace] ns1,ns2 [-p|--product] [nic,ngf,ngx,nim]" +
"\n nginx-supportpkg [-n|--namespace] ns1 [-n|--namespace] ns2 [-p|--product] [nim] [-d|--exclude-db-data] [-t|--exclude-time-series-data] \n")

if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
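For reference, the new initialization flow above can be summarized as follows. This is a condensed, illustrative sketch: the flag names, struct fields, and the NewDataCollector signature come from the diff, while the product switch and the job loop are elided.

// Sketch of the new wiring in Execute(): flags bind directly onto a shared
// DataCollector value, and NewDataCollector fills in the remaining fields
// through a pointer instead of constructing and returning a new value.
package main

import (
	"fmt"
	"os"

	"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
	"github.com/spf13/cobra"
)

func main() {
	collector := data_collector.DataCollector{}

	rootCmd := &cobra.Command{
		Use: "nginx-supportpkg",
		Run: func(cmd *cobra.Command, args []string) {
			// Namespaces and the exclude flags are already populated by Cobra here.
			if err := data_collector.NewDataCollector(&collector); err != nil {
				fmt.Println(fmt.Errorf("unable to start data collector: %s", err))
				os.Exit(1)
			}
			// ... select the job list for -p/--product and run job.Collect(&collector) for each job ...
		},
	}

	rootCmd.Flags().StringSliceVarP(&collector.Namespaces, "namespace", "n", []string{}, "list of namespaces to collect information from")
	rootCmd.Flags().BoolVarP(&collector.ExcludeDBData, "exclude-db-data", "d", false, "exclude DB data collection")
	rootCmd.Flags().BoolVarP(&collector.ExcludeTimeSeriesData, "exclude-time-series-data", "t", false, "exclude time series data collection")

	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
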
68 changes: 34 additions & 34 deletions pkg/data_collector/data_collector.go
@@ -24,9 +24,15 @@ import (
"compress/gzip"
"context"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strconv"
"time"

helmClient "github.com/mittwald/go-helm-client"
"github.com/nginxinc/nginx-k8s-supportpkg/pkg/crds"
"io"
corev1 "k8s.io/api/core/v1"
crdClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,35 +44,32 @@ import (
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/homedir"
metricsClient "k8s.io/metrics/pkg/client/clientset/versioned"
"log"
"os"
"path/filepath"
"strconv"
"time"
)

type DataCollector struct {
BaseDir string
Namespaces []string
Logger *log.Logger
LogFile *os.File
K8sRestConfig *rest.Config
K8sCoreClientSet *kubernetes.Clientset
K8sCrdClientSet *crdClient.Clientset
K8sMetricsClientSet *metricsClient.Clientset
K8sHelmClientSet map[string]helmClient.Client
BaseDir string
Namespaces []string
Logger *log.Logger
LogFile *os.File
K8sRestConfig *rest.Config
K8sCoreClientSet *kubernetes.Clientset
K8sCrdClientSet *crdClient.Clientset
K8sMetricsClientSet *metricsClient.Clientset
K8sHelmClientSet map[string]helmClient.Client
ExcludeDBData bool
ExcludeTimeSeriesData bool
}

func NewDataCollector(namespaces ...string) (*DataCollector, error) {
func NewDataCollector(collector *DataCollector) error {

tmpDir, err := os.MkdirTemp("", "-pkg-diag")
if err != nil {
return nil, fmt.Errorf("unable to create temp directory: %s", err)
return fmt.Errorf("unable to create temp directory: %s", err)
}

logFile, err := os.OpenFile(filepath.Join(tmpDir, "supportpkg.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, fmt.Errorf("unable to create log file: %s", err)
return fmt.Errorf("unable to create log file: %s", err)
}

// Find config
@@ -77,30 +80,27 @@ func NewDataCollector(namespaces ...string) (*DataCollector, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)

if err != nil {
return nil, fmt.Errorf("unable to connect to k8s using file %s: %s", kubeConfig, err)
}

dc := DataCollector{
BaseDir: tmpDir,
Namespaces: namespaces,
LogFile: logFile,
Logger: log.New(logFile, "", log.LstdFlags|log.LUTC|log.Lmicroseconds|log.Lshortfile),
K8sHelmClientSet: make(map[string]helmClient.Client),
return fmt.Errorf("unable to connect to k8s using file %s: %s", kubeConfig, err)
}
// Set up the DataCollector options
collector.BaseDir = tmpDir
collector.LogFile = logFile
collector.Logger = log.New(logFile, "", log.LstdFlags|log.LUTC|log.Lmicroseconds|log.Lshortfile)
collector.K8sHelmClientSet = make(map[string]helmClient.Client)

//Initialize clients
dc.K8sRestConfig = config
dc.K8sCoreClientSet, _ = kubernetes.NewForConfig(config)
dc.K8sCrdClientSet, _ = crdClient.NewForConfig(config)
dc.K8sMetricsClientSet, _ = metricsClient.NewForConfig(config)
for _, namespace := range dc.Namespaces {
dc.K8sHelmClientSet[namespace], _ = helmClient.NewClientFromRestConf(&helmClient.RestConfClientOptions{
collector.K8sRestConfig = config
collector.K8sCoreClientSet, _ = kubernetes.NewForConfig(config)
collector.K8sCrdClientSet, _ = crdClient.NewForConfig(config)
collector.K8sMetricsClientSet, _ = metricsClient.NewForConfig(config)
for _, namespace := range collector.Namespaces {
collector.K8sHelmClientSet[namespace], _ = helmClient.NewClientFromRestConf(&helmClient.RestConfClientOptions{
Options: &helmClient.Options{Namespace: namespace},
RestConfig: config,
})
}

return &dc, nil
return nil
}

func (c *DataCollector) WrapUp(product string) (string, error) {
92 changes: 89 additions & 3 deletions pkg/jobs/common_job_list.go
@@ -23,12 +23,13 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
"io"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"path/filepath"
"time"

"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func CommonJobList() []Job {
@@ -88,6 +89,91 @@ func CommonJobList() []Job {
ch <- jobResult
},
},
{
Name: "pv-list",
Timeout: time.Second * 10,
Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
for _, namespace := range dc.Namespaces {
result, err := dc.K8sCoreClientSet.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
if err != nil {
dc.Logger.Printf("\tCould not retrieve persistent volumes list %s: %v\n", namespace, err)
} else {
jsonResult, _ := json.MarshalIndent(result, "", " ")
jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "persistentvolumes.json")] = jsonResult
}
}
ch <- jobResult
},
},
{
Name: "pvc-list",
Timeout: time.Second * 10,
Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
for _, namespace := range dc.Namespaces {
result, err := dc.K8sCoreClientSet.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
dc.Logger.Printf("\tCould not retrieve persistent volume claims list %s: %v\n", namespace, err)
} else {
jsonResult, _ := json.MarshalIndent(result, "", " ")
jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "persistentvolumeclaims.json")] = jsonResult
}
}
ch <- jobResult
},
},
{
Name: "sc-list",
Timeout: time.Second * 10,
Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
for _, namespace := range dc.Namespaces {
result, err := dc.K8sCoreClientSet.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
dc.Logger.Printf("\tCould not retrieve storage classes list %s: %v\n", namespace, err)
} else {
jsonResult, _ := json.MarshalIndent(result, "", " ")
jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "storageclasses.json")] = jsonResult
}
}
ch <- jobResult
},
},
{
Name: "apiresources-list",
Timeout: time.Second * 10,
Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
for _, namespace := range dc.Namespaces {
result, err := dc.K8sCoreClientSet.DiscoveryClient.ServerPreferredResources()
if err != nil {
dc.Logger.Printf("\tCould not retrieve API resources list %s: %v\n", namespace, err)
} else {
jsonResult, _ := json.MarshalIndent(result, "", " ")
jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "apiresources.json")] = jsonResult
}
}
ch <- jobResult
},
},
{
Name: "apiversions-list",
Timeout: time.Second * 10,
Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
for _, namespace := range dc.Namespaces {
result, err := dc.K8sCoreClientSet.DiscoveryClient.ServerGroups()
if err != nil {
dc.Logger.Printf("\tCould not retrieve API versions list %s: %v\n", namespace, err)
} else {
jsonResult, _ := json.MarshalIndent(result, "", " ")
jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "apiversions.json")] = jsonResult
}
}
ch <- jobResult
},
},
{
Name: "events-list",
Timeout: time.Second * 10,
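The five new entries all share the same Job shape. As a side note, PersistentVolumes, StorageClasses, and the API discovery calls are cluster-scoped, so one List per job is enough; the sketch below shows that variant of the pattern. It is a hedged fragment meant to slot into the CommonJobList slice, not the code above — the single List call hoisted out of the namespace loop is the only structural difference.

	{
		Name:    "sc-list",
		Timeout: time.Second * 10,
		Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
			jobResult := JobResult{Files: make(map[string][]byte), Error: nil}
			// StorageClasses are cluster-scoped, so list them once...
			result, err := dc.K8sCoreClientSet.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
			if err != nil {
				dc.Logger.Printf("\tCould not retrieve storage classes list: %v\n", err)
			} else {
				jsonResult, _ := json.MarshalIndent(result, "", " ")
				// ...and write the same document under each requested namespace directory.
				for _, namespace := range dc.Namespaces {
					jobResult.Files[filepath.Join(dc.BaseDir, "resources", namespace, "storageclasses.json")] = jsonResult
				}
			}
			ch <- jobResult
		},
	},
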
25 changes: 15 additions & 10 deletions pkg/jobs/job.go
@@ -20,12 +20,12 @@ package jobs

import (
"context"
"errors"
"fmt"
"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
"os"
"path/filepath"
"time"

"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
)

type Job struct {
@@ -35,11 +35,12 @@ type Job struct {
}

type JobResult struct {
Files map[string][]byte
Error error
Files map[string][]byte
Error error
Skipped bool
}

func (j Job) Collect(dc *data_collector.DataCollector) error {
func (j Job) Collect(dc *data_collector.DataCollector) (error, bool) {
ch := make(chan JobResult, 1)

ctx, cancel := context.WithTimeout(context.Background(), j.Timeout)
@@ -51,28 +52,32 @@ func (j Job) Collect(dc *data_collector.DataCollector) error {
select {
case <-ctx.Done():
dc.Logger.Printf("\tJob %s has timed out: %s\n---\n", j.Name, ctx.Err())
return errors.New(fmt.Sprintf("Context cancelled: %v", ctx.Err()))
return fmt.Errorf("Context cancelled: %v", ctx.Err()), false

case jobResults := <-ch:
if jobResults.Skipped {
dc.Logger.Printf("\tJob %s has been skipped\n---\n", j.Name)
return nil, true
}
if jobResults.Error != nil {
dc.Logger.Printf("\tJob %s has failed: %s\n", j.Name, jobResults.Error)
return jobResults.Error
return jobResults.Error, false
}

for fileName, fileValue := range jobResults.Files {
err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
if err != nil {
return fmt.Errorf("MkdirAll failed: %v", err)
return fmt.Errorf("MkdirAll failed: %v", err), jobResults.Skipped
}
file, _ := os.Create(fileName)
_, err = file.Write(fileValue)
if err != nil {
return fmt.Errorf("Write failed: %v", err)
return fmt.Errorf("Write failed: %v", err), jobResults.Skipped
}
_ = file.Close()
dc.Logger.Printf("\tJob %s wrote %d bytes to %s\n", j.Name, len(fileValue), fileName)
}
dc.Logger.Printf("\tJob %s completed successfully\n---\n", j.Name)
return nil
return nil, jobResults.Skipped
}
}
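
Taken together with the cmd changes above, the new Skipped flag flows end to end roughly like this. The job body is hypothetical (NIMJobList() is not part of this diff); the caller side mirrors the loop in cmd/nginx-supportpkg.go above.

	// Hypothetical job body showing the intended use of JobResult.Skipped,
	// e.g. a NIM database dump honoring the new --exclude-db-data flag.
	Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) {
		if dc.ExcludeDBData {
			// Nothing to collect: report the job as skipped rather than failed.
			ch <- JobResult{Skipped: true}
			return
		}
		// ... collect DB data into JobResult.Files as usual ...
	},

	// Caller side, as in cmd/nginx-supportpkg.go above:
	err, skipped := job.Collect(&collector)
	if skipped {
		fmt.Print(" SKIPPED\n")
	} else if err != nil {
		fmt.Printf(" FAILED: %s\n", err)
		failedJobs++
	} else {
		fmt.Print(" COMPLETED\n")
	}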