diff --git a/.github/workflows/e2e-test.yaml b/.github/workflows/e2e-test.yaml deleted file mode 100644 index 5d2ef09da..000000000 --- a/.github/workflows/e2e-test.yaml +++ /dev/null @@ -1,316 +0,0 @@ -name: e2e-test -on: - pull_request: - push: - branches: - - main - workflow_dispatch: - inputs: - kernel-image: - type: string - description: 'The kernel image to use for the VMs. If not specified, a kernel will be built from source' - required: false - cluster: - type: choice - description: 'The cluster to run the tests on' - options: - - k3d - - kind - default: k3d - workflow_call: - inputs: - tag: - type: string - description: 'Tag to use for images, skipping building' - required: false - push-yamls: - type: boolean - description: 'If true, pushes a tarball containing the rendered yaml manifests as an artifact' - required: false - -env: - IMG_E2E_TEST: vm-postgres:15-bullseye - -defaults: - run: - shell: bash -euo pipefail {0} - -jobs: - get-tag: - outputs: - tag: ${{ inputs.tag || steps.get-tag.outputs.tag }} - runs-on: ubuntu-latest - steps: - - name: Harden Runner - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit - - - name: get tag - if: ${{ inputs.tag == '' }} - id: get-tag - env: - SHA: ${{ github.event.pull_request.head.sha || github.sha }} - run: | - test -n "$SHA" - sha="${SHA::7}" - echo "tag=$sha.$GITHUB_RUN_ID" | tee -a $GITHUB_OUTPUT - - build-images: - needs: get-tag - uses: ./.github/workflows/build-images.yaml - with: - skip: ${{ inputs.tag != '' }} - tag: ${{ inputs.tag || needs.get-tag.outputs.tag }} - kernel-image: ${{ inputs.kernel-image }} - # note: setting to preserve runner pods will mean that if !skip, they'll be built with those - # settings and used properly in the tests. But if skip (because inputs.tag != ''), then this - # setting will have no effect and the release images will be normal. - controller-preserve-runner-pods: true - archs: '["amd64","arm64"]' - secrets: inherit - - build-test-vm: - needs: get-tag - uses: ./.github/workflows/build-test-vm.yaml - with: - skip: ${{ inputs.tag != '' }} - tag: ${{ inputs.tag || needs.get-tag.outputs.tag }} - archs: '["amd64","arm64"]' - secrets: inherit - - e2e-tests: - needs: [ build-images, build-test-vm ] - strategy: - fail-fast: false - matrix: - cluster: - - ${{ inputs.cluster || 'k3d' }} - arch: [ amd64, arm64 ] - include: - # we have an assumption that manifests for different architectures are the same, so we - # only need to push one at a time, and we push the amd64 manifests. - - arch: amd64 - # nb: use format(..) to catch both inputs.push-yamls = true AND inputs.push-yamls = 'true'. - push-yamls: ${{ inputs.push-yamls }} - - arch: arm64 - push-yamls: false - - runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.arch == 'arm64' && 'huge-arm64' || 'large')) }} - - steps: - - name: Harden Runner - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit - - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 # fetch all, so that we also include tags - - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 - with: - go-version-file: 'go.mod' - # Disable cache on self-hosted runners to avoid /usr/bin/tar errors, see https://github.com/actions/setup-go/issues/403 - cache: false - # Sometimes setup-go gets stuck. 
Without this, it'll keep going until the job gets killed - timeout-minutes: 10 - - - name: Install dependencies - run: | - sudo apt install -y python3-venv - make e2e-tools - echo $(pwd)/bin >> $GITHUB_PATH - - - name: Check dependencies - run: | - kubectl version --client --output=yaml - k3d version - kind version - kuttl version - docker version - - - run: make render-release - env: - IMG_CONTROLLER: ${{ needs.build-images.outputs.controller }} - IMG_VXLAN_CONTROLLER: ${{ needs.build-images.outputs.vxlan-controller }} - IMG_RUNNER: ${{ needs.build-images.outputs.runner }} - IMG_SCHEDULER: ${{ needs.build-images.outputs.scheduler }} - IMG_AUTOSCALER_AGENT: ${{ needs.build-images.outputs.autoscaler-agent }} - - - name: upload manifests - # nb: use format(..) to catch both true AND 'true'. - if: ${{ format('{0}', matrix.push-yamls) == 'true' }} - uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 - with: - name: rendered_manifests - # nb: prefix before wildcard is removed from the uploaded files, so the artifact should - # contain e.g. - # - autoscale-scheduler.yaml - # - autoscaler-agent.yaml - # ... - # ref https://github.com/actions/upload-artifact#upload-using-multiple-paths-and-exclusions - path: rendered_manifests/* - if-no-files-found: error - retention-days: 2 # minimum is 1 day; 0 is default. These are only used temporarily. - - - name: set custom docker config directory - uses: neondatabase/dev-actions/set-docker-config-dir@6094485bf440001c94a94a3f9e221e81ff6b6193 - - - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 - with: - username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} - password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} - - # https://docs.k3s.io/installation/private-registry#registries-configuration-file - # https://github.com/neondatabase/autoscaling/issues/975 - - name: set k3d registries.yaml - # TODO: Implement an equivalent for kind? 
- # Relevant docs seem to be here: https://kind.sigs.k8s.io/docs/user/private-registries - if: ${{ matrix.cluster == 'k3d' }} - env: - DOCKERHUB_USERNAME: ${{ secrets.NEON_DOCKERHUB_USERNAME }} - DOCKERHUB_PASSWORD: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} - run: | - { - echo "configs:" - echo " registry-1.docker.io:" - echo " auth:" - echo " username: $DOCKERHUB_USERNAME" - echo " password: $DOCKERHUB_PASSWORD" - } >> $(pwd)/k3d/registries.yaml - - - - run: make ${{ matrix.cluster }}-setup - env: - USE_REGISTRIES_FILE: true - - - name: deploy components - timeout-minutes: 3 - run: | - rendered () { echo "rendered_manifests/$1"; } - - kubectl apply -f $(rendered multus-dev.yaml) - kubectl -n kube-system rollout status daemonset kube-multus-ds - kubectl apply -f $(rendered whereabouts.yaml) - kubectl -n kube-system rollout status daemonset whereabouts - kubectl apply -f $(rendered neonvm-runner-image-loader.yaml) - kubectl -n neonvm-system rollout status daemonset neonvm-runner-image-loader - kubectl apply -f $(rendered neonvm.yaml) - kubectl -n neonvm-system rollout status daemonset neonvm-device-plugin - kubectl apply -f $(rendered neonvm-controller.yaml) - kubectl -n neonvm-system rollout status deployment neonvm-controller - kubectl apply -f $(rendered neonvm-vxlan-controller.yaml) - kubectl -n neonvm-system rollout status daemonset neonvm-vxlan-controller - kubectl apply -f $(rendered autoscale-scheduler.yaml) - kubectl -n kube-system rollout status deployment autoscale-scheduler - kubectl apply -f $(rendered autoscaler-agent.yaml) - kubectl -n kube-system rollout status daemonset autoscaler-agent - - - name: load e2e test vm image - env: - TEST_IMAGE: ${{ needs.build-test-vm.outputs.vm-postgres-16-bullseye }} - timeout-minutes: 2 - run: | - # Pull the docker image so we can re-tag it, because using a consistent tag inside the - # cluster means we can avoid dynamically editing the image used in the kuttl files. - docker pull "$TEST_IMAGE" - docker image tag "$TEST_IMAGE" "$IMG_E2E_TEST" - make load-example-vms - - - run: make arm_patch_e2e - if: matrix.arch == 'arm64' - - - run: make e2e - timeout-minutes: 15 - - - name: Get k8s logs and events - if: always() - run: | - if ! kubectl config current-context; then - echo "skipping cluster logs because no cluster found in kubectl context" - exit 0 - fi - - namespaces=$(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}') - for namespace in $namespaces; do - if [[ "$namespace" == "neonvm-system" ]] || [[ "$namespace" == kuttl-test-* ]]; then - tee_if_needed=$GITHUB_STEP_SUMMARY - else - tee_if_needed=/dev/null - fi - - { - echo "
" - echo "Namespace=$namespace" - } | tee -a $tee_if_needed - - pods=$(kubectl get pods -n $namespace -o jsonpath='{.items[*].metadata.name}') - for pod in $pods; do - { - echo "
" - echo "- Namespace=$namespace Pod=$pod Logs" - echo "
"
-              } | tee -a $tee_if_needed
-
-              restarts=$(
-                kubectl get pod -n $namespace $pod -o jsonpath='{.status.containerStatuses[0].restartCount}' || echo '0'
-              )
-              {
-                if [ "$restarts" -ne 0 ]; then
-                  echo "CONTAINER RESTARTED $restarts TIME(S)"
-                  echo "Previous logs:"
-                  kubectl logs -n $namespace -p $pod || echo 'Error getting logs'
-                  echo "Current logs:"
-                  kubectl logs -n $namespace $pod || echo 'Error getting logs'
-                else
-                  echo "Logs:"
-                  kubectl logs -n $namespace $pod || echo 'Error getting logs'
-                fi
-              } | tee -a $tee_if_needed
-              {
-                echo "\`\`\`"
-                echo "</details>"
-              } | tee -a $tee_if_needed
-
-              {
-                echo "<details>"
-                echo "<summary>- Namespace=$namespace Pod=$pod Events</summary>"
-                echo "\`\`\`"
-              } | tee -a $tee_if_needed
-
-              (kubectl get events --namespace $namespace --field-selector involvedObject.name=$pod || echo 'Error getting events') | tee -a $tee_if_needed
-
-              {
-                echo "\`\`\`"
-                echo ""
-                echo "</details>"
-              } | tee -a $tee_if_needed
-            done
-
-            echo "</details>
" | tee -a $tee_if_needed - done - - - name: Cleanup - if: always() - run: make ${{ matrix.cluster }}-destroy - - # This job simplifies setting branch protection rules (in GitHub UI) - # by allowing to set only this job instead of listing many others. - # It also makes it easier to rename or parametrise jobs (using matrix) - # which requires changes in branch protection rules - conclusion: - name: Conclusion - needs: [ e2e-tests ] - if: always() - runs-on: ubuntu-22.04 - steps: - - name: Harden Runner - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit - - - name: Fail the job if any of the dependencies do not succeed or are skipped - run: exit 1 - if: | - contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 0008d7649..2c4ec0201 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -24,13 +24,6 @@ jobs: - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version-file: 'go.mod' - - uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6.5.2 - with: - # Required: the version of golangci-lint is required and - # should be specified with patch version. - version: v1.64.5 - args: --timeout 5m - github-token: ${{ secrets.GITHUB_TOKEN }} check-go-mod-tidy: name: check go mod tidy @@ -49,14 +42,6 @@ jobs: - name: go mod tidy run: | go mod tidy - - name: check diff - run: | - if ! test -z "$(git ls-files --exclude-standard --others .)$(git diff .)"; then - git ls-files --exclude-standard --others . - git diff . - echo "ERROR: 'go mod tidy' modified the source tree." - exit 1 - fi generated: name: check generated files @@ -72,14 +57,6 @@ jobs: - name: make generate run: | make generate - - name: check diff - run: | - if ! test -z "$(git ls-files --exclude-standard --others .)$(git diff .)"; then - git ls-files --exclude-standard --others . - git diff . - echo "ERROR: 'make generate' modified the source tree." - exit 1 - fi codespell: name: check spelling with codespell diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index da08d633d..615a14f23 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -56,16 +56,8 @@ jobs: archs: '["amd64","arm64"]' secrets: inherit - e2e: - needs: [ get-tag, build-images, build-test-vm ] - uses: ./.github/workflows/e2e-test.yaml - with: - tag: ${{ needs.get-tag.outputs.tag }} - push-yamls: true - secrets: inherit - release: - needs: [ get-tag, e2e ] + needs: [ get-tag ] runs-on: ubuntu-latest steps: - name: Harden Runner @@ -79,38 +71,3 @@ jobs: uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4.1.9 with: pattern: 'vm-builder-*' - - - name: download manifests - uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4.1.9 - with: - name: rendered_manifests - # files in the artifact will be expanded into the directory 'rendered_manifests'. - # See e2e-test.yaml: the individual yamls are flattened inside the artifact. - path: rendered_manifests - - - name: rename vm-builder files - run: | - # add the arch-specific suffix to the binary name, so they don't conflict when used as - # assets attached to the release. 
- mv vm-builder-amd64/vm-builder vm-builder-amd64/vm-builder-amd64 - mv vm-builder-arm64/vm-builder vm-builder-arm64/vm-builder-arm64 - - - name: github release - if: ${{ needs.get-tag.outputs.dry-run == 'false' }} - uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1 - with: - fail_on_unmatched_files: true - files: | - vm-builder-amd64/vm-builder-amd64 - vm-builder-arm64/vm-builder-arm64 - rendered_manifests/autoscale-scheduler.yaml - rendered_manifests/autoscaler-agent.yaml - rendered_manifests/neonvm.yaml - rendered_manifests/neonvm-controller.yaml - rendered_manifests/neonvm-vxlan-controller.yaml - rendered_manifests/neonvm-runner-image-loader.yaml - rendered_manifests/multus-dev.yaml - rendered_manifests/multus-aks.yaml - rendered_manifests/multus-eks.yaml - rendered_manifests/whereabouts.yaml - vmscrape.yaml diff --git a/Makefile b/Makefile index 1b590e690..7cded1f2e 100644 --- a/Makefile +++ b/Makefile @@ -89,7 +89,7 @@ help: ## Display this help. # * WebhookConfiguration, ClusterRole, and CustomResourceDefinition objects # * Go client .PHONY: generate -generate: ## Generate boilerplate DeepCopy methods, manifests, and Go client +generate: desugar ## Generate boilerplate DeepCopy methods, manifests, and Go client # Use uid and gid of current user to avoid mismatched permissions set -e ; \ rm -rf neonvm/client neonvm/apis/neonvm/v1/zz_generated.deepcopy.go @@ -155,7 +155,7 @@ show-coverage: coverage-html ##@ Build .PHONY: build -build: vet bin/vm-builder ## Build all neonvm binaries. +build: desugar vet bin/vm-builder ## Build all neonvm binaries. GOOS=linux go build -o bin/controller neonvm-controller/cmd/*.go GOOS=linux go build -o bin/vxlan-controller neonvm-vxlan-controller/cmd/*.go GOOS=linux go build -o bin/runner neonvm-runner/cmd/*.go @@ -163,6 +163,14 @@ build: vet bin/vm-builder ## Build all neonvm binaries. GOOS=linux go build -o bin/autoscaler-agent autoscaler-agent/cmd/*.go GOOS=linux go build -o bin/scheduler autoscale-scheduler/cmd/*.go +.PHONY: resugar +resugar: + ./scripts/resugar.sh + +.PHONY: desugar +desugar: + ./scripts/desugar.sh + .PHONY: bin/vm-builder bin/vm-builder: ## Build vm-builder binary. 
CGO_ENABLED=0 go build -o bin/vm-builder -ldflags "-X main.Version=${GIT_INFO} -X main.NeonvmDaemonImage=${IMG_DAEMON}" vm-builder/main.go @@ -189,7 +197,7 @@ docker-push: docker-build ## Push docker images to docker registry docker push -q $(IMG_AUTOSCALER_AGENT) .PHONY: docker-build-go-base -docker-build-go-base: +docker-build-go-base: desugar docker build \ --tag $(GO_BASE_IMG) \ --file go-base.Dockerfile \ diff --git a/pkg/agent/billing/billing.go b/pkg/agent/billing/billing.go index 6d249d7c7..53fb13002 100644 --- a/pkg/agent/billing/billing.go +++ b/pkg/agent/billing/billing.go @@ -78,8 +78,7 @@ func NewMetricsCollector( ) (*MetricsCollector, error) { logger := parentLogger.Named("billing") - clients, err := createClients(ctx, logger, conf.Clients) - if err != nil { + clients := createClients(ctx, logger, conf.Clients) handle err { return nil, err } diff --git a/pkg/agent/billing/clients.go b/pkg/agent/billing/clients.go index 10fec4498..17a290dc5 100644 --- a/pkg/agent/billing/clients.go +++ b/pkg/agent/billing/clients.go @@ -59,8 +59,7 @@ func createClients(ctx context.Context, logger *zap.Logger, cfg ClientsConfig) ( } if c := cfg.AzureBlob; c != nil { generateKey := newBlobStorageKeyGenerator(c.PrefixInContainer) - client, err := reporting.NewAzureBlobStorageClient(c.AzureBlobStorageClientConfig, generateKey) - if err != nil { + client := reporting.NewAzureBlobStorageClient(c.AzureBlobStorageClientConfig, generateKey) handle err { return nil, fmt.Errorf("error creating Azure Blob Storage client: %w", err) } logger.Info("Created Azure Blob Storage client for billing events", zap.Any("config", c)) @@ -74,8 +73,7 @@ func createClients(ctx context.Context, logger *zap.Logger, cfg ClientsConfig) ( } if c := cfg.S3; c != nil { generateKey := newBlobStorageKeyGenerator(c.PrefixInBucket) - client, err := reporting.NewS3Client(ctx, c.S3ClientConfig, generateKey) - if err != nil { + client := reporting.NewS3Client(ctx, c.S3ClientConfig, generateKey) handle err { return nil, fmt.Errorf("error creating S3 client: %w", err) } logger.Info("Created S3 client for billing events", zap.Any("config", c)) diff --git a/pkg/agent/config.go b/pkg/agent/config.go index b89c697f2..2fe0bcd91 100644 --- a/pkg/agent/config.go +++ b/pkg/agent/config.go @@ -143,8 +143,7 @@ type NeonVMConfig struct { } func ReadConfig(path string) (*Config, error) { - file, err := os.Open(path) - if err != nil { + file := os.Open(path) handle err { return nil, fmt.Errorf("Error opening config file %q: %w", path, err) } diff --git a/pkg/agent/core/metrics.go b/pkg/agent/core/metrics.go index d03669caf..0e6e3b62f 100644 --- a/pkg/agent/core/metrics.go +++ b/pkg/agent/core/metrics.go @@ -51,8 +51,7 @@ type FromPrometheus interface { // FromPrometheus to populate it before returning. 
func ParseMetrics(content io.Reader, metrics FromPrometheus) error { var parser promfmt.TextParser - mfs, err := parser.TextToMetricFamilies(content) - if err != nil { + mfs := parser.TextToMetricFamilies(content) handle err { return fmt.Errorf("failed to parse content as prometheus text format: %w", err) } @@ -178,8 +177,7 @@ func extractWorkingSetSizeWindows(mfs map[string]*promtypes.MetricFamily) ([]flo return nil, fmt.Errorf("metric missing label %q", durationLabel) } - durationSeconds, err := strconv.Atoi(m.Label[durationIndex].GetValue()) - if err != nil { + durationSeconds := strconv.Atoi(m.Label[durationIndex].GetValue()) handle err { return nil, fmt.Errorf("couldn't parse metric's %q label as int: %w", durationLabel, err) } diff --git a/pkg/agent/core/state_test.go b/pkg/agent/core/state_test.go index de142caf0..d28f2dade 100644 --- a/pkg/agent/core/state_test.go +++ b/pkg/agent/core/state_test.go @@ -399,8 +399,7 @@ func doInitialPluginRequest( // helper function to parse a duration func duration(s string) time.Duration { - d, err := time.ParseDuration(s) - if err != nil { + d := time.ParseDuration(s) handle err { panic(fmt.Errorf("failed to parse duration: %w", err)) } return d diff --git a/pkg/agent/core/testhelpers/clock.go b/pkg/agent/core/testhelpers/clock.go index 8ba827b89..3dcd66d15 100644 --- a/pkg/agent/core/testhelpers/clock.go +++ b/pkg/agent/core/testhelpers/clock.go @@ -18,8 +18,7 @@ type FakeClock struct { // NewFakeClock creates a new fake clock, with the initial time set to an unspecified, round number. func NewFakeClock(t *testing.T) *FakeClock { - base, err := time.Parse(time.RFC3339, "2000-01-01T00:00:00Z") // a nice round number, to make things easier - if err != nil { + base := time.Parse(time.RFC3339, "2000-01-01T00:00:00Z") // a nice round number, to make things easier handle err { panic(err) } diff --git a/pkg/agent/dispatcher.go b/pkg/agent/dispatcher.go index 51d88b5a5..a8c7fb2cb 100644 --- a/pkg/agent/dispatcher.go +++ b/pkg/agent/dispatcher.go @@ -96,8 +96,7 @@ func NewDispatcher( }() connectTimeout := time.Second * time.Duration(runner.global.config.Monitor.ConnectionTimeoutSeconds) - conn, protoVersion, err := connectToMonitor(ctx, logger, addr, connectTimeout) - if err != nil { + conn, protoVersion := connectToMonitor(ctx, logger, addr, connectTimeout) handle err { return nil, err } @@ -239,8 +238,7 @@ func connectToMonitor( // We do not need to close the response body according to docs. // Doing so causes memory bugs. - c, _, err := websocket.Dial(ctx, addr, nil) //nolint:bodyclose // see comment above - if err != nil { + c, _ := websocket.Dial(ctx, addr, nil) //nolint:bodyclose // see comment above handle err { return nil, nil, fmt.Errorf("error establishing websocket connection to %s: %w", addr, err) } @@ -321,8 +319,7 @@ func (disp *Dispatcher) lenWaiters() int { // Send a message down the connection. Only call this method with types that // SerializeMonitorMessage can handle. 
func (disp *Dispatcher) send(ctx context.Context, logger *zap.Logger, id uint64, message any) error { - data, err := api.SerializeMonitorMessage(message, id) - if err != nil { + data := api.SerializeMonitorMessage(message, id) handle err { return fmt.Errorf("error serializing message: %w", err) } // wsjson.Write serializes whatever is passed in, and go serializes []byte @@ -448,15 +445,13 @@ func (disp *Dispatcher) HandleMessage( return fmt.Errorf("Error deserializing message: %q", string(message)) } - typeStr, err := extractField[string](unstructured, "type") - if err != nil { + typeStr := extractField[string](unstructured, "type") handle err { return fmt.Errorf("Error extracting 'type' field: %w", err) } // go thinks all json numbers are float64 so we first deserialize to that to // avoid the type error, then cast to uint64 - f, err := extractField[float64](unstructured, "id") - if err != nil { + f := extractField[float64](unstructured, "id") handle err { return fmt.Errorf("Error extracting 'id field: %w", err) } id := uint64(*f) diff --git a/pkg/agent/dumpstate.go b/pkg/agent/dumpstate.go index 30e2a6745..3ffcf6a3f 100644 --- a/pkg/agent/dumpstate.go +++ b/pkg/agent/dumpstate.go @@ -27,8 +27,7 @@ type StateDump struct { func (s *agentState) StartDumpStateServer(shutdownCtx context.Context, logger *zap.Logger, config *DumpStateConfig) error { // Manually start the TCP listener so we can minimize errors in the background thread. addr := net.TCPAddr{IP: net.IPv4zero, Port: int(config.Port)} - listener, err := net.ListenTCP("tcp", &addr) - if err != nil { + listener := net.ListenTCP("tcp", &addr) handle err { return fmt.Errorf("Error binding to %v", addr) } @@ -41,8 +40,7 @@ func (s *agentState) StartDumpStateServer(shutdownCtx context.Context, logger *z ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - state, err := s.DumpState(ctx, shutdownCtx.Err() != nil) - if err != nil { + state := s.DumpState(ctx, shutdownCtx.Err() != nil) handle err { if ctx.Err() != nil && errors.Is(ctx.Err(), context.DeadlineExceeded) { totalDuration := time.Since(startTime) return nil, 500, fmt.Errorf("timed out after %s while getting state", totalDuration) diff --git a/pkg/agent/entrypoint.go b/pkg/agent/entrypoint.go index fb33eb57f..bd3b82deb 100644 --- a/pkg/agent/entrypoint.go +++ b/pkg/agent/entrypoint.go @@ -40,22 +40,19 @@ func (r MainRunner) Run(logger *zap.Logger, ctx context.Context) error { watchMetrics := watch.NewMetrics("autoscaling_agent_watchers", globalPromReg) logger.Info("Starting VM watcher") - vmWatchStore, err := startVMWatcher(ctx, logger, r.Config, r.VMClient, watchMetrics, perVMMetrics, r.EnvArgs.K8sNodeName, pushToQueue) - if err != nil { + vmWatchStore := startVMWatcher(ctx, logger, r.Config, r.VMClient, watchMetrics, perVMMetrics, r.EnvArgs.K8sNodeName, pushToQueue) handle err { return fmt.Errorf("Error starting VM watcher: %w", err) } defer vmWatchStore.Stop() logger.Info("VM watcher started") - schedTracker, err := schedwatch.StartSchedulerWatcher(ctx, logger, r.KubeClient, watchMetrics, r.Config.Scheduler.SchedulerName) - if err != nil { + schedTracker := schedwatch.StartSchedulerWatcher(ctx, logger, r.KubeClient, watchMetrics, r.Config.Scheduler.SchedulerName) handle err { return fmt.Errorf("Starting scheduler watch server: %w", err) } defer schedTracker.Stop() scalingEventsMetrics := scalingevents.NewPromMetrics(globalPromReg) - scalingReporter, err := scalingevents.NewReporter(ctx, logger, &r.Config.ScalingEvents, scalingEventsMetrics) - if err != nil { + 
scalingReporter := scalingevents.NewReporter(ctx, logger, &r.Config.ScalingEvents, scalingEventsMetrics) handle err { return fmt.Errorf("Error creating scaling events reporter: %w", err) } @@ -88,8 +85,7 @@ func (r MainRunner) Run(logger *zap.Logger, ctx context.Context) error { } } - mc, err := billing.NewMetricsCollector(ctx, logger, &r.Config.Billing, billingMetrics) - if err != nil { + mc := billing.NewMetricsCollector(ctx, logger, &r.Config.Billing, billingMetrics) handle err { return fmt.Errorf("error creating billing metrics collector: %w", err) } @@ -103,8 +99,7 @@ func (r MainRunner) Run(logger *zap.Logger, ctx context.Context) error { tg.Go("main-loop", func(logger *zap.Logger) error { logger.Info("Entering main loop") for { - event, err := vmEventQueue.Wait(ctx) - if err != nil { + event := vmEventQueue.Wait(ctx) handle err { if ctx.Err() != nil { // treat context canceled as a "normal" exit (because it is) return nil diff --git a/pkg/agent/runner.go b/pkg/agent/runner.go index 0ed2e98b9..0a91d3405 100644 --- a/pkg/agent/runner.go +++ b/pkg/agent/runner.go @@ -648,8 +648,7 @@ func (r *Runner) connectToMonitorLoop( } lastStart = time.Now() - dispatcher, err := NewDispatcher(ctx, logger, addr, r, callbacks.upscaleRequested) - if err != nil { + dispatcher := NewDispatcher(ctx, logger, addr, r, callbacks.upscaleRequested) handle err { logger.Error("Failed to connect to vm-monitor", zap.String("addr", addr), zap.Error(err)) continue } @@ -695,8 +694,7 @@ func doMetricsRequest( reqCtx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, url, bytes.NewReader(nil)) - if err != nil { + req := http.NewRequestWithContext(reqCtx, http.MethodGet, url, bytes.NewReader(nil)) handle err { panic(fmt.Errorf("Error constructing metrics request to %q: %w", url, err)) } @@ -740,8 +738,7 @@ func (r *Runner) doNeonVMRequest( Value: targetRevision, }} - patchPayload, err := json.Marshal(patches) - if err != nil { + patchPayload := json.Marshal(patches) handle err { panic(fmt.Errorf("Error marshalling JSON patch: %w", err)) } @@ -868,8 +865,7 @@ func (r *Runner) DoSchedulerRequest( return nil, err } - reqBody, err := json.Marshal(reqData) - if err != nil { + reqBody := json.Marshal(reqData) handle err { return nil, fmt.Errorf("Error encoding request JSON: %w", err) } @@ -879,16 +875,14 @@ func (r *Runner) DoSchedulerRequest( url := fmt.Sprintf("http://%s:%d/", sched.IP, r.global.config.Scheduler.RequestPort) - request, err := http.NewRequestWithContext(reqCtx, http.MethodPost, url, bytes.NewReader(reqBody)) - if err != nil { + request := http.NewRequestWithContext(reqCtx, http.MethodPost, url, bytes.NewReader(reqBody)) handle err { return nil, fmt.Errorf("Error building request to %q: %w", url, err) } request.Header.Set("content-type", "application/json") logger.Debug("Sending request to scheduler", zap.Any("request", reqData)) - response, err := http.DefaultClient.Do(request) - if err != nil { + response := http.DefaultClient.Do(request) handle err { description := fmt.Sprintf("[error doing request: %s]", util.RootError(err)) r.global.metrics.schedulerRequests.WithLabelValues(description).Inc() return nil, fmt.Errorf("Error doing request: %w", err) @@ -897,8 +891,7 @@ func (r *Runner) DoSchedulerRequest( r.global.metrics.schedulerRequests.WithLabelValues(strconv.Itoa(response.StatusCode)).Inc() - respBody, err := io.ReadAll(response.Body) - if err != nil { + respBody := io.ReadAll(response.Body) handle err { return nil, 
fmt.Errorf("Error reading body for response: %w", err) } diff --git a/pkg/agent/scalingevents/clients.go b/pkg/agent/scalingevents/clients.go index 91cfcdcdd..7d2c8850b 100644 --- a/pkg/agent/scalingevents/clients.go +++ b/pkg/agent/scalingevents/clients.go @@ -35,8 +35,7 @@ func createClients(ctx context.Context, logger *zap.Logger, cfg ClientsConfig) ( if c := cfg.AzureBlob; c != nil { generateKey := newBlobStorageKeyGenerator(c.PrefixInContainer) - client, err := reporting.NewAzureBlobStorageClient(c.AzureBlobStorageClientConfig, generateKey) - if err != nil { + client := reporting.NewAzureBlobStorageClient(c.AzureBlobStorageClientConfig, generateKey) handle err { return nil, fmt.Errorf("error creating Azure Blob Storage client: %w", err) } logger.Info("Created Azure Blob Storage client for scaling events", zap.Any("config", c)) @@ -50,8 +49,7 @@ func createClients(ctx context.Context, logger *zap.Logger, cfg ClientsConfig) ( } if c := cfg.S3; c != nil { generateKey := newBlobStorageKeyGenerator(c.PrefixInBucket) - client, err := reporting.NewS3Client(ctx, c.S3ClientConfig, generateKey) - if err != nil { + client := reporting.NewS3Client(ctx, c.S3ClientConfig, generateKey) handle err { return nil, fmt.Errorf("error creating S3 client: %w", err) } logger.Info("Created S3 client for scaling events", zap.Any("config", c)) diff --git a/pkg/agent/scalingevents/reporter.go b/pkg/agent/scalingevents/reporter.go index bacbc8584..14fd8aff0 100644 --- a/pkg/agent/scalingevents/reporter.go +++ b/pkg/agent/scalingevents/reporter.go @@ -67,8 +67,7 @@ func NewReporter( ) (*Reporter, error) { logger := parentLogger.Named("scalingevents") - clients, err := createClients(ctx, logger, conf.Clients) - if err != nil { + clients := createClients(ctx, logger, conf.Clients) handle err { return nil, err } diff --git a/pkg/agent/watch.go b/pkg/agent/watch.go index efa02ac45..0b1ec0fa9 100644 --- a/pkg/agent/watch.go +++ b/pkg/agent/watch.go @@ -94,8 +94,7 @@ func startVMWatcher( setVMMetrics(perVMMetrics, vm, nodeName) if vmIsOurResponsibility(vm, config, nodeName) { - event, err := makeVMEvent(logger, vm, vmEventAdded) - if err != nil { + event := makeVMEvent(logger, vm, vmEventAdded) handle err { logger.Error( "Failed to create vmEvent for added VM", util.VMNameFields(vm), zap.Error(err), @@ -128,8 +127,7 @@ func startVMWatcher( eventKind = vmEventUpdated } - event, err := makeVMEvent(logger, vmForEvent, eventKind) - if err != nil { + event := makeVMEvent(logger, vmForEvent, eventKind) handle err { logger.Error( "Failed to create vmEvent for updated VM", util.VMNameFields(vmForEvent), zap.Error(err), @@ -143,8 +141,7 @@ func startVMWatcher( deleteVMMetrics(perVMMetrics, vm, nodeName) if vmIsOurResponsibility(vm, config, nodeName) { - event, err := makeVMEvent(logger, vm, vmEventDeleted) - if err != nil { + event := makeVMEvent(logger, vm, vmEventDeleted) handle err { logger.Error( "Failed to create vmEvent for deleted VM", util.VMNameFields(vm), zap.Error(err), @@ -159,8 +156,7 @@ func startVMWatcher( } func makeVMEvent(logger *zap.Logger, vm *vmv1.VirtualMachine, kind vmEventKind) (vmEvent, error) { - info, err := api.ExtractVmInfo(logger, vm) - if err != nil { + info := api.ExtractVmInfo(logger, vm) handle err { return vmEvent{}, fmt.Errorf("Error extracting VM info: %w", err) } diff --git a/pkg/api/vminfo.go b/pkg/api/vminfo.go index 092f45d8e..8c11cf1a6 100644 --- a/pkg/api/vminfo.go +++ b/pkg/api/vminfo.go @@ -182,8 +182,7 @@ func (vm VmInfo) NamespacedName() util.NamespacedName { func 
ExtractVmInfo(logger *zap.Logger, vm *vmv1.VirtualMachine) (*VmInfo, error) { logger = logger.With(util.VMNameFields(vm)) - info, err := extractVmInfoGeneric(logger, vm.Name, vm, vm.Spec.Resources()) - if err != nil { + info := extractVmInfoGeneric(logger, vm.Name, vm, vm.Spec.Resources()) handle err { return nil, fmt.Errorf("error extracting VM info: %w", err) } @@ -194,8 +193,7 @@ func ExtractVmInfo(logger *zap.Logger, vm *vmv1.VirtualMachine) (*VmInfo, error) func ExtractVmInfoFromPod(logger *zap.Logger, pod *corev1.Pod) (*VmInfo, error) { logger = logger.With(util.PodNameFields(pod)) - resources, err := vmv1.VirtualMachineResourcesFromPod(pod) - if err != nil { + resources := vmv1.VirtualMachineResourcesFromPod(pod) handle err { return nil, err } diff --git a/pkg/neonvm/controllers/certs.go b/pkg/neonvm/controllers/certs.go index d6d8139c2..60a1980cd 100644 --- a/pkg/neonvm/controllers/certs.go +++ b/pkg/neonvm/controllers/certs.go @@ -45,8 +45,7 @@ func (r *VMReconciler) reconcileCertificateSecret(ctx context.Context, vm *vmv1. certNotFound = true } else { // check the certificate expiration - certs, err := pki.DecodeX509CertificateChainBytes(certSecret.Data[corev1.TLSCertKey]) - if err != nil { + certs := pki.DecodeX509CertificateChainBytes(certSecret.Data[corev1.TLSCertKey]) handle err { log.Error(err, "Failed to parse VM certificate") return nil, err } @@ -78,8 +77,7 @@ func (r *VMReconciler) reconcileCertificateSecret(ctx context.Context, vm *vmv1. } } - key, err := pki.DecodePrivateKeyBytes(tmpKeySecret.Data[corev1.TLSPrivateKeyKey]) - if err != nil { + key := pki.DecodePrivateKeyBytes(tmpKeySecret.Data[corev1.TLSPrivateKeyKey]) handle err { log.Error(err, "Failed to decode TLS private key") return nil, err } @@ -154,15 +152,13 @@ func (r *VMReconciler) createTlsTmpSecret(ctx context.Context, vm *vmv1.VirtualM log := log.FromContext(ctx) // create a new key for this VM - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { + key := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) handle err { log.Error(err, "Failed to generate TLS private key for VirtualMachine") return nil, err } // Define the secret - tmpKeySecret, err := r.tmpKeySecretForVirtualMachine(vm, key) - if err != nil { + tmpKeySecret := r.tmpKeySecretForVirtualMachine(vm, key) handle err { log.Error(err, "Failed to define new temporary TLS private key secret resource for VirtualMachine") return nil, err } @@ -180,8 +176,7 @@ func (r *VMReconciler) createCertificateRequest(ctx context.Context, vm *vmv1.Vi log := log.FromContext(ctx) // Define a new cert req - certificateReq, err := r.certReqForVirtualMachine(vm, key) - if err != nil { + certificateReq := r.certReqForVirtualMachine(vm, key) handle err { log.Error(err, "Failed to define new Certificate resource for VirtualMachine") return nil, err } @@ -199,8 +194,7 @@ func (r *VMReconciler) createCertificateRequest(ctx context.Context, vm *vmv1.Vi func (r *VMReconciler) createTlsSecret(ctx context.Context, vm *vmv1.VirtualMachine, key crypto.Signer, certificateReq *certv1.CertificateRequest) error { log := log.FromContext(ctx) - certSecret, err := r.certSecretForVirtualMachine(vm, key, certificateReq.Status.Certificate) - if err != nil { + certSecret := r.certSecretForVirtualMachine(vm, key, certificateReq.Status.Certificate) handle err { log.Error(err, "Failed to define new TLS secret resource for VirtualMachine") return err } @@ -217,8 +211,7 @@ func (r *VMReconciler) createTlsSecret(ctx context.Context, vm *vmv1.VirtualMach func (r 
*VMReconciler) updateTlsSecret(ctx context.Context, key crypto.Signer, certificateReq *certv1.CertificateRequest, certSecret *corev1.Secret) error { log := log.FromContext(ctx) - encodedKey, err := pki.EncodePrivateKey(key, certv1.PKCS1) - if err != nil { + encodedKey := pki.EncodePrivateKey(key, certv1.PKCS1) handle err { return err } certSecret.Data[corev1.TLSPrivateKeyKey] = encodedKey @@ -284,8 +277,7 @@ func tmpKeySecretSpec( vm *vmv1.VirtualMachine, key crypto.PrivateKey, ) (*corev1.Secret, error) { - encodedKey, err := pki.EncodePrivateKey(key, certv1.PKCS1) - if err != nil { + encodedKey := pki.EncodePrivateKey(key, certv1.PKCS1) handle err { return nil, err } @@ -307,8 +299,7 @@ func certSecretSpec( key crypto.PrivateKey, cert []byte, ) (*corev1.Secret, error) { - encodedKey, err := pki.EncodePrivateKey(key, certv1.PKCS1) - if err != nil { + encodedKey := pki.EncodePrivateKey(key, certv1.PKCS1) handle err { return nil, err } @@ -335,13 +326,11 @@ func certReqSpec( Group: certmanager.GroupName, } - cr, err := certSpecCSR(vm) - if err != nil { + cr := certSpecCSR(vm) handle err { return nil, err } - csrDER, err := x509.CreateCertificateRequest(rand.Reader, cr, key) - if err != nil { + csrDER := x509.CreateCertificateRequest(rand.Reader, cr, key) handle err { return nil, err } diff --git a/pkg/neonvm/controllers/runner_cpu_limits.go b/pkg/neonvm/controllers/runner_cpu_limits.go index e0b45e34c..5e72fc154 100644 --- a/pkg/neonvm/controllers/runner_cpu_limits.go +++ b/pkg/neonvm/controllers/runner_cpu_limits.go @@ -21,19 +21,16 @@ func setRunnerCPULimits(ctx context.Context, vm *vmv1.VirtualMachine, cpu vmv1.M update := api.VCPUChange{VCPUs: cpu} - data, err := json.Marshal(update) - if err != nil { + data := json.Marshal(update) handle err { return err } - req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(data)) - if err != nil { + req := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(data)) handle err { return err } req.Header.Set("Content-Type", "application/json") - resp, err := http.DefaultClient.Do(req) - if err != nil { + resp := http.DefaultClient.Do(req) handle err { return err } defer resp.Body.Close() @@ -50,13 +47,11 @@ func getRunnerCPULimits(ctx context.Context, vm *vmv1.VirtualMachine) (*api.VCPU url := fmt.Sprintf("http://%s:%d/cpu_current", vm.Status.PodIP, vm.Spec.RunnerPort) - req, err := http.NewRequestWithContext(ctx, "GET", url, nil) - if err != nil { + req := http.NewRequestWithContext(ctx, "GET", url, nil) handle err { return nil, err } - resp, err := http.DefaultClient.Do(req) - if err != nil { + resp := http.DefaultClient.Do(req) handle err { return nil, err } diff --git a/pkg/neonvm/controllers/vm_controller.go b/pkg/neonvm/controllers/vm_controller.go index 268cf6fc2..cf7ddbcf2 100644 --- a/pkg/neonvm/controllers/vm_controller.go +++ b/pkg/neonvm/controllers/vm_controller.go @@ -235,8 +235,7 @@ func (r *VMReconciler) doFinalizerOperationsForVirtualMachine(ctx context.Contex // Release overlay IP address if vm.Spec.ExtraNetwork != nil { - ip, err := r.IPAM.ReleaseIP(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}) - if err != nil { + ip := r.IPAM.ReleaseIP(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}) handle err { return fmt.Errorf("fail to release overlay IP: %w", err) } log.Info(fmt.Sprintf("Released overlay IP %s", ip.String())) @@ -300,8 +299,7 @@ func (r *VMReconciler) acquireOverlayIP(ctx context.Context, vm *vmv1.VirtualMac } log := log.FromContext(ctx) - ip, err := 
r.IPAM.AcquireIP(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}) - if err != nil { + ip := r.IPAM.AcquireIP(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}) handle err { return err } vm.Status.ExtraNetIP = ip.IP.String() @@ -336,8 +334,7 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine) // check if the certificate needs renewal for this running VM. if enableTLS { - certSecret, err := r.reconcileCertificateSecret(ctx, vm) - if err != nil { + certSecret := r.reconcileCertificateSecret(ctx, vm) handle err { return err } // VM is not ready to start yet. @@ -407,8 +404,7 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine) } // Define a new pod - pod, err := r.podForVirtualMachine(vm, sshSecret) - if err != nil { + pod := r.podForVirtualMachine(vm, sshSecret) handle err { log.Error(err, "Failed to define new Pod resource for VirtualMachine") return err } @@ -517,8 +513,7 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine) vm.Status.Node = vmRunner.Spec.NodeName // get cgroups CPU details from runner pod - cgroupUsage, err := getRunnerCPULimits(ctx, vm) - if err != nil { + cgroupUsage := getRunnerCPULimits(ctx, vm) handle err { log.Error(err, "Failed to get CPU details from runner", "VirtualMachine", vm.Name) return err } @@ -534,8 +529,7 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine) case vmv1.CpuScalingModeSysfs: pluggedCPU = cgroupUsage.VCPUs.RoundedUp() case vmv1.CpuScalingModeQMP: - cpuSlotsPlugged, _, err := QmpGetCpus(QmpAddr(vm)) - if err != nil { + cpuSlotsPlugged, _ := QmpGetCpus(QmpAddr(vm)) handle err { log.Error(err, "Failed to get CPU details from VirtualMachine", "VirtualMachine", vm.Name) return err } @@ -550,8 +544,7 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine) r.updateVMStatusCPU(ctx, vm, vmRunner, pluggedCPU, cgroupUsage) // get Memory details from hypervisor and update VM status - memorySize, err := QmpGetMemorySize(QmpAddr(vm)) - if err != nil { + memorySize := QmpGetMemorySize(QmpAddr(vm)) handle err { log.Error(err, "Failed to get Memory details from VirtualMachine", "VirtualMachine", vm.Name) return err } @@ -653,8 +646,7 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine) // do nothing } - cpuScaled, err := r.handleCPUScaling(ctx, vm, vmRunner) - if err != nil { + cpuScaled := r.handleCPUScaling(ctx, vm, vmRunner) handle err { log.Error(err, "failed to handle CPU scaling") return err } @@ -747,8 +739,7 @@ func (r *VMReconciler) doVirtioMemScaling( targetSlotCount := int(vm.Spec.Guest.MemorySlots.Use - vm.Spec.Guest.MemorySlots.Min) targetVirtioMemSize := int64(targetSlotCount) * vm.Spec.Guest.MemorySlotSize.Value() - previousTarget, err := QmpSetVirtioMem(vm, targetVirtioMemSize) - if err != nil { + previousTarget := QmpSetVirtioMem(vm, targetVirtioMemSize) handle err { return false, err } @@ -765,8 +756,7 @@ func (r *VMReconciler) doVirtioMemScaling( // Maybe we're already using the amount we want? // Update the status to reflect the current size - and if it matches goalTotalSize, ram // scaling is done. 
- currentTotalSize, err := QmpGetMemorySize(QmpAddr(vm)) - if err != nil { + currentTotalSize := QmpGetMemorySize(QmpAddr(vm)) handle err { return false, err } @@ -957,8 +947,7 @@ func updatePodMetadataIfNecessary(ctx context.Context, c client.Client, vm *vmv1 return nil } - patchData, err := json.Marshal(patches) - if err != nil { + patchData := json.Marshal(patches) handle err { panic(fmt.Errorf("error marshalling JSON patch: %w", err)) } @@ -992,8 +981,7 @@ func extractVirtualMachineUsageJSON(spec vmv1.VirtualMachineSpec) string { Memory: resource.NewQuantity(spec.Guest.MemorySlotSize.Value()*int64(memorySlots), resource.BinarySI), } - usageJSON, err := json.Marshal(usage) - if err != nil { + usageJSON := json.Marshal(usage) handle err { panic(fmt.Errorf("error marshalling JSON: %w", err)) } @@ -1001,8 +989,7 @@ func extractVirtualMachineUsageJSON(spec vmv1.VirtualMachineSpec) string { } func extractVirtualMachineResourcesJSON(spec vmv1.VirtualMachineSpec) string { - resourcesJSON, err := json.Marshal(spec.Resources()) - if err != nil { + resourcesJSON := json.Marshal(spec.Resources()) handle err { panic(fmt.Errorf("error marshalling JSON: %w", err)) } @@ -1014,8 +1001,7 @@ func extractVirtualMachineOvercommitSettingsJSON(spec vmv1.VirtualMachineSpec) * return nil } - settingsJSON, err := json.Marshal(*spec.Overcommit) - if err != nil { + settingsJSON := json.Marshal(*spec.Overcommit) handle err { panic(fmt.Errorf("error marshalling JSON: %w", err)) } return lo.ToPtr(string(settingsJSON)) @@ -1026,8 +1012,7 @@ func (r *VMReconciler) podForVirtualMachine( vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, ) (*corev1.Pod, error) { - pod, err := podSpec(vm, sshSecret, r.Config) - if err != nil { + pod := podSpec(vm, sshSecret, r.Config) handle err { return nil, err } @@ -1041,8 +1026,7 @@ func (r *VMReconciler) podForVirtualMachine( } func (r *VMReconciler) sshSecretForVirtualMachine(vm *vmv1.VirtualMachine) (*corev1.Secret, error) { - secret, err := sshSecretSpec(vm) - if err != nil { + secret := sshSecretSpec(vm) handle err { return nil, err } @@ -1056,8 +1040,7 @@ func (r *VMReconciler) sshSecretForVirtualMachine(vm *vmv1.VirtualMachine) (*cor func sshSecretSpec(vm *vmv1.VirtualMachine) (*corev1.Secret, error) { // using ed25519 signatures it takes ~16us to finish - publicKey, privateKey, err := sshKeygen() - if err != nil { + publicKey, privateKey := sshKeygen() handle err { return nil, err } @@ -1082,8 +1065,7 @@ func (r *VMReconciler) certReqForVirtualMachine( vm *vmv1.VirtualMachine, key crypto.Signer, ) (*certv1.CertificateRequest, error) { - cert, err := certReqSpec(vm, key) - if err != nil { + cert := certReqSpec(vm, key) handle err { return nil, err } @@ -1101,8 +1083,7 @@ func (r *VMReconciler) tmpKeySecretForVirtualMachine( vm *vmv1.VirtualMachine, key crypto.Signer, ) (*corev1.Secret, error) { - secret, err := tmpKeySecretSpec(vm, key) - if err != nil { + secret := tmpKeySecretSpec(vm, key) handle err { return nil, err } @@ -1121,8 +1102,7 @@ func (r *VMReconciler) certSecretForVirtualMachine( key crypto.Signer, cert []byte, ) (*corev1.Secret, error) { - secret, err := certSecretSpec(vm, key, cert) - if err != nil { + secret := certSecretSpec(vm, key, cert) handle err { return nil, err } @@ -1231,18 +1211,15 @@ func podSpec( affinity := affinityForVirtualMachine(vm) // Get the Operand image - image, err := imageForVmRunner() - if err != nil { + image := imageForVmRunner() handle err { return nil, err } - vmSpecJson, err := json.Marshal(vm.Spec) - if err != nil { + vmSpecJson 
:= json.Marshal(vm.Spec) handle err { return nil, fmt.Errorf("marshal VM Spec: %w", err) } - vmStatusJson, err := json.Marshal(vm.Status) - if err != nil { + vmStatusJson := json.Marshal(vm.Status) handle err { return nil, fmt.Errorf("marshal VM Status: %w", err) } @@ -1710,8 +1687,7 @@ func GetNADConfig() *NADConfig { // slices. If an error occurs during key generation or encoding, it returns nil // for both keys and the error. func sshKeygen() (publicKeyBytes []byte, privateKeyBytes []byte, err error) { - publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) - if err != nil { + publicKey, privateKey := ed25519.GenerateKey(rand.Reader) handle err { return nil, nil, err } @@ -1729,8 +1705,7 @@ func sshKeygen() (publicKeyBytes []byte, privateKeyBytes []byte, err error) { } func encodePrivateKey(privateKey ed25519.PrivateKey) ([]byte, error) { - privBlock, err := ssh.MarshalPrivateKey(privateKey, "") - if err != nil { + privBlock := ssh.MarshalPrivateKey(privateKey, "") handle err { return nil, err } privatePEM := pem.EncodeToMemory(privBlock) @@ -1739,8 +1714,7 @@ func encodePrivateKey(privateKey ed25519.PrivateKey) ([]byte, error) { } func encodePublicKey(publicKey ed25519.PublicKey) ([]byte, error) { - sshPublicKey, err := ssh.NewPublicKey(publicKey) - if err != nil { + sshPublicKey := ssh.NewPublicKey(publicKey) handle err { return nil, err } diff --git a/pkg/neonvm/controllers/vm_controller_cpu_scaling.go b/pkg/neonvm/controllers/vm_controller_cpu_scaling.go index c3d7171fc..7ea5d553d 100644 --- a/pkg/neonvm/controllers/vm_controller_cpu_scaling.go +++ b/pkg/neonvm/controllers/vm_controller_cpu_scaling.go @@ -40,16 +40,14 @@ func (r *VMReconciler) handleCPUScalingQMP(ctx context.Context, vm *vmv1.Virtual specCPU := vm.Spec.Guest.CPUs.Use // get cgroups CPU details from runner pod - cgroupUsage, err := getRunnerCPULimits(ctx, vm) - if err != nil { + cgroupUsage := getRunnerCPULimits(ctx, vm) handle err { log.Error(err, "Failed to get CPU details from runner", "VirtualMachine", vm.Name) return false, err } // get CPU details from QEMU var pluggedCPU uint32 - cpuSlotsPlugged, _, err := QmpGetCpus(QmpAddr(vm)) - if err != nil { + cpuSlotsPlugged, _ := QmpGetCpus(QmpAddr(vm)) handle err { log.Error(err, "Failed to get CPU details from VirtualMachine", "VirtualMachine", vm.Name) return false, err } @@ -72,8 +70,7 @@ func (r *VMReconciler) handleCPUScalingQMP(ctx context.Context, vm *vmv1.Virtual } return false, nil } else if specCPU != cgroupUsage.VCPUs { - _, err := r.handleCgroupCPUUpdate(ctx, vm) - if err != nil { + _ := r.handleCgroupCPUUpdate(ctx, vm) handle err { log.Error(err, "Failed to update cgroup CPU", "VirtualMachine", vm.Name) return false, err } @@ -90,8 +87,7 @@ func (r *VMReconciler) handleCPUScalingSysfs(ctx context.Context, vm *vmv1.Virtu log := log.FromContext(ctx) specCPU := vm.Spec.Guest.CPUs.Use - cgroupUsage, err := getRunnerCPULimits(ctx, vm) - if err != nil { + cgroupUsage := getRunnerCPULimits(ctx, vm) handle err { log.Error(err, "Failed to get CPU details from runner", "VirtualMachine", vm.Name) return false, err } diff --git a/pkg/neonvm/controllers/vm_qmp_queries.go b/pkg/neonvm/controllers/vm_qmp_queries.go index 243663cc8..2e4984392 100644 --- a/pkg/neonvm/controllers/vm_qmp_queries.go +++ b/pkg/neonvm/controllers/vm_qmp_queries.go @@ -96,8 +96,7 @@ func QmpAddr(vm *vmv1.VirtualMachine) (ip string, port int32) { } func QmpConnect(ip string, port int32) (*qmp.SocketMonitor, error) { - mon, err := qmp.NewSocketMonitor("tcp", fmt.Sprintf("%s:%d", ip, 
port), 2*time.Second) - if err != nil { + mon := qmp.NewSocketMonitor("tcp", fmt.Sprintf("%s:%d", ip, port), 2*time.Second) handle err { return nil, err } if err := mon.Connect(); err != nil { @@ -108,15 +107,13 @@ func QmpConnect(ip string, port int32) (*qmp.SocketMonitor, error) { } func QmpGetCpus(ip string, port int32) ([]QmpCpuSlot, []QmpCpuSlot, error) { - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return nil, nil, err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "query-hotpluggable-cpus"}`) - raw, err := mon.Run(qmpcmd) - if err != nil { + raw := mon.Run(qmpcmd) handle err { return nil, nil, err } @@ -139,16 +136,14 @@ func QmpGetCpus(ip string, port int32) ([]QmpCpuSlot, []QmpCpuSlot, error) { } func QmpPlugCpu(ip string, port int32) error { - _, empty, err := QmpGetCpus(ip, port) - if err != nil { + _, empty := QmpGetCpus(ip, port) handle err { return err } if len(empty) == 0 { return errors.New("no empty slots for CPU hotplug") } - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? @@ -175,8 +170,7 @@ func QmpPlugCpu(ip string, port int32) error { } func QmpUnplugCpu(ip string, port int32) error { - plugged, _, err := QmpGetCpus(ip, port) - if err != nil { + plugged, _ := QmpGetCpus(ip, port) handle err { return err } @@ -193,8 +187,7 @@ func QmpUnplugCpu(ip string, port int32) error { return errors.New("there are no unpluggable CPUs") } - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? @@ -211,12 +204,10 @@ func QmpUnplugCpu(ip string, port int32) error { } func QmpSyncCpuToTarget(vm *vmv1.VirtualMachine, migration *vmv1.VirtualMachineMigration) error { - plugged, _, err := QmpGetCpus(QmpAddr(vm)) - if err != nil { + plugged, _ := QmpGetCpus(QmpAddr(vm)) handle err { return err } - pluggedInTarget, _, err := QmpGetCpus(migration.Status.TargetPodIP, vm.Spec.QMP) - if err != nil { + pluggedInTarget, _ := QmpGetCpus(migration.Status.TargetPodIP, vm.Spec.QMP) handle err { return err } if len(plugged) == len(pluggedInTarget) { @@ -224,8 +215,7 @@ func QmpSyncCpuToTarget(vm *vmv1.VirtualMachine, migration *vmv1.VirtualMachineM return nil } - target, err := QmpConnect(migration.Status.TargetPodIP, vm.Spec.QMP) - if err != nil { + target := QmpConnect(migration.Status.TargetPodIP, vm.Spec.QMP) handle err { return err } defer target.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? @@ -279,8 +269,7 @@ func QmpSetVirtioMem(vm *vmv1.VirtualMachine, targetVirtioMemSize int64) (previo return 0, nil } - mon, err := QmpConnect(QmpAddr(vm)) - if err != nil { + mon := QmpConnect(QmpAddr(vm)) handle err { return 0, err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? @@ -288,8 +277,7 @@ func QmpSetVirtioMem(vm *vmv1.VirtualMachine, targetVirtioMemSize int64) (previo // First, fetch current desired virtio-mem size. If it's the same as targetVirtioMemSize, then // we can report that it was already the same. 
cmd := []byte(`{"execute": "qom-get", "arguments": {"path": "vm0", "property": "requested-size"}}`) - raw, err := mon.Run(cmd) - if err != nil { + raw := mon.Run(cmd) handle err { return 0, err } result := struct { @@ -319,15 +307,13 @@ func QmpSetVirtioMem(vm *vmv1.VirtualMachine, targetVirtioMemSize int64) (previo } func QmpGetMemorySize(ip string, port int32) (*resource.Quantity, error) { - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return nil, err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "query-memory-size-summary"}`) - raw, err := mon.Run(qmpcmd) - if err != nil { + raw := mon.Run(qmpcmd) handle err { return nil, err } @@ -345,8 +331,7 @@ func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigrat // connect to source runner QMP s_ip := virtualmachinemigration.Status.SourcePodIP - smon, err := qmp.NewSocketMonitor("tcp", fmt.Sprintf("%s:%d", s_ip, port), 2*time.Second) - if err != nil { + smon := qmp.NewSocketMonitor("tcp", fmt.Sprintf("%s:%d", s_ip, port), 2*time.Second) handle err { return err } if err := smon.Connect(); err != nil { @@ -356,8 +341,7 @@ func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigrat // connect to target runner QMP t_ip := virtualmachinemigration.Status.TargetPodIP - tmon, err := qmp.NewSocketMonitor("tcp", fmt.Sprintf("%s:%d", t_ip, port), 2*time.Second) - if err != nil { + tmon := qmp.NewSocketMonitor("tcp", fmt.Sprintf("%s:%d", t_ip, port), 2*time.Second) handle err { return err } if err := tmon.Connect(); err != nil { @@ -457,15 +441,13 @@ func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigrat } func QmpGetMigrationInfo(ip string, port int32) (*MigrationInfo, error) { - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return nil, err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "query-migrate"}`) - raw, err := mon.Run(qmpcmd) - if err != nil { + raw := mon.Run(qmpcmd) handle err { return nil, err } @@ -478,8 +460,7 @@ func QmpGetMigrationInfo(ip string, port int32) (*MigrationInfo, error) { } func QmpCancelMigration(ip string, port int32) error { - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? @@ -494,8 +475,7 @@ func QmpCancelMigration(ip string, port int32) error { } func QmpQuit(ip string, port int32) error { - mon, err := QmpConnect(ip, port) - if err != nil { + mon := QmpConnect(ip, port) handle err { return err } defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? 
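
Note on the Go hunks in this diff: the `x := f() handle err { ... }` form is not standard Go. Together with the new `desugar`/`resugar` Makefile targets (which shell out to `./scripts/desugar.sh` and `./scripts/resugar.sh`, neither shown in this diff), it reads as a sugared error-handling syntax that is presumably rewritten back into an explicit `x, err := f(); if err != nil { ... }` check before the toolchain runs. A minimal sketch of that presumed correspondence, using the statement from the `ReadConfig` hunk in pkg/agent/config.go above; the package and function names here are hypothetical, illustration only, and not part of this change:

package example // hypothetical package, illustration only

import (
	"fmt"
	"os"
)

// Sugared form, as checked in to this repository:
//
//	file := os.Open(path) handle err {
//		return nil, fmt.Errorf("Error opening config file %q: %w", path, err)
//	}
//
// Standard Go that `make desugar` would presumably produce from it:
func openConfig(path string) (*os.File, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("Error opening config file %q: %w", path, err)
	}
	return file, nil
}

If that reading is correct, it also explains why `generate`, `build`, and `docker-build-go-base` now depend on `desugar` in the Makefile: the sugared sources have to be rewritten before `go vet`, `go build`, or code generation can compile them.
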
diff --git a/pkg/neonvm/controllers/vmmigration_controller.go b/pkg/neonvm/controllers/vmmigration_controller.go index 47fcb1ef2..d902dc6ef 100644 --- a/pkg/neonvm/controllers/vmmigration_controller.go +++ b/pkg/neonvm/controllers/vmmigration_controller.go @@ -88,8 +88,7 @@ func (r *VirtualMachineMigrationReconciler) createTargetPod( } // Define a new target pod - tpod, err := r.targetPodForVirtualMachine(vm, migration, sshSecret) - if err != nil { + tpod := r.targetPodForVirtualMachine(vm, migration, sshSecret) handle err { logger.Error(err, "Failed to generate Target Pod spec") return ctrl.Result{}, err } @@ -157,8 +156,7 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c if controllerutil.ContainsFinalizer(migration, virtualmachinemigrationFinalizer) { // our finalizer is present, so lets handle any external dependency log.Info("Performing Finalizer Operations for Migration") - vm, err := getVM() - if err != nil { + vm := getVM() handle err { return ctrl.Result{}, err } if err := r.doFinalizerOperationsForVirtualMachineMigration(ctx, migration, vm); err != nil { @@ -194,8 +192,7 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c } // Fetch the corresponding VirtualMachine instance - vm, err := getVM() - if err != nil { + vm := getVM() handle err { log.Error(err, "Failed to get VM", "VmName", migration.Spec.VmName) if apierrors.IsNotFound(err) { // stop reconcile loop if vm not found (already deleted?) @@ -411,8 +408,7 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c } // retrieve migration statistics - migrationInfo, err := QmpGetMigrationInfo(QmpAddr(vm)) - if err != nil { + migrationInfo := QmpGetMigrationInfo(QmpAddr(vm)) handle err { log.Error(err, "Failed to get migration info") return ctrl.Result{}, err } @@ -689,8 +685,7 @@ func (r *VirtualMachineMigrationReconciler) targetPodForVirtualMachine( return nil, fmt.Errorf("cannot create target pod because memory is invalid: %w", err) } - pod, err := podSpec(vm, sshSecret, r.Config) - if err != nil { + pod := podSpec(vm, sshSecret, r.Config) handle err { return nil, err } diff --git a/pkg/neonvm/cpuscaling/cpuscaler.go b/pkg/neonvm/cpuscaling/cpuscaler.go index 98ff751b6..72027ae4f 100644 --- a/pkg/neonvm/cpuscaling/cpuscaler.go +++ b/pkg/neonvm/cpuscaling/cpuscaler.go @@ -29,8 +29,7 @@ func NewCPUScaler() *CPUScaler { } func (c *CPUScaler) ReconcileOnlineCPU(targetCount int) error { - online, err := c.cpuState.OnlineCPUs() - if err != nil { + online := c.cpuState.OnlineCPUs() handle err { return err } @@ -46,8 +45,7 @@ func (c *CPUScaler) ReconcileOnlineCPU(targetCount int) error { return c.setStateTo(cpuOffline, diff, online) } else if len(online) < targetCount { - offline, err := c.cpuState.OfflineCPUs() - if err != nil { + offline := c.cpuState.OfflineCPUs() handle err { return nil } @@ -83,8 +81,7 @@ func (c *CPUScaler) setStateTo(state cpuState, count int, candidateCPUs []int) e // ActiveCPUsCount() returns the count of online CPUs. 
func (c *CPUScaler) ActiveCPUsCount() (int, error) { - onlineCPUs, err := c.cpuState.OnlineCPUs() - if err != nil { + onlineCPUs := c.cpuState.OnlineCPUs() handle err { return 0, err } return len(onlineCPUs), nil diff --git a/pkg/neonvm/cpuscaling/sysfs.go b/pkg/neonvm/cpuscaling/sysfs.go index 1cd511f27..6265a0501 100644 --- a/pkg/neonvm/cpuscaling/sysfs.go +++ b/pkg/neonvm/cpuscaling/sysfs.go @@ -31,12 +31,10 @@ func (cs *cpuSysfsState) SetState(cpuNum int, cpuState cpuState) error { } func (cs *cpuSysfsState) OnlineCPUs() ([]int, error) { - data, err := os.ReadFile(filepath.Join(cpuPath, "online")) - if err != nil { + data := os.ReadFile(filepath.Join(cpuPath, "online")) handle err { return nil, fmt.Errorf("failed to read online CPUs: %w", err) } - cpuIDs, err := cs.parseMultipleCPURange(string(data)) - if err != nil { + cpuIDs := cs.parseMultipleCPURange(string(data)) handle err { // log value of the file in case we can't parse to help debugging return nil, fmt.Errorf("failed to parse online CPUs %q: %w", string(data), err) } @@ -44,12 +42,10 @@ func (cs *cpuSysfsState) OnlineCPUs() ([]int, error) { } func (cs *cpuSysfsState) OfflineCPUs() ([]int, error) { - data, err := os.ReadFile(filepath.Join(cpuPath, "offline")) - if err != nil { + data := os.ReadFile(filepath.Join(cpuPath, "offline")) handle err { return nil, fmt.Errorf("failed to read offline CPUs: %w", err) } - cpuIDs, err := cs.parseMultipleCPURange(string(data)) - if err != nil { + cpuIDs := cs.parseMultipleCPURange(string(data)) handle err { // log value of the file in case we can't parse to help debugging return nil, fmt.Errorf("failed to parse offline CPUs %q: %w", string(data), err) } @@ -62,20 +58,17 @@ func (cs *cpuSysfsState) parseCPURange(cpuRange string) (int, int, error) { // Single CPU case, e.g., "0" if len(parts) == 1 { - cpu, err := strconv.Atoi(parts[0]) - if err != nil { + cpu := strconv.Atoi(parts[0]) handle err { return -1, -1, err } return cpu, cpu, nil } // Range case, e.g., "0-3" - start, err := strconv.Atoi(parts[0]) - if err != nil { + start := strconv.Atoi(parts[0]) handle err { return -1, -1, err } - end, err := strconv.Atoi(parts[1]) - if err != nil { + end := strconv.Atoi(parts[1]) handle err { return -1, -1, err } return start, end, nil @@ -88,8 +81,7 @@ func (cs *cpuSysfsState) parseMultipleCPURange(cpuRanges string) ([]int, error) var cpus []int for _, part := range parts { - start, end, err := cs.parseCPURange(part) - if err != nil { + start, end := cs.parseCPURange(part) handle err { return nil, err } diff --git a/pkg/neonvm/ipam/allocate.go b/pkg/neonvm/ipam/allocate.go index c3137e219..2f2f1a799 100644 --- a/pkg/neonvm/ipam/allocate.go +++ b/pkg/neonvm/ipam/allocate.go @@ -73,8 +73,7 @@ func doRelease( _, ipnet, _ := net.ParseCIDR(ipRange.Range) // try to release IP for given VM - newReservation, ip, err := whereaboutsallocate.IterateForDeallocation(reservation, vmName.String(), getMatchingIPReservationIndex) - if err != nil { + newReservation, ip := whereaboutsallocate.IterateForDeallocation(reservation, vmName.String(), getMatchingIPReservationIndex) handle err { // The only reason to get an error here is if we are trying // to deallocate the same IP twice. 
log.Info("Failed to deallocate IP", "error", err) diff --git a/pkg/neonvm/ipam/client.go b/pkg/neonvm/ipam/client.go index bc504ea3d..2a5e8541c 100644 --- a/pkg/neonvm/ipam/client.go +++ b/pkg/neonvm/ipam/client.go @@ -17,16 +17,13 @@ type Client struct { } func NewKubeClient(cfg *rest.Config) (*Client, error) { - kubeClient, err := kubernetes.NewForConfig(cfg) - if err != nil { + kubeClient := kubernetes.NewForConfig(cfg) handle err { return nil, err } - vmClient, err := neonvm.NewForConfig(cfg) - if err != nil { + vmClient := neonvm.NewForConfig(cfg) handle err { return nil, err } - nadClient, err := nad.NewForConfig(cfg) - if err != nil { + nadClient := nad.NewForConfig(cfg) handle err { return nil, err } diff --git a/pkg/neonvm/ipam/ipam.go b/pkg/neonvm/ipam/ipam.go index ea5494b23..9fd43952b 100644 --- a/pkg/neonvm/ipam/ipam.go +++ b/pkg/neonvm/ipam/ipam.go @@ -65,16 +65,14 @@ type IPAMParams struct { } func (i *IPAM) AcquireIP(ctx context.Context, vmName types.NamespacedName) (net.IPNet, error) { - ip, err := i.runIPAMWithMetrics(ctx, makeAcquireAction(ctx, vmName), IPAMAcquire) - if err != nil { + ip := i.runIPAMWithMetrics(ctx, makeAcquireAction(ctx, vmName), IPAMAcquire) handle err { return net.IPNet{}, fmt.Errorf("failed to acquire IP: %w", err) } return ip, nil } func (i *IPAM) ReleaseIP(ctx context.Context, vmName types.NamespacedName) (net.IPNet, error) { - ip, err := i.runIPAMWithMetrics(ctx, makeReleaseAction(ctx, vmName), IPAMRelease) - if err != nil { + ip := i.runIPAMWithMetrics(ctx, makeReleaseAction(ctx, vmName), IPAMRelease) handle err { return net.IPNet{}, fmt.Errorf("failed to release IP: %w", err) } return ip, nil @@ -83,8 +81,7 @@ func (i *IPAM) ReleaseIP(ctx context.Context, vmName types.NamespacedName) (net. // New returns a new IPAM object with ipam config and k8s/crd clients func New(params IPAMParams) (*IPAM, error) { // get Kubernetes client config - cfg, err := config.GetConfig() - if err != nil { + cfg := config.GetConfig() handle err { return nil, fmt.Errorf("error building kubernetes configuration: %w", err) } @@ -92,8 +89,7 @@ func New(params IPAMParams) (*IPAM, error) { cfg.QPS = KubernetesClientQPS cfg.Burst = KubernetesClientBurst - kClient, err := NewKubeClient(cfg) - if err != nil { + kClient := NewKubeClient(cfg) handle err { return nil, fmt.Errorf("error creating kubernetes client: %w", err) } return NewWithClient(kClient, params) @@ -104,16 +100,14 @@ func NewWithClient(kClient *Client, params IPAMParams) (*IPAM, error) { defer cancel() // read network-attachment-definition from Kubernetes - nad, err := kClient.NADClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(params.NadNamespace).Get(ctx, params.NadName, metav1.GetOptions{}) - if err != nil { + nad := kClient.NADClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(params.NadNamespace).Get(ctx, params.NadName, metav1.GetOptions{}) handle err { return nil, err } if len(nad.Spec.Config) == 0 { return nil, fmt.Errorf("network-attachment-definition %s hasn't IPAM config section", nad.Name) } - ipamConfig, err := LoadFromNad(nad.Spec.Config, params.NadNamespace) - if err != nil { + ipamConfig := LoadFromNad(nad.Spec.Config, params.NadNamespace) handle err { return nil, fmt.Errorf("network-attachment-definition IPAM config parse error: %w", err) } if len(ipamConfig.IPRanges) == 0 { @@ -153,8 +147,7 @@ func LoadFromNad(nadConfig string, nadNamespace string) (*IPAMConfig, error) { // check IP ranges for idx, rangeConfig := range n.IPAM.IPRanges { - firstip, ipNet, err := 
net.ParseCIDR(rangeConfig.Range) - if err != nil { + firstip, ipNet := net.ParseCIDR(rangeConfig.Range) handle err { return nil, fmt.Errorf("invalid CIDR %s: %w", rangeConfig.Range, err) } rangeConfig.Range = ipNet.String() @@ -184,8 +177,7 @@ func LoadFromNad(nadConfig string, nadNamespace string) (*IPAMConfig, error) { // check Excluded IP ranges for idx := range n.IPAM.OmitRanges { - _, _, err := net.ParseCIDR(n.IPAM.OmitRanges[idx]) - if err != nil { + _, _ := net.ParseCIDR(n.IPAM.OmitRanges[idx]) handle err { return nil, fmt.Errorf("invalid exclude CIDR %s: %w", n.IPAM.OmitRanges[idx], err) } } @@ -201,8 +193,7 @@ func (i *IPAM) runIPAMWithMetrics(ctx context.Context, action ipamAction, action // This is if we get a panic defer timer.Finish(IPAMPanic) - ip, err := i.runIPAM(ctx, action) - if err != nil { + ip := i.runIPAM(ctx, action) handle err { timer.Finish(IPAMFailure) } else { timer.Finish(IPAMSuccess) @@ -257,8 +248,7 @@ func (i *IPAM) runIPAMRange(ctx context.Context, ipRange RangeConfiguration, act } // read IPPool from ipppols.vm.neon.tech custom resource - pool, err := i.getNeonvmIPPool(ctx, ipRange.Range) - if err != nil { + pool := i.getNeonvmIPPool(ctx, ipRange.Range) handle err { if e, ok := err.(Temporary); ok && e.Temporary() { // retry attempt to read IPPool time.Sleep(DatastoreRetriesDelay) @@ -351,8 +341,7 @@ func (i *IPAM) getNeonvmIPPool(ctx context.Context, ipRange string) (*NeonvmIPPo } // get first IP in the pool - ip, _, err := net.ParseCIDR(pool.Spec.Range) - if err != nil { + ip, _ := net.ParseCIDR(pool.Spec.Range) handle err { return nil, err } @@ -366,8 +355,7 @@ func (i *IPAM) getNeonvmIPPool(ctx context.Context, ipRange string) (*NeonvmIPPo // Update NeonvmIPPool with new IP reservation func (p *NeonvmIPPool) Update(ctx context.Context, reservation []whereaboutstypes.IPReservation) error { p.pool.Spec.Allocations = toAllocations(reservation, p.firstip) - _, err := p.vmClient.NeonvmV1().IPPools(p.pool.Namespace).Update(ctx, p.pool, metav1.UpdateOptions{}) - if err != nil { + _ := p.vmClient.NeonvmV1().IPPools(p.pool.Namespace).Update(ctx, p.pool, metav1.UpdateOptions{}) handle err { if apierrors.IsConflict(err) { return &temporaryError{err} } @@ -381,8 +369,7 @@ func toIPReservation(ctx context.Context, allocations map[string]vmv1.IPAllocati log := log.FromContext(ctx) reservelist := []whereaboutstypes.IPReservation{} for offset, a := range allocations { - numOffset, err := strconv.ParseInt(offset, 10, 64) - if err != nil { + numOffset := strconv.ParseInt(offset, 10, 64) handle err { // allocations that are invalid int64s should be ignored // toAllocationMap should be the only writer of offsets, via `fmt.Sprintf("%d", ...)`` log.Error(err, "error decoding ip offset") diff --git a/pkg/plugin/config.go b/pkg/plugin/config.go index f131629bd..4e512cfc7 100644 --- a/pkg/plugin/config.go +++ b/pkg/plugin/config.go @@ -167,8 +167,7 @@ func (c *ScoringConfig) validate() (string, error) { const DefaultConfigPath = "/etc/scheduler-plugin-config/autoscale-enforcer-config.json" func ReadConfig(path string) (*Config, error) { - file, err := os.Open(path) - if err != nil { + file := os.Open(path) handle err { return nil, fmt.Errorf("Error opening config file %q: %w", path, err) } diff --git a/pkg/plugin/entrypoint.go b/pkg/plugin/entrypoint.go index 570189000..5b9dfc11c 100644 --- a/pkg/plugin/entrypoint.go +++ b/pkg/plugin/entrypoint.go @@ -38,8 +38,7 @@ func NewAutoscaleEnforcerPlugin( // to set it back to JSON because NeonVM doesn't support protobuf. 
vmConfig.ContentType = "application/json" vmConfig.QPS = 1000 // default QPS is 5. That's too little to handle thousands of pods. - vmClient, err := vmclient.NewForConfig(vmConfig) - if err != nil { + vmClient := vmclient.NewForConfig(vmConfig) handle err { return nil, fmt.Errorf("could not create NeonVM client: %w", err) } @@ -99,14 +98,12 @@ func NewAutoscaleEnforcerPlugin( // It's not guaranteed, because parallel workers acquiring the same lock ends up with *some* // reordered handling, but it helps dramatically reduce the number of warnings in practice. nodeHandlers := watchHandlers[*corev1.Node](reconcileQueue, initEvents) - nodeStore, err := watchNodeEvents(ctx, logger, handle.ClientSet(), watchMetrics, nodeHandlers) - if err != nil { + nodeStore := watchNodeEvents(ctx, logger, handle.ClientSet(), watchMetrics, nodeHandlers) handle err { return nil, fmt.Errorf("could not start watch on Node events: %w", err) } podHandlers := watchHandlers[*corev1.Pod](reconcileQueue, initEvents) - podStore, err := watchPodEvents(ctx, logger, handle.ClientSet(), watchMetrics, podHandlers) - if err != nil { + podStore := watchPodEvents(ctx, logger, handle.ClientSet(), watchMetrics, podHandlers) handle err { return nil, fmt.Errorf("could not start watch on Pod events: %w", err) } diff --git a/pkg/plugin/framework_methods.go b/pkg/plugin/framework_methods.go index 5bd290c3b..98912bb43 100644 --- a/pkg/plugin/framework_methods.go +++ b/pkg/plugin/framework_methods.go @@ -128,8 +128,7 @@ func (e *AutoscaleEnforcer) Filter( return status } - podState, err := state.PodStateFromK8sObj(pod) - if err != nil { + podState := state.PodStateFromK8sObj(pod) handle err { msg := "Error extracting local information for Pod" logger.Error(msg, zap.Error(err)) return framework.NewStatus( @@ -217,8 +216,7 @@ func (e *AutoscaleEnforcer) filterCheck( UID: p.Pod.UID, }) - pod, err := state.PodStateFromK8sObj(p.Pod) - if err != nil { + pod := state.PodStateFromK8sObj(p.Pod) handle err { logger.Error( "Ignoring extra Pod in Filter stage because extracting custom state failed", reconcile.ObjectMetaLogField("Pod", p.Pod), @@ -300,8 +298,7 @@ func (e *AutoscaleEnforcer) Score( return framework.MinNodeScore, status } - podState, err := state.PodStateFromK8sObj(pod) - if err != nil { + podState := state.PodStateFromK8sObj(pod) handle err { msg := "Error extracting local information for Pod" logger.Error(msg, zap.Error(err)) return framework.MinNodeScore, framework.NewStatus( @@ -494,8 +491,7 @@ func (e *AutoscaleEnforcer) Reserve( return status } - podState, err := state.PodStateFromK8sObj(pod) - if err != nil { + podState := state.PodStateFromK8sObj(pod) handle err { msg := "Error extracting local information for Pod" logger.Error(msg, zap.Error(err)) return framework.NewStatus( diff --git a/pkg/plugin/globalstate.go b/pkg/plugin/globalstate.go index 5e801f050..3d58909dc 100644 --- a/pkg/plugin/globalstate.go +++ b/pkg/plugin/globalstate.go @@ -156,8 +156,7 @@ func NewPluginState( return err }, patchVM: func(vm util.NamespacedName, patches []patch.Operation) error { - patchPayload, err := json.Marshal(patches) - if err != nil { + patchPayload := json.Marshal(patches) handle err { panic(fmt.Errorf("could not marshal JSON patch: %w", err)) } diff --git a/pkg/plugin/handle_node.go b/pkg/plugin/handle_node.go index 5c541b777..ec8a1d7c2 100644 --- a/pkg/plugin/handle_node.go +++ b/pkg/plugin/handle_node.go @@ -29,8 +29,7 @@ func (s *PluginState) HandleNodeEvent(logger *zap.Logger, kind reconcile.EventKi } func (s *PluginState) 
updateNode(logger *zap.Logger, node *corev1.Node, expectExists bool) error { - newNode, err := state.NodeStateFromK8sObj(node, s.config.Watermark, s.metrics.Nodes.InheritedLabels) - if err != nil { + newNode := state.NodeStateFromK8sObj(node, s.config.Watermark, s.metrics.Nodes.InheritedLabels) handle err { return fmt.Errorf("could not get state from Node object: %w", err) } diff --git a/pkg/plugin/handle_pod.go b/pkg/plugin/handle_pod.go index d2c89b337..fd82c3cad 100644 --- a/pkg/plugin/handle_pod.go +++ b/pkg/plugin/handle_pod.go @@ -83,8 +83,7 @@ func (s *PluginState) updatePod( pod *corev1.Pod, expectExists bool, ) (*podUpdateResult, error) { - newPod, err := state.PodStateFromK8sObj(pod) - if err != nil { + newPod := state.PodStateFromK8sObj(pod) handle err { return nil, fmt.Errorf("could not get state from Pod object: %w", err) } @@ -386,8 +385,7 @@ func (s *PluginState) patchReservedResourcesForPod( // against the requested resources. marshalJSON := func(value any) string { - bs, err := json.Marshal(value) - if err != nil { + bs := json.Marshal(value) handle err { panic(fmt.Sprintf("failed to marshal value: %s", err)) } return string(bs) diff --git a/pkg/plugin/reconcile/queue.go b/pkg/plugin/reconcile/queue.go index c1edfb7e7..c8018ff70 100644 --- a/pkg/plugin/reconcile/queue.go +++ b/pkg/plugin/reconcile/queue.go @@ -104,8 +104,7 @@ func NewQueue(handlers map[Object]HandlerFunc, opts ...QueueOption) (*Queue, err handlersByType := make(map[schema.GroupVersionKind]HandlerFunc) for obj, handler := range handlers { // nb: second arg is whether the object is unversioned. That doesn't matter to us. - gvk, err := util.LookupGVKForType(obj) - if err != nil { + gvk := util.LookupGVKForType(obj) handle err { return nil, err } diff --git a/pkg/plugin/run.go b/pkg/plugin/run.go index aa8b57c17..446e72ac5 100644 --- a/pkg/plugin/run.go +++ b/pkg/plugin/run.go @@ -104,8 +104,7 @@ func (s *PluginState) startPermitHandler( return } - responseBody, err := json.Marshal(&resp) - if err != nil { + responseBody := json.Marshal(&resp) handle err { logger.Panic("Failed to encode response JSON", zap.Error(err)) } @@ -249,8 +248,7 @@ func (s *PluginState) handleAgentRequest( return nil, 404, errors.New("pod not found") } - podState, err := state.PodStateFromK8sObj(podObj) - if err != nil { + podState := state.PodStateFromK8sObj(podObj) handle err { logger.Error("Failed to extract Pod state from Pod object for agent request") return nil, 500, errors.New("failed to extract state from pod") } @@ -307,8 +305,7 @@ func (s *PluginState) handleAgentRequest( func vmPatchForAgentRequest(pod *corev1.Pod, req api.AgentRequest) (_ []patch.Operation, changed bool) { marshalJSON := func(value any) string { - bs, err := json.Marshal(value) - if err != nil { + bs := json.Marshal(value) handle err { panic(fmt.Sprintf("failed to marshal value: %s", err)) } return string(bs) diff --git a/pkg/plugin/state/pod.go b/pkg/plugin/state/pod.go index 23169b5a1..7f183fd1d 100644 --- a/pkg/plugin/state/pod.go +++ b/pkg/plugin/state/pod.go @@ -168,8 +168,7 @@ func podStateForVMRunner(pod *corev1.Pod, vmRef metav1.OwnerReference) (Pod, err autoscalable := api.HasAutoscalingEnabled(pod) - res, err := vmv1.VirtualMachineResourcesFromPod(pod) - if err != nil { + res := vmv1.VirtualMachineResourcesFromPod(pod) handle err { return lo.Empty[Pod](), err } @@ -178,8 +177,7 @@ func podStateForVMRunner(pod *corev1.Pod, vmRef metav1.OwnerReference) (Pod, err Mem: api.BytesFromResourceQuantity(res.MemorySlotSize) * api.Bytes(res.MemorySlots.Use), 
} - overcommit, err := vmv1.VirtualMachineOvercommitFromPod(pod) - if err != nil { + overcommit := vmv1.VirtualMachineOvercommitFromPod(pod) handle err { return lo.Empty[Pod](), err } diff --git a/pkg/plugin/state/pod_test.go b/pkg/plugin/state/pod_test.go index aa9eba3b6..df8888a4f 100644 --- a/pkg/plugin/state/pod_test.go +++ b/pkg/plugin/state/pod_test.go @@ -671,8 +671,7 @@ func TestPodStateExtraction(t *testing.T) { }, } - pod, err := state.PodStateFromK8sObj(obj) - if err != nil { + pod := state.PodStateFromK8sObj(obj) handle err { t.Error("failed to extract pod state: ", err.Error()) return } diff --git a/pkg/plugin/watch.go b/pkg/plugin/watch.go index 398ea87af..9e45300d9 100644 --- a/pkg/plugin/watch.go +++ b/pkg/plugin/watch.go @@ -51,8 +51,7 @@ func onlyErr[T any](_ T, err error) error { func watchConfig[T any](metrics watch.Metrics) watch.Config { sampleObj := any(new(T)).(runtime.Object) - gvk, err := util.LookupGVKForType(sampleObj) - if err != nil { + gvk := util.LookupGVKForType(sampleObj) handle err { panic(err) } kind := gvk.Kind diff --git a/pkg/reporting/batch_jsonarray.go b/pkg/reporting/batch_jsonarray.go index dc74ca4a9..090eeb556 100644 --- a/pkg/reporting/batch_jsonarray.go +++ b/pkg/reporting/batch_jsonarray.go @@ -22,8 +22,7 @@ func NewJSONArrayBuilder[E any](buf IOBuffer, nestedFields ...string) *JSONArray for _, fieldName := range nestedFields { // note: use a discrete json.Marhsal here instead of json.Encoder because encoder adds a // newline at the end, and that'll make the formatting weird for us. - encodedField, err := json.Marshal(fieldName) - if err != nil { + encodedField := json.Marshal(fieldName) handle err { panic(fmt.Sprintf("failed to JSON encode: %s", fieldName)) } @@ -58,8 +57,7 @@ func (b *JSONArrayBuilder[E]) Add(event E) { // note: we use a discrete json.Marshal here instead of json.Encoder becaues encoder adds a // newline at the end, and that'll make the formatting weird for us. 
- tmpJSON, err := json.Marshal(event) - if err != nil { + tmpJSON := json.Marshal(event) handle err { panic(fmt.Sprintf("failed to JSON encode: %s", err)) } diff --git a/pkg/reporting/batch_jsonlines.go b/pkg/reporting/batch_jsonlines.go index 61d5fd545..cc4777a95 100644 --- a/pkg/reporting/batch_jsonlines.go +++ b/pkg/reporting/batch_jsonlines.go @@ -20,8 +20,7 @@ func NewJSONLinesBuilder[E any](buf IOBuffer) *JSONLinesBuilder[E] { } func (b *JSONLinesBuilder[E]) Add(event E) { - tmpJSON, err := json.Marshal(event) - if err != nil { + tmpJSON := json.Marshal(event) handle err { panic(fmt.Sprintf("failed to JSON encode: %s", err)) } diff --git a/pkg/reporting/client_azureblob.go b/pkg/reporting/client_azureblob.go index 4502d2b65..98746fa50 100644 --- a/pkg/reporting/client_azureblob.go +++ b/pkg/reporting/client_azureblob.go @@ -58,12 +58,10 @@ func NewAzureBlobStorageClient( }, } - credential, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { + credential := azidentity.NewDefaultAzureCredential(nil) handle err { return nil, err } - client, err := azblob.NewClient(cfg.Endpoint, credential, clientOptions) - if err != nil { + client := azblob.NewClient(cfg.Endpoint, credential, clientOptions) handle err { return nil, &AzureError{err} } diff --git a/pkg/reporting/client_azureblobbilling_test.go b/pkg/reporting/client_azureblobbilling_test.go index c5e0b46ee..fa7bb9a11 100644 --- a/pkg/reporting/client_azureblobbilling_test.go +++ b/pkg/reporting/client_azureblobbilling_test.go @@ -126,8 +126,7 @@ func TestAzureClient_send(t *testing.T) { panic(err) } - baseClient, err := azblob.NewClientWithSharedKeyCredential(endpoint, shKey, nil) - if err != nil { + baseClient := azblob.NewClientWithSharedKeyCredential(endpoint, shKey, nil) handle err { panic(err) } @@ -139,8 +138,7 @@ func TestAzureClient_send(t *testing.T) { Endpoint: endpoint, Container: "test-container", } - payload, err := gzipCompress([]byte("hello, billing data is here")) - if err != nil { + payload := gzipCompress([]byte("hello, billing data is here")) handle err { panic(err) } i := &input{ @@ -179,8 +177,7 @@ func gzipCompress(i []byte) ([]byte, error) { buf := bytes.Buffer{} gzW := gzip.NewWriter(&buf) - _, err := gzW.Write(i) - if err != nil { + _ := gzW.Write(i) handle err { return nil, err } @@ -193,8 +190,7 @@ func gzipCompress(i []byte) ([]byte, error) { } func gzipUncompress(i []byte) ([]byte, error) { - gzR, err := gzip.NewReader(bytes.NewBuffer(i)) - if err != nil { + gzR := gzip.NewReader(bytes.NewBuffer(i)) handle err { return nil, err } var resB bytes.Buffer diff --git a/pkg/reporting/client_http.go b/pkg/reporting/client_http.go index 1f632fbb4..7cb6b757c 100644 --- a/pkg/reporting/client_http.go +++ b/pkg/reporting/client_http.go @@ -74,15 +74,13 @@ type httpRequest struct { // Send implements ClientRequest func (r *httpRequest) Send(ctx context.Context, payload []byte) SimplifiableError { - req, err := http.NewRequestWithContext(ctx, r.cfg.Method, r.cfg.URL, bytes.NewReader(payload)) - if err != nil { + req := http.NewRequestWithContext(ctx, r.cfg.Method, r.cfg.URL, bytes.NewReader(payload)) handle err { return httpRequestError{err: err} } req.Header.Set("content-type", "application/json") req.Header.Set("x-trace-id", r.traceID) - resp, err := r.client.Do(req) - if err != nil { + resp := r.client.Do(req) handle err { return httpRequestError{err: err} } defer resp.Body.Close() diff --git a/pkg/reporting/client_s3.go b/pkg/reporting/client_s3.go index 30d8407ac..4ec93700e 100644 --- 
a/pkg/reporting/client_s3.go +++ b/pkg/reporting/client_s3.go @@ -51,8 +51,7 @@ func NewS3Client( ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - s3Config, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(cfg.Region)) - if err != nil { + s3Config := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(cfg.Region)) handle err { return nil, S3Error{Err: err} } diff --git a/pkg/util/checksum.go b/pkg/util/checksum.go index d558449ea..feae0daeb 100644 --- a/pkg/util/checksum.go +++ b/pkg/util/checksum.go @@ -12,8 +12,7 @@ import ( // Calculate the checksum over all files in a directory, assuming the directory is flat (contains no subdirs). func ChecksumFlatDir(path string) (string, error) { - files, err := ReadAllFiles(path) - if err != nil { + files := ReadAllFiles(path) handle err { return "", err } @@ -27,8 +26,7 @@ func ChecksumFlatDir(path string) (string, error) { // note: any changes to the hash need to be sychronised between neonvm-runner and neonvm-daemon. // Since they are updated independantly, this is not trivial. // If in doubt, make a new function and don't touch this one. - hasher, err := blake2b.New256(nil) - if err != nil { + hasher := blake2b.New256(nil) handle err { return "", err } @@ -59,8 +57,7 @@ func ChecksumFlatDir(path string) (string, error) { // Read all files in a directory, assuming the directory is flat (contains no subdirs). func ReadAllFiles(path string) (map[string][]byte, error) { - entries, err := os.ReadDir(path) - if err != nil { + entries := os.ReadDir(path) handle err { return nil, err } @@ -71,8 +68,7 @@ func ReadAllFiles(path string) (map[string][]byte, error) { continue } - data, err := os.ReadFile(filepath.Join(path, entry.Name())) - if err != nil { + data := os.ReadFile(filepath.Join(path, entry.Name())) handle err { return nil, err } diff --git a/pkg/util/gzip64/gzip64.go b/pkg/util/gzip64/gzip64.go index a4d89b4be..c866cb179 100644 --- a/pkg/util/gzip64/gzip64.go +++ b/pkg/util/gzip64/gzip64.go @@ -37,13 +37,11 @@ func Decode(data string) ([]byte, error) { input := strings.NewReader(data) b64r := base64.NewDecoder(base64.StdEncoding, input) - gzr, err := gzip.NewReader(b64r) - if err != nil { + gzr := gzip.NewReader(b64r) handle err { return nil, fmt.Errorf("failed to create gzip reader: %w", err) } - output, err := io.ReadAll(gzr) - if err != nil { + output := io.ReadAll(gzr) handle err { return nil, err } diff --git a/pkg/util/k8s.go b/pkg/util/k8s.go index 33cabb5bd..8c8968980 100644 --- a/pkg/util/k8s.go +++ b/pkg/util/k8s.go @@ -108,8 +108,7 @@ func TryPodOwnerVirtualMachineMigration(pod *corev1.Pod) *NamespacedName { func LookupGVKForType(sampleObj runtime.Object) (schema.GroupVersionKind, error) { var empty schema.GroupVersionKind - gvks, _, err := scheme.Scheme.ObjectKinds(sampleObj) - if err != nil { + gvks, _ := scheme.Scheme.ObjectKinds(sampleObj) handle err { return empty, fmt.Errorf("could not get GVKs for object type %T: %w", sampleObj, err) } if len(gvks) == 0 { diff --git a/pkg/util/metrics.go b/pkg/util/metrics.go index 44f366627..95119904c 100644 --- a/pkg/util/metrics.go +++ b/pkg/util/metrics.go @@ -23,8 +23,7 @@ func RegisterMetric[P prometheus.Collector](reg prometheus.Registerer, collector func StartPrometheusMetricsServer(ctx context.Context, logger *zap.Logger, port uint16, reg *prometheus.Registry) error { // Separate binding from serving, so that we can catch any error in this thread, rather than the // server's. 
-	listener, err := net.ListenTCP("tcp", &net.TCPAddr{IP: net.IPv4zero, Port: int(port)})
-	if err != nil {
+	listener := net.ListenTCP("tcp", &net.TCPAddr{IP: net.IPv4zero, Port: int(port)}) handle err {
 		return fmt.Errorf("Error listening on TCP port %d: %w", port, err)
 	}
diff --git a/pkg/util/watch/watch.go b/pkg/util/watch/watch.go
index d3fdb87ac..7acf81669 100644
--- a/pkg/util/watch/watch.go
+++ b/pkg/util/watch/watch.go
@@ -126,8 +126,7 @@ func Watch[C Client[L], L metav1.ListMetaAccessor, T any, P Object[T]](
 	// Pre-calculate the GVK for the object types, because List() operations only set the
 	// Kind+APIVersion on the List type, and not the individual elements.
 	sampleObj := P(new(T))
-	gvk, err := util.LookupGVKForType(sampleObj)
-	if err != nil {
+	gvk := util.LookupGVKForType(sampleObj) handle err {
 		return nil, err
 	}
diff --git a/scripts/desugar.sh b/scripts/desugar.sh
new file mode 100755
index 000000000..1ffd57efa
--- /dev/null
+++ b/scripts/desugar.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -euo pipefail
+
+# Script to transform Go error handling from sugared to verbose form
+# Transforms:
+# ... := ... handle err {
+# ...
+# }
+# To:
+# ..., err := ...
+# if err != nil {
+# ...
+# }
+
+find pkg -name "*.go" -type f | while read -r file; do
+ sed -i '
+ /.*:=.*handle err {.*/{
+ s/^\(\t*\)\(.*\) := \(.*\) handle err {/\1\2, err := \3\
+\1if err != nil {/
+ }' "$file"
+
+ echo "Processed: $file"
+done
+
+echo "Desugared all Go files. Don't forget to 'make resugar' before commit!"
diff --git a/scripts/resugar.sh b/scripts/resugar.sh
new file mode 100755
index 000000000..186789300
--- /dev/null
+++ b/scripts/resugar.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+set -euo pipefail
+
+# Script to transform Go error handling from verbose to sugared form
+# Transforms:
+# ..., err := ...
+# if err != nil {
+# ...
+# }
+# To:
+# ... := ... handle err {
+# ...
+# }
+
+find pkg -name "*.go" -type f | while read -r file; do
+ sed -i '
+ /.*, *err[ \t]*:=.*/{
+ N
+ s/^\(\t*\)\(.*\), err := \([^;]*\)\n\(\t*\)if err != nil {/\1\2 := \3 handle err {/
+ }' "$file"
+
+ echo "Processed: $file"
+done
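A plausible round-trip check for the two scripts, run from the repository root; the go and git commands here are illustrative assumptions (this diff only adds the scripts, and the `make resugar` target mentioned in desugar.sh is not part of it):

./scripts/desugar.sh      # `x := f() handle err {`  ->  `x, err := f()` + `if err != nil {`
go build ./pkg/...        # the desugared tree is what the Go toolchain actually compiles
./scripts/resugar.sh      # collapse the pairs back into the sugared form before committing
git diff --stat -- pkg    # ideally empty, if everything under pkg/ uses the sugared form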